1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * platform.c - platform 'pseudo' bus for legacy devices |
4 | * |
5 | * Copyright (c) 2002-3 Patrick Mochel |
6 | * Copyright (c) 2002-3 Open Source Development Labs |
7 | * |
8 | * Please see Documentation/driver-api/driver-model/platform.rst for more |
9 | * information. |
10 | */ |
11 | |
12 | #include <linux/string.h> |
13 | #include <linux/platform_device.h> |
14 | #include <linux/of_device.h> |
15 | #include <linux/of_irq.h> |
16 | #include <linux/module.h> |
17 | #include <linux/init.h> |
18 | #include <linux/interrupt.h> |
19 | #include <linux/ioport.h> |
20 | #include <linux/dma-mapping.h> |
21 | #include <linux/memblock.h> |
22 | #include <linux/err.h> |
23 | #include <linux/slab.h> |
24 | #include <linux/pm_runtime.h> |
25 | #include <linux/pm_domain.h> |
26 | #include <linux/idr.h> |
27 | #include <linux/acpi.h> |
28 | #include <linux/clk/clk-conf.h> |
29 | #include <linux/limits.h> |
30 | #include <linux/property.h> |
31 | #include <linux/kmemleak.h> |
32 | #include <linux/types.h> |
33 | #include <linux/iommu.h> |
34 | #include <linux/dma-map-ops.h> |
35 | |
36 | #include "base.h" |
37 | #include "power/power.h" |
38 | |
39 | /* For automatically allocated device IDs */ |
40 | static DEFINE_IDA(platform_devid_ida); |
41 | |
42 | struct device platform_bus = { |
43 | .init_name = "platform" , |
44 | }; |
45 | EXPORT_SYMBOL_GPL(platform_bus); |
46 | |
47 | /** |
48 | * platform_get_resource - get a resource for a device |
49 | * @dev: platform device |
50 | * @type: resource type |
51 | * @num: resource index |
52 | * |
53 | * Return: a pointer to the resource or NULL on failure. |
54 | */ |
55 | struct resource *platform_get_resource(struct platform_device *dev, |
56 | unsigned int type, unsigned int num) |
57 | { |
58 | u32 i; |
59 | |
60 | for (i = 0; i < dev->num_resources; i++) { |
61 | struct resource *r = &dev->resource[i]; |
62 | |
63 | if (type == resource_type(res: r) && num-- == 0) |
64 | return r; |
65 | } |
66 | return NULL; |
67 | } |
68 | EXPORT_SYMBOL_GPL(platform_get_resource); |
69 | |
70 | struct resource *platform_get_mem_or_io(struct platform_device *dev, |
71 | unsigned int num) |
72 | { |
73 | u32 i; |
74 | |
75 | for (i = 0; i < dev->num_resources; i++) { |
76 | struct resource *r = &dev->resource[i]; |
77 | |
78 | if ((resource_type(res: r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0) |
79 | return r; |
80 | } |
81 | return NULL; |
82 | } |
83 | EXPORT_SYMBOL_GPL(platform_get_mem_or_io); |
84 | |
85 | #ifdef CONFIG_HAS_IOMEM |
86 | /** |
87 | * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a |
88 | * platform device and get resource |
89 | * |
90 | * @pdev: platform device to use both for memory resource lookup as well as |
91 | * resource management |
92 | * @index: resource index |
93 | * @res: optional output parameter to store a pointer to the obtained resource. |
94 | * |
95 | * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code |
96 | * on failure. |
97 | */ |
98 | void __iomem * |
99 | devm_platform_get_and_ioremap_resource(struct platform_device *pdev, |
100 | unsigned int index, struct resource **res) |
101 | { |
102 | struct resource *r; |
103 | |
104 | r = platform_get_resource(pdev, IORESOURCE_MEM, index); |
105 | if (res) |
106 | *res = r; |
107 | return devm_ioremap_resource(dev: &pdev->dev, res: r); |
108 | } |
109 | EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource); |
110 | |
111 | /** |
112 | * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform |
113 | * device |
114 | * |
115 | * @pdev: platform device to use both for memory resource lookup as well as |
116 | * resource management |
117 | * @index: resource index |
118 | * |
119 | * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code |
120 | * on failure. |
121 | */ |
122 | void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev, |
123 | unsigned int index) |
124 | { |
125 | return devm_platform_get_and_ioremap_resource(pdev, index, NULL); |
126 | } |
127 | EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource); |
128 | |
129 | /** |
130 | * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for |
131 | * a platform device, retrieve the |
132 | * resource by name |
133 | * |
134 | * @pdev: platform device to use both for memory resource lookup as well as |
135 | * resource management |
136 | * @name: name of the resource |
137 | * |
138 | * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code |
139 | * on failure. |
140 | */ |
141 | void __iomem * |
142 | devm_platform_ioremap_resource_byname(struct platform_device *pdev, |
143 | const char *name) |
144 | { |
145 | struct resource *res; |
146 | |
147 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); |
148 | return devm_ioremap_resource(dev: &pdev->dev, res); |
149 | } |
150 | EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname); |
151 | #endif /* CONFIG_HAS_IOMEM */ |
152 | |
153 | /** |
154 | * platform_get_irq_optional - get an optional IRQ for a device |
155 | * @dev: platform device |
156 | * @num: IRQ number index |
157 | * |
158 | * Gets an IRQ for a platform device. Device drivers should check the return |
159 | * value for errors so as to not pass a negative integer value to the |
160 | * request_irq() APIs. This is the same as platform_get_irq(), except that it |
161 | * does not print an error message if an IRQ can not be obtained. |
162 | * |
163 | * For example:: |
164 | * |
165 | * int irq = platform_get_irq_optional(pdev, 0); |
166 | * if (irq < 0) |
167 | * return irq; |
168 | * |
169 | * Return: non-zero IRQ number on success, negative error number on failure. |
170 | */ |
171 | int platform_get_irq_optional(struct platform_device *dev, unsigned int num) |
172 | { |
173 | int ret; |
174 | #ifdef CONFIG_SPARC |
175 | /* sparc does not have irqs represented as IORESOURCE_IRQ resources */ |
176 | if (!dev || num >= dev->archdata.num_irqs) |
177 | goto out_not_found; |
178 | ret = dev->archdata.irqs[num]; |
179 | goto out; |
180 | #else |
181 | struct fwnode_handle *fwnode = dev_fwnode(&dev->dev); |
182 | struct resource *r; |
183 | |
184 | if (is_of_node(fwnode)) { |
185 | ret = of_irq_get(to_of_node(fwnode), index: num); |
186 | if (ret > 0 || ret == -EPROBE_DEFER) |
187 | goto out; |
188 | } |
189 | |
190 | r = platform_get_resource(dev, IORESOURCE_IRQ, num); |
191 | if (is_acpi_device_node(fwnode)) { |
192 | if (r && r->flags & IORESOURCE_DISABLED) { |
193 | ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index: num, res: r); |
194 | if (ret) |
195 | goto out; |
196 | } |
197 | } |
198 | |
199 | /* |
200 | * The resources may pass trigger flags to the irqs that need |
201 | * to be set up. It so happens that the trigger flags for |
202 | * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER* |
203 | * settings. |
204 | */ |
205 | if (r && r->flags & IORESOURCE_BITS) { |
206 | struct irq_data *irqd; |
207 | |
208 | irqd = irq_get_irq_data(irq: r->start); |
209 | if (!irqd) |
210 | goto out_not_found; |
211 | irqd_set_trigger_type(d: irqd, type: r->flags & IORESOURCE_BITS); |
212 | } |
213 | |
214 | if (r) { |
215 | ret = r->start; |
216 | goto out; |
217 | } |
218 | |
219 | /* |
220 | * For the index 0 interrupt, allow falling back to GpioInt |
221 | * resources. While a device could have both Interrupt and GpioInt |
222 | * resources, making this fallback ambiguous, in many common cases |
223 | * the device will only expose one IRQ, and this fallback |
224 | * allows a common code path across either kind of resource. |
225 | */ |
226 | if (num == 0 && is_acpi_device_node(fwnode)) { |
227 | ret = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), index: num); |
228 | /* Our callers expect -ENXIO for missing IRQs. */ |
229 | if (ret >= 0 || ret == -EPROBE_DEFER) |
230 | goto out; |
231 | } |
232 | |
233 | #endif |
234 | out_not_found: |
235 | ret = -ENXIO; |
236 | out: |
237 | if (WARN(!ret, "0 is an invalid IRQ number\n" )) |
238 | return -EINVAL; |
239 | return ret; |
240 | } |
241 | EXPORT_SYMBOL_GPL(platform_get_irq_optional); |
242 | |
243 | /** |
244 | * platform_get_irq - get an IRQ for a device |
245 | * @dev: platform device |
246 | * @num: IRQ number index |
247 | * |
248 | * Gets an IRQ for a platform device and prints an error message if finding the |
249 | * IRQ fails. Device drivers should check the return value for errors so as to |
250 | * not pass a negative integer value to the request_irq() APIs. |
251 | * |
252 | * For example:: |
253 | * |
254 | * int irq = platform_get_irq(pdev, 0); |
255 | * if (irq < 0) |
256 | * return irq; |
257 | * |
258 | * Return: non-zero IRQ number on success, negative error number on failure. |
259 | */ |
260 | int platform_get_irq(struct platform_device *dev, unsigned int num) |
261 | { |
262 | int ret; |
263 | |
264 | ret = platform_get_irq_optional(dev, num); |
265 | if (ret < 0) |
266 | return dev_err_probe(dev: &dev->dev, err: ret, |
267 | fmt: "IRQ index %u not found\n" , num); |
268 | |
269 | return ret; |
270 | } |
271 | EXPORT_SYMBOL_GPL(platform_get_irq); |
272 | |
273 | /** |
274 | * platform_irq_count - Count the number of IRQs a platform device uses |
275 | * @dev: platform device |
276 | * |
277 | * Return: Number of IRQs a platform device uses or EPROBE_DEFER |
278 | */ |
279 | int platform_irq_count(struct platform_device *dev) |
280 | { |
281 | int ret, nr = 0; |
282 | |
283 | while ((ret = platform_get_irq_optional(dev, nr)) >= 0) |
284 | nr++; |
285 | |
286 | if (ret == -EPROBE_DEFER) |
287 | return ret; |
288 | |
289 | return nr; |
290 | } |
291 | EXPORT_SYMBOL_GPL(platform_irq_count); |
292 | |
/* devres payload tracking the IRQ mappings handed out by
 * devm_platform_get_irqs_affinity(), so they can be disposed on release. */
struct irq_affinity_devres {
	unsigned int count;
	unsigned int irq[] __counted_by(count);
};
297 | |
298 | static void platform_disable_acpi_irq(struct platform_device *pdev, int index) |
299 | { |
300 | struct resource *r; |
301 | |
302 | r = platform_get_resource(pdev, IORESOURCE_IRQ, index); |
303 | if (r) |
304 | irqresource_disabled(res: r, irq: 0); |
305 | } |
306 | |
307 | static void devm_platform_get_irqs_affinity_release(struct device *dev, |
308 | void *res) |
309 | { |
310 | struct irq_affinity_devres *ptr = res; |
311 | int i; |
312 | |
313 | for (i = 0; i < ptr->count; i++) { |
314 | irq_dispose_mapping(virq: ptr->irq[i]); |
315 | |
316 | if (is_acpi_device_node(dev_fwnode(dev))) |
317 | platform_disable_acpi_irq(to_platform_device(dev), index: i); |
318 | } |
319 | } |
320 | |
321 | /** |
322 | * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a |
323 | * device using an interrupt affinity descriptor |
324 | * @dev: platform device pointer |
325 | * @affd: affinity descriptor |
326 | * @minvec: minimum count of interrupt vectors |
327 | * @maxvec: maximum count of interrupt vectors |
328 | * @irqs: pointer holder for IRQ numbers |
329 | * |
330 | * Gets a set of IRQs for a platform device, and updates IRQ afffinty according |
331 | * to the passed affinity descriptor |
332 | * |
333 | * Return: Number of vectors on success, negative error number on failure. |
334 | */ |
335 | int devm_platform_get_irqs_affinity(struct platform_device *dev, |
336 | struct irq_affinity *affd, |
337 | unsigned int minvec, |
338 | unsigned int maxvec, |
339 | int **irqs) |
340 | { |
341 | struct irq_affinity_devres *ptr; |
342 | struct irq_affinity_desc *desc; |
343 | size_t size; |
344 | int i, ret, nvec; |
345 | |
346 | if (!affd) |
347 | return -EPERM; |
348 | |
349 | if (maxvec < minvec) |
350 | return -ERANGE; |
351 | |
352 | nvec = platform_irq_count(dev); |
353 | if (nvec < 0) |
354 | return nvec; |
355 | |
356 | if (nvec < minvec) |
357 | return -ENOSPC; |
358 | |
359 | nvec = irq_calc_affinity_vectors(minvec, maxvec: nvec, affd); |
360 | if (nvec < minvec) |
361 | return -ENOSPC; |
362 | |
363 | if (nvec > maxvec) |
364 | nvec = maxvec; |
365 | |
366 | size = sizeof(*ptr) + sizeof(unsigned int) * nvec; |
367 | ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size, |
368 | GFP_KERNEL); |
369 | if (!ptr) |
370 | return -ENOMEM; |
371 | |
372 | ptr->count = nvec; |
373 | |
374 | for (i = 0; i < nvec; i++) { |
375 | int irq = platform_get_irq(dev, i); |
376 | if (irq < 0) { |
377 | ret = irq; |
378 | goto err_free_devres; |
379 | } |
380 | ptr->irq[i] = irq; |
381 | } |
382 | |
383 | desc = irq_create_affinity_masks(nvec, affd); |
384 | if (!desc) { |
385 | ret = -ENOMEM; |
386 | goto err_free_devres; |
387 | } |
388 | |
389 | for (i = 0; i < nvec; i++) { |
390 | ret = irq_update_affinity_desc(irq: ptr->irq[i], affinity: &desc[i]); |
391 | if (ret) { |
392 | dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n" , |
393 | ptr->irq[i], ret); |
394 | goto err_free_desc; |
395 | } |
396 | } |
397 | |
398 | devres_add(dev: &dev->dev, res: ptr); |
399 | |
400 | kfree(objp: desc); |
401 | |
402 | *irqs = ptr->irq; |
403 | |
404 | return nvec; |
405 | |
406 | err_free_desc: |
407 | kfree(objp: desc); |
408 | err_free_devres: |
409 | devres_free(res: ptr); |
410 | return ret; |
411 | } |
412 | EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity); |
413 | |
414 | /** |
415 | * platform_get_resource_byname - get a resource for a device by name |
416 | * @dev: platform device |
417 | * @type: resource type |
418 | * @name: resource name |
419 | */ |
420 | struct resource *platform_get_resource_byname(struct platform_device *dev, |
421 | unsigned int type, |
422 | const char *name) |
423 | { |
424 | u32 i; |
425 | |
426 | for (i = 0; i < dev->num_resources; i++) { |
427 | struct resource *r = &dev->resource[i]; |
428 | |
429 | if (unlikely(!r->name)) |
430 | continue; |
431 | |
432 | if (type == resource_type(res: r) && !strcmp(r->name, name)) |
433 | return r; |
434 | } |
435 | return NULL; |
436 | } |
437 | EXPORT_SYMBOL_GPL(platform_get_resource_byname); |
438 | |
439 | static int __platform_get_irq_byname(struct platform_device *dev, |
440 | const char *name) |
441 | { |
442 | struct resource *r; |
443 | int ret; |
444 | |
445 | ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name); |
446 | if (ret > 0 || ret == -EPROBE_DEFER) |
447 | return ret; |
448 | |
449 | r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); |
450 | if (r) { |
451 | if (WARN(!r->start, "0 is an invalid IRQ number\n" )) |
452 | return -EINVAL; |
453 | return r->start; |
454 | } |
455 | |
456 | return -ENXIO; |
457 | } |
458 | |
459 | /** |
460 | * platform_get_irq_byname - get an IRQ for a device by name |
461 | * @dev: platform device |
462 | * @name: IRQ name |
463 | * |
464 | * Get an IRQ like platform_get_irq(), but then by name rather then by index. |
465 | * |
466 | * Return: non-zero IRQ number on success, negative error number on failure. |
467 | */ |
468 | int platform_get_irq_byname(struct platform_device *dev, const char *name) |
469 | { |
470 | int ret; |
471 | |
472 | ret = __platform_get_irq_byname(dev, name); |
473 | if (ret < 0) |
474 | return dev_err_probe(dev: &dev->dev, err: ret, fmt: "IRQ %s not found\n" , |
475 | name); |
476 | return ret; |
477 | } |
478 | EXPORT_SYMBOL_GPL(platform_get_irq_byname); |
479 | |
480 | /** |
481 | * platform_get_irq_byname_optional - get an optional IRQ for a device by name |
482 | * @dev: platform device |
483 | * @name: IRQ name |
484 | * |
485 | * Get an optional IRQ by name like platform_get_irq_byname(). Except that it |
486 | * does not print an error message if an IRQ can not be obtained. |
487 | * |
488 | * Return: non-zero IRQ number on success, negative error number on failure. |
489 | */ |
490 | int platform_get_irq_byname_optional(struct platform_device *dev, |
491 | const char *name) |
492 | { |
493 | return __platform_get_irq_byname(dev, name); |
494 | } |
495 | EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional); |
496 | |
497 | /** |
498 | * platform_add_devices - add a numbers of platform devices |
499 | * @devs: array of platform devices to add |
500 | * @num: number of platform devices in array |
501 | * |
502 | * Return: 0 on success, negative error number on failure. |
503 | */ |
504 | int platform_add_devices(struct platform_device **devs, int num) |
505 | { |
506 | int i, ret = 0; |
507 | |
508 | for (i = 0; i < num; i++) { |
509 | ret = platform_device_register(devs[i]); |
510 | if (ret) { |
511 | while (--i >= 0) |
512 | platform_device_unregister(devs[i]); |
513 | break; |
514 | } |
515 | } |
516 | |
517 | return ret; |
518 | } |
519 | EXPORT_SYMBOL_GPL(platform_add_devices); |
520 | |
/* A platform_device together with inline storage for its name,
 * allocated as one block by platform_device_alloc(). */
struct platform_object {
	struct platform_device pdev;
	char name[];	/* flexible array member holding the device name */
};
525 | |
526 | /* |
527 | * Set up default DMA mask for platform devices if the they weren't |
528 | * previously set by the architecture / DT. |
529 | */ |
530 | static void setup_pdev_dma_masks(struct platform_device *pdev) |
531 | { |
532 | pdev->dev.dma_parms = &pdev->dma_parms; |
533 | |
534 | if (!pdev->dev.coherent_dma_mask) |
535 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); |
536 | if (!pdev->dev.dma_mask) { |
537 | pdev->platform_dma_mask = DMA_BIT_MASK(32); |
538 | pdev->dev.dma_mask = &pdev->platform_dma_mask; |
539 | } |
540 | }; |
541 | |
542 | /** |
543 | * platform_device_put - destroy a platform device |
544 | * @pdev: platform device to free |
545 | * |
546 | * Free all memory associated with a platform device. This function must |
547 | * _only_ be externally called in error cases. All other usage is a bug. |
548 | */ |
549 | void platform_device_put(struct platform_device *pdev) |
550 | { |
551 | if (!IS_ERR_OR_NULL(ptr: pdev)) |
552 | put_device(dev: &pdev->dev); |
553 | } |
554 | EXPORT_SYMBOL_GPL(platform_device_put); |
555 | |
556 | static void platform_device_release(struct device *dev) |
557 | { |
558 | struct platform_object *pa = container_of(dev, struct platform_object, |
559 | pdev.dev); |
560 | |
561 | of_node_put(node: pa->pdev.dev.of_node); |
562 | kfree(objp: pa->pdev.dev.platform_data); |
563 | kfree(objp: pa->pdev.mfd_cell); |
564 | kfree(objp: pa->pdev.resource); |
565 | kfree(objp: pa->pdev.driver_override); |
566 | kfree(objp: pa); |
567 | } |
568 | |
569 | /** |
570 | * platform_device_alloc - create a platform device |
571 | * @name: base name of the device we're adding |
572 | * @id: instance id |
573 | * |
574 | * Create a platform device object which can have other objects attached |
575 | * to it, and which will have attached objects freed when it is released. |
576 | */ |
577 | struct platform_device *platform_device_alloc(const char *name, int id) |
578 | { |
579 | struct platform_object *pa; |
580 | |
581 | pa = kzalloc(size: sizeof(*pa) + strlen(name) + 1, GFP_KERNEL); |
582 | if (pa) { |
583 | strcpy(p: pa->name, q: name); |
584 | pa->pdev.name = pa->name; |
585 | pa->pdev.id = id; |
586 | device_initialize(dev: &pa->pdev.dev); |
587 | pa->pdev.dev.release = platform_device_release; |
588 | setup_pdev_dma_masks(&pa->pdev); |
589 | } |
590 | |
591 | return pa ? &pa->pdev : NULL; |
592 | } |
593 | EXPORT_SYMBOL_GPL(platform_device_alloc); |
594 | |
595 | /** |
596 | * platform_device_add_resources - add resources to a platform device |
597 | * @pdev: platform device allocated by platform_device_alloc to add resources to |
598 | * @res: set of resources that needs to be allocated for the device |
599 | * @num: number of resources |
600 | * |
601 | * Add a copy of the resources to the platform device. The memory |
602 | * associated with the resources will be freed when the platform device is |
603 | * released. |
604 | */ |
605 | int platform_device_add_resources(struct platform_device *pdev, |
606 | const struct resource *res, unsigned int num) |
607 | { |
608 | struct resource *r = NULL; |
609 | |
610 | if (res) { |
611 | r = kmemdup(p: res, size: sizeof(struct resource) * num, GFP_KERNEL); |
612 | if (!r) |
613 | return -ENOMEM; |
614 | } |
615 | |
616 | kfree(objp: pdev->resource); |
617 | pdev->resource = r; |
618 | pdev->num_resources = num; |
619 | return 0; |
620 | } |
621 | EXPORT_SYMBOL_GPL(platform_device_add_resources); |
622 | |
623 | /** |
624 | * platform_device_add_data - add platform-specific data to a platform device |
625 | * @pdev: platform device allocated by platform_device_alloc to add resources to |
626 | * @data: platform specific data for this platform device |
627 | * @size: size of platform specific data |
628 | * |
629 | * Add a copy of platform specific data to the platform device's |
630 | * platform_data pointer. The memory associated with the platform data |
631 | * will be freed when the platform device is released. |
632 | */ |
633 | int platform_device_add_data(struct platform_device *pdev, const void *data, |
634 | size_t size) |
635 | { |
636 | void *d = NULL; |
637 | |
638 | if (data) { |
639 | d = kmemdup(p: data, size, GFP_KERNEL); |
640 | if (!d) |
641 | return -ENOMEM; |
642 | } |
643 | |
644 | kfree(objp: pdev->dev.platform_data); |
645 | pdev->dev.platform_data = d; |
646 | return 0; |
647 | } |
648 | EXPORT_SYMBOL_GPL(platform_device_add_data); |
649 | |
650 | /** |
651 | * platform_device_add - add a platform device to device hierarchy |
652 | * @pdev: platform device we're adding |
653 | * |
654 | * This is part 2 of platform_device_register(), though may be called |
655 | * separately _iff_ pdev was allocated by platform_device_alloc(). |
656 | */ |
657 | int platform_device_add(struct platform_device *pdev) |
658 | { |
659 | struct device *dev = &pdev->dev; |
660 | u32 i; |
661 | int ret; |
662 | |
663 | if (!dev->parent) |
664 | dev->parent = &platform_bus; |
665 | |
666 | dev->bus = &platform_bus_type; |
667 | |
668 | switch (pdev->id) { |
669 | default: |
670 | dev_set_name(dev, name: "%s.%d" , pdev->name, pdev->id); |
671 | break; |
672 | case PLATFORM_DEVID_NONE: |
673 | dev_set_name(dev, name: "%s" , pdev->name); |
674 | break; |
675 | case PLATFORM_DEVID_AUTO: |
676 | /* |
677 | * Automatically allocated device ID. We mark it as such so |
678 | * that we remember it must be freed, and we append a suffix |
679 | * to avoid namespace collision with explicit IDs. |
680 | */ |
681 | ret = ida_alloc(ida: &platform_devid_ida, GFP_KERNEL); |
682 | if (ret < 0) |
683 | return ret; |
684 | pdev->id = ret; |
685 | pdev->id_auto = true; |
686 | dev_set_name(dev, name: "%s.%d.auto" , pdev->name, pdev->id); |
687 | break; |
688 | } |
689 | |
690 | for (i = 0; i < pdev->num_resources; i++) { |
691 | struct resource *p, *r = &pdev->resource[i]; |
692 | |
693 | if (r->name == NULL) |
694 | r->name = dev_name(dev); |
695 | |
696 | p = r->parent; |
697 | if (!p) { |
698 | if (resource_type(res: r) == IORESOURCE_MEM) |
699 | p = &iomem_resource; |
700 | else if (resource_type(res: r) == IORESOURCE_IO) |
701 | p = &ioport_resource; |
702 | } |
703 | |
704 | if (p) { |
705 | ret = insert_resource(parent: p, new: r); |
706 | if (ret) { |
707 | dev_err(dev, "failed to claim resource %d: %pR\n" , i, r); |
708 | goto failed; |
709 | } |
710 | } |
711 | } |
712 | |
713 | pr_debug("Registering platform device '%s'. Parent at %s\n" , dev_name(dev), |
714 | dev_name(dev->parent)); |
715 | |
716 | ret = device_add(dev); |
717 | if (ret) |
718 | goto failed; |
719 | |
720 | return 0; |
721 | |
722 | failed: |
723 | if (pdev->id_auto) { |
724 | ida_free(&platform_devid_ida, id: pdev->id); |
725 | pdev->id = PLATFORM_DEVID_AUTO; |
726 | } |
727 | |
728 | while (i--) { |
729 | struct resource *r = &pdev->resource[i]; |
730 | if (r->parent) |
731 | release_resource(new: r); |
732 | } |
733 | |
734 | return ret; |
735 | } |
736 | EXPORT_SYMBOL_GPL(platform_device_add); |
737 | |
738 | /** |
739 | * platform_device_del - remove a platform-level device |
740 | * @pdev: platform device we're removing |
741 | * |
742 | * Note that this function will also release all memory- and port-based |
743 | * resources owned by the device (@dev->resource). This function must |
744 | * _only_ be externally called in error cases. All other usage is a bug. |
745 | */ |
746 | void platform_device_del(struct platform_device *pdev) |
747 | { |
748 | u32 i; |
749 | |
750 | if (!IS_ERR_OR_NULL(ptr: pdev)) { |
751 | device_del(dev: &pdev->dev); |
752 | |
753 | if (pdev->id_auto) { |
754 | ida_free(&platform_devid_ida, id: pdev->id); |
755 | pdev->id = PLATFORM_DEVID_AUTO; |
756 | } |
757 | |
758 | for (i = 0; i < pdev->num_resources; i++) { |
759 | struct resource *r = &pdev->resource[i]; |
760 | if (r->parent) |
761 | release_resource(new: r); |
762 | } |
763 | } |
764 | } |
765 | EXPORT_SYMBOL_GPL(platform_device_del); |
766 | |
767 | /** |
768 | * platform_device_register - add a platform-level device |
769 | * @pdev: platform device we're adding |
770 | * |
771 | * NOTE: _Never_ directly free @pdev after calling this function, even if it |
772 | * returned an error! Always use platform_device_put() to give up the |
773 | * reference initialised in this function instead. |
774 | */ |
775 | int platform_device_register(struct platform_device *pdev) |
776 | { |
777 | device_initialize(dev: &pdev->dev); |
778 | setup_pdev_dma_masks(pdev); |
779 | return platform_device_add(pdev); |
780 | } |
781 | EXPORT_SYMBOL_GPL(platform_device_register); |
782 | |
783 | /** |
784 | * platform_device_unregister - unregister a platform-level device |
785 | * @pdev: platform device we're unregistering |
786 | * |
787 | * Unregistration is done in 2 steps. First we release all resources |
788 | * and remove it from the subsystem, then we drop reference count by |
789 | * calling platform_device_put(). |
790 | */ |
791 | void platform_device_unregister(struct platform_device *pdev) |
792 | { |
793 | platform_device_del(pdev); |
794 | platform_device_put(pdev); |
795 | } |
796 | EXPORT_SYMBOL_GPL(platform_device_unregister); |
797 | |
798 | /** |
799 | * platform_device_register_full - add a platform-level device with |
800 | * resources and platform-specific data |
801 | * |
802 | * @pdevinfo: data used to create device |
803 | * |
804 | * Returns &struct platform_device pointer on success, or ERR_PTR() on error. |
805 | */ |
806 | struct platform_device *platform_device_register_full( |
807 | const struct platform_device_info *pdevinfo) |
808 | { |
809 | int ret; |
810 | struct platform_device *pdev; |
811 | |
812 | pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id); |
813 | if (!pdev) |
814 | return ERR_PTR(error: -ENOMEM); |
815 | |
816 | pdev->dev.parent = pdevinfo->parent; |
817 | pdev->dev.fwnode = pdevinfo->fwnode; |
818 | pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode)); |
819 | pdev->dev.of_node_reused = pdevinfo->of_node_reused; |
820 | |
821 | if (pdevinfo->dma_mask) { |
822 | pdev->platform_dma_mask = pdevinfo->dma_mask; |
823 | pdev->dev.dma_mask = &pdev->platform_dma_mask; |
824 | pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; |
825 | } |
826 | |
827 | ret = platform_device_add_resources(pdev, |
828 | pdevinfo->res, pdevinfo->num_res); |
829 | if (ret) |
830 | goto err; |
831 | |
832 | ret = platform_device_add_data(pdev, |
833 | pdevinfo->data, pdevinfo->size_data); |
834 | if (ret) |
835 | goto err; |
836 | |
837 | if (pdevinfo->properties) { |
838 | ret = device_create_managed_software_node(dev: &pdev->dev, |
839 | properties: pdevinfo->properties, NULL); |
840 | if (ret) |
841 | goto err; |
842 | } |
843 | |
844 | ret = platform_device_add(pdev); |
845 | if (ret) { |
846 | err: |
847 | ACPI_COMPANION_SET(&pdev->dev, NULL); |
848 | platform_device_put(pdev); |
849 | return ERR_PTR(error: ret); |
850 | } |
851 | |
852 | return pdev; |
853 | } |
854 | EXPORT_SYMBOL_GPL(platform_device_register_full); |
855 | |
856 | /** |
857 | * __platform_driver_register - register a driver for platform-level devices |
858 | * @drv: platform driver structure |
859 | * @owner: owning module/driver |
860 | */ |
861 | int __platform_driver_register(struct platform_driver *drv, |
862 | struct module *owner) |
863 | { |
864 | drv->driver.owner = owner; |
865 | drv->driver.bus = &platform_bus_type; |
866 | |
867 | return driver_register(drv: &drv->driver); |
868 | } |
869 | EXPORT_SYMBOL_GPL(__platform_driver_register); |
870 | |
871 | /** |
872 | * platform_driver_unregister - unregister a driver for platform-level devices |
873 | * @drv: platform driver structure |
874 | */ |
875 | void platform_driver_unregister(struct platform_driver *drv) |
876 | { |
877 | driver_unregister(drv: &drv->driver); |
878 | } |
879 | EXPORT_SYMBOL_GPL(platform_driver_unregister); |
880 | |
/* Stand-in probe that always fails; installed by __platform_driver_probe()
 * so any later probe attempt of such a driver is rejected. */
static int platform_probe_fail(struct platform_device *pdev)
{
	return -ENXIO;
}
885 | |
886 | static int is_bound_to_driver(struct device *dev, void *driver) |
887 | { |
888 | if (dev->driver == driver) |
889 | return 1; |
890 | return 0; |
891 | } |
892 | |
893 | /** |
894 | * __platform_driver_probe - register driver for non-hotpluggable device |
895 | * @drv: platform driver structure |
896 | * @probe: the driver probe routine, probably from an __init section |
897 | * @module: module which will be the owner of the driver |
898 | * |
899 | * Use this instead of platform_driver_register() when you know the device |
900 | * is not hotpluggable and has already been registered, and you want to |
901 | * remove its run-once probe() infrastructure from memory after the driver |
902 | * has bound to the device. |
903 | * |
904 | * One typical use for this would be with drivers for controllers integrated |
905 | * into system-on-chip processors, where the controller devices have been |
906 | * configured as part of board setup. |
907 | * |
908 | * Note that this is incompatible with deferred probing. |
909 | * |
910 | * Returns zero if the driver registered and bound to a device, else returns |
911 | * a negative error code and with the driver not registered. |
912 | */ |
913 | int __init_or_module __platform_driver_probe(struct platform_driver *drv, |
914 | int (*probe)(struct platform_device *), struct module *module) |
915 | { |
916 | int retval; |
917 | |
918 | if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) { |
919 | pr_err("%s: drivers registered with %s can not be probed asynchronously\n" , |
920 | drv->driver.name, __func__); |
921 | return -EINVAL; |
922 | } |
923 | |
924 | /* |
925 | * We have to run our probes synchronously because we check if |
926 | * we find any devices to bind to and exit with error if there |
927 | * are any. |
928 | */ |
929 | drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS; |
930 | |
931 | /* |
932 | * Prevent driver from requesting probe deferral to avoid further |
933 | * futile probe attempts. |
934 | */ |
935 | drv->prevent_deferred_probe = true; |
936 | |
937 | /* make sure driver won't have bind/unbind attributes */ |
938 | drv->driver.suppress_bind_attrs = true; |
939 | |
940 | /* temporary section violation during probe() */ |
941 | drv->probe = probe; |
942 | retval = __platform_driver_register(drv, module); |
943 | if (retval) |
944 | return retval; |
945 | |
946 | /* Force all new probes of this driver to fail */ |
947 | drv->probe = platform_probe_fail; |
948 | |
949 | /* Walk all platform devices and see if any actually bound to this driver. |
950 | * If not, return an error as the device should have done so by now. |
951 | */ |
952 | if (!bus_for_each_dev(bus: &platform_bus_type, NULL, data: &drv->driver, fn: is_bound_to_driver)) { |
953 | retval = -ENODEV; |
954 | platform_driver_unregister(drv); |
955 | } |
956 | |
957 | return retval; |
958 | } |
959 | EXPORT_SYMBOL_GPL(__platform_driver_probe); |
960 | |
961 | /** |
962 | * __platform_create_bundle - register driver and create corresponding device |
963 | * @driver: platform driver structure |
964 | * @probe: the driver probe routine, probably from an __init section |
965 | * @res: set of resources that needs to be allocated for the device |
966 | * @n_res: number of resources |
967 | * @data: platform specific data for this platform device |
968 | * @size: size of platform specific data |
969 | * @module: module which will be the owner of the driver |
970 | * |
971 | * Use this in legacy-style modules that probe hardware directly and |
972 | * register a single platform device and corresponding platform driver. |
973 | * |
974 | * Returns &struct platform_device pointer on success, or ERR_PTR() on error. |
975 | */ |
976 | struct platform_device * __init_or_module __platform_create_bundle( |
977 | struct platform_driver *driver, |
978 | int (*probe)(struct platform_device *), |
979 | struct resource *res, unsigned int n_res, |
980 | const void *data, size_t size, struct module *module) |
981 | { |
982 | struct platform_device *pdev; |
983 | int error; |
984 | |
985 | pdev = platform_device_alloc(driver->driver.name, -1); |
986 | if (!pdev) { |
987 | error = -ENOMEM; |
988 | goto err_out; |
989 | } |
990 | |
991 | error = platform_device_add_resources(pdev, res, n_res); |
992 | if (error) |
993 | goto err_pdev_put; |
994 | |
995 | error = platform_device_add_data(pdev, data, size); |
996 | if (error) |
997 | goto err_pdev_put; |
998 | |
999 | error = platform_device_add(pdev); |
1000 | if (error) |
1001 | goto err_pdev_put; |
1002 | |
1003 | error = __platform_driver_probe(driver, probe, module); |
1004 | if (error) |
1005 | goto err_pdev_del; |
1006 | |
1007 | return pdev; |
1008 | |
1009 | err_pdev_del: |
1010 | platform_device_del(pdev); |
1011 | err_pdev_put: |
1012 | platform_device_put(pdev); |
1013 | err_out: |
1014 | return ERR_PTR(error); |
1015 | } |
1016 | EXPORT_SYMBOL_GPL(__platform_create_bundle); |
1017 | |
1018 | /** |
1019 | * __platform_register_drivers - register an array of platform drivers |
1020 | * @drivers: an array of drivers to register |
1021 | * @count: the number of drivers to register |
1022 | * @owner: module owning the drivers |
1023 | * |
1024 | * Registers platform drivers specified by an array. On failure to register a |
1025 | * driver, all previously registered drivers will be unregistered. Callers of |
1026 | * this API should use platform_unregister_drivers() to unregister drivers in |
1027 | * the reverse order. |
1028 | * |
1029 | * Returns: 0 on success or a negative error code on failure. |
1030 | */ |
1031 | int __platform_register_drivers(struct platform_driver * const *drivers, |
1032 | unsigned int count, struct module *owner) |
1033 | { |
1034 | unsigned int i; |
1035 | int err; |
1036 | |
1037 | for (i = 0; i < count; i++) { |
1038 | pr_debug("registering platform driver %ps\n" , drivers[i]); |
1039 | |
1040 | err = __platform_driver_register(drivers[i], owner); |
1041 | if (err < 0) { |
1042 | pr_err("failed to register platform driver %ps: %d\n" , |
1043 | drivers[i], err); |
1044 | goto error; |
1045 | } |
1046 | } |
1047 | |
1048 | return 0; |
1049 | |
1050 | error: |
1051 | while (i--) { |
1052 | pr_debug("unregistering platform driver %ps\n" , drivers[i]); |
1053 | platform_driver_unregister(drivers[i]); |
1054 | } |
1055 | |
1056 | return err; |
1057 | } |
1058 | EXPORT_SYMBOL_GPL(__platform_register_drivers); |
1059 | |
1060 | /** |
1061 | * platform_unregister_drivers - unregister an array of platform drivers |
1062 | * @drivers: an array of drivers to unregister |
1063 | * @count: the number of drivers to unregister |
1064 | * |
1065 | * Unregisters platform drivers specified by an array. This is typically used |
1066 | * to complement an earlier call to platform_register_drivers(). Drivers are |
1067 | * unregistered in the reverse order in which they were registered. |
1068 | */ |
1069 | void platform_unregister_drivers(struct platform_driver * const *drivers, |
1070 | unsigned int count) |
1071 | { |
1072 | while (count--) { |
1073 | pr_debug("unregistering platform driver %ps\n" , drivers[count]); |
1074 | platform_driver_unregister(drivers[count]); |
1075 | } |
1076 | } |
1077 | EXPORT_SYMBOL_GPL(platform_unregister_drivers); |
1078 | |
1079 | static const struct platform_device_id *platform_match_id( |
1080 | const struct platform_device_id *id, |
1081 | struct platform_device *pdev) |
1082 | { |
1083 | while (id->name[0]) { |
1084 | if (strcmp(pdev->name, id->name) == 0) { |
1085 | pdev->id_entry = id; |
1086 | return id; |
1087 | } |
1088 | id++; |
1089 | } |
1090 | return NULL; |
1091 | } |
1092 | |
1093 | #ifdef CONFIG_PM_SLEEP |
1094 | |
1095 | static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) |
1096 | { |
1097 | struct platform_driver *pdrv = to_platform_driver(dev->driver); |
1098 | struct platform_device *pdev = to_platform_device(dev); |
1099 | int ret = 0; |
1100 | |
1101 | if (dev->driver && pdrv->suspend) |
1102 | ret = pdrv->suspend(pdev, mesg); |
1103 | |
1104 | return ret; |
1105 | } |
1106 | |
1107 | static int platform_legacy_resume(struct device *dev) |
1108 | { |
1109 | struct platform_driver *pdrv = to_platform_driver(dev->driver); |
1110 | struct platform_device *pdev = to_platform_device(dev); |
1111 | int ret = 0; |
1112 | |
1113 | if (dev->driver && pdrv->resume) |
1114 | ret = pdrv->resume(pdev); |
1115 | |
1116 | return ret; |
1117 | } |
1118 | |
1119 | #endif /* CONFIG_PM_SLEEP */ |
1120 | |
1121 | #ifdef CONFIG_SUSPEND |
1122 | |
1123 | int platform_pm_suspend(struct device *dev) |
1124 | { |
1125 | struct device_driver *drv = dev->driver; |
1126 | int ret = 0; |
1127 | |
1128 | if (!drv) |
1129 | return 0; |
1130 | |
1131 | if (drv->pm) { |
1132 | if (drv->pm->suspend) |
1133 | ret = drv->pm->suspend(dev); |
1134 | } else { |
1135 | ret = platform_legacy_suspend(dev, PMSG_SUSPEND); |
1136 | } |
1137 | |
1138 | return ret; |
1139 | } |
1140 | |
1141 | int platform_pm_resume(struct device *dev) |
1142 | { |
1143 | struct device_driver *drv = dev->driver; |
1144 | int ret = 0; |
1145 | |
1146 | if (!drv) |
1147 | return 0; |
1148 | |
1149 | if (drv->pm) { |
1150 | if (drv->pm->resume) |
1151 | ret = drv->pm->resume(dev); |
1152 | } else { |
1153 | ret = platform_legacy_resume(dev); |
1154 | } |
1155 | |
1156 | return ret; |
1157 | } |
1158 | |
1159 | #endif /* CONFIG_SUSPEND */ |
1160 | |
1161 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
1162 | |
1163 | int platform_pm_freeze(struct device *dev) |
1164 | { |
1165 | struct device_driver *drv = dev->driver; |
1166 | int ret = 0; |
1167 | |
1168 | if (!drv) |
1169 | return 0; |
1170 | |
1171 | if (drv->pm) { |
1172 | if (drv->pm->freeze) |
1173 | ret = drv->pm->freeze(dev); |
1174 | } else { |
1175 | ret = platform_legacy_suspend(dev, PMSG_FREEZE); |
1176 | } |
1177 | |
1178 | return ret; |
1179 | } |
1180 | |
1181 | int platform_pm_thaw(struct device *dev) |
1182 | { |
1183 | struct device_driver *drv = dev->driver; |
1184 | int ret = 0; |
1185 | |
1186 | if (!drv) |
1187 | return 0; |
1188 | |
1189 | if (drv->pm) { |
1190 | if (drv->pm->thaw) |
1191 | ret = drv->pm->thaw(dev); |
1192 | } else { |
1193 | ret = platform_legacy_resume(dev); |
1194 | } |
1195 | |
1196 | return ret; |
1197 | } |
1198 | |
1199 | int platform_pm_poweroff(struct device *dev) |
1200 | { |
1201 | struct device_driver *drv = dev->driver; |
1202 | int ret = 0; |
1203 | |
1204 | if (!drv) |
1205 | return 0; |
1206 | |
1207 | if (drv->pm) { |
1208 | if (drv->pm->poweroff) |
1209 | ret = drv->pm->poweroff(dev); |
1210 | } else { |
1211 | ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); |
1212 | } |
1213 | |
1214 | return ret; |
1215 | } |
1216 | |
1217 | int platform_pm_restore(struct device *dev) |
1218 | { |
1219 | struct device_driver *drv = dev->driver; |
1220 | int ret = 0; |
1221 | |
1222 | if (!drv) |
1223 | return 0; |
1224 | |
1225 | if (drv->pm) { |
1226 | if (drv->pm->restore) |
1227 | ret = drv->pm->restore(dev); |
1228 | } else { |
1229 | ret = platform_legacy_resume(dev); |
1230 | } |
1231 | |
1232 | return ret; |
1233 | } |
1234 | |
1235 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
1236 | |
1237 | /* modalias support enables more hands-off userspace setup: |
1238 | * (a) environment variable lets new-style hotplug events work once system is |
1239 | * fully running: "modprobe $MODALIAS" |
1240 | * (b) sysfs attribute lets new-style coldplug recover from hotplug events |
1241 | * mishandled before system is fully running: "modprobe $(cat modalias)" |
1242 | */ |
1243 | static ssize_t modalias_show(struct device *dev, |
1244 | struct device_attribute *attr, char *buf) |
1245 | { |
1246 | struct platform_device *pdev = to_platform_device(dev); |
1247 | int len; |
1248 | |
1249 | len = of_device_modalias(dev, str: buf, PAGE_SIZE); |
1250 | if (len != -ENODEV) |
1251 | return len; |
1252 | |
1253 | len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); |
1254 | if (len != -ENODEV) |
1255 | return len; |
1256 | |
1257 | return sysfs_emit(buf, fmt: "platform:%s\n" , pdev->name); |
1258 | } |
1259 | static DEVICE_ATTR_RO(modalias); |
1260 | |
1261 | static ssize_t numa_node_show(struct device *dev, |
1262 | struct device_attribute *attr, char *buf) |
1263 | { |
1264 | return sysfs_emit(buf, fmt: "%d\n" , dev_to_node(dev)); |
1265 | } |
1266 | static DEVICE_ATTR_RO(numa_node); |
1267 | |
1268 | static ssize_t driver_override_show(struct device *dev, |
1269 | struct device_attribute *attr, char *buf) |
1270 | { |
1271 | struct platform_device *pdev = to_platform_device(dev); |
1272 | ssize_t len; |
1273 | |
1274 | device_lock(dev); |
1275 | len = sysfs_emit(buf, fmt: "%s\n" , pdev->driver_override); |
1276 | device_unlock(dev); |
1277 | |
1278 | return len; |
1279 | } |
1280 | |
1281 | static ssize_t driver_override_store(struct device *dev, |
1282 | struct device_attribute *attr, |
1283 | const char *buf, size_t count) |
1284 | { |
1285 | struct platform_device *pdev = to_platform_device(dev); |
1286 | int ret; |
1287 | |
1288 | ret = driver_set_override(dev, override: &pdev->driver_override, s: buf, len: count); |
1289 | if (ret) |
1290 | return ret; |
1291 | |
1292 | return count; |
1293 | } |
1294 | static DEVICE_ATTR_RW(driver_override); |
1295 | |
/* Default sysfs attributes for every platform device; NULL-terminated. */
static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
1302 | |
1303 | static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a, |
1304 | int n) |
1305 | { |
1306 | struct device *dev = container_of(kobj, typeof(*dev), kobj); |
1307 | |
1308 | if (a == &dev_attr_numa_node.attr && |
1309 | dev_to_node(dev) == NUMA_NO_NODE) |
1310 | return 0; |
1311 | |
1312 | return a->mode; |
1313 | } |
1314 | |
/* Attribute group wiring the default attributes to the visibility check. */
static const struct attribute_group platform_dev_group = {
	.attrs = platform_dev_attrs,
	.is_visible = platform_dev_attrs_visible,
};
__ATTRIBUTE_GROUPS(platform_dev);
1320 | |
1321 | |
1322 | /** |
1323 | * platform_match - bind platform device to platform driver. |
1324 | * @dev: device. |
1325 | * @drv: driver. |
1326 | * |
1327 | * Platform device IDs are assumed to be encoded like this: |
1328 | * "<name><instance>", where <name> is a short description of the type of |
1329 | * device, like "pci" or "floppy", and <instance> is the enumerated |
1330 | * instance of the device, like '0' or '42'. Driver IDs are simply |
1331 | * "<name>". So, extract the <name> from the platform_device structure, |
1332 | * and compare it against the name of the driver. Return whether they match |
1333 | * or not. |
1334 | */ |
1335 | static int platform_match(struct device *dev, struct device_driver *drv) |
1336 | { |
1337 | struct platform_device *pdev = to_platform_device(dev); |
1338 | struct platform_driver *pdrv = to_platform_driver(drv); |
1339 | |
1340 | /* When driver_override is set, only bind to the matching driver */ |
1341 | if (pdev->driver_override) |
1342 | return !strcmp(pdev->driver_override, drv->name); |
1343 | |
1344 | /* Attempt an OF style match first */ |
1345 | if (of_driver_match_device(dev, drv)) |
1346 | return 1; |
1347 | |
1348 | /* Then try ACPI style match */ |
1349 | if (acpi_driver_match_device(dev, drv)) |
1350 | return 1; |
1351 | |
1352 | /* Then try to match against the id table */ |
1353 | if (pdrv->id_table) |
1354 | return platform_match_id(id: pdrv->id_table, pdev) != NULL; |
1355 | |
1356 | /* fall-back to driver name match */ |
1357 | return (strcmp(pdev->name, drv->name) == 0); |
1358 | } |
1359 | |
1360 | static int platform_uevent(const struct device *dev, struct kobj_uevent_env *env) |
1361 | { |
1362 | const struct platform_device *pdev = to_platform_device(dev); |
1363 | int rc; |
1364 | |
1365 | /* Some devices have extra OF data and an OF-style MODALIAS */ |
1366 | rc = of_device_uevent_modalias(dev, env); |
1367 | if (rc != -ENODEV) |
1368 | return rc; |
1369 | |
1370 | rc = acpi_device_uevent_modalias(dev, env); |
1371 | if (rc != -ENODEV) |
1372 | return rc; |
1373 | |
1374 | add_uevent_var(env, format: "MODALIAS=%s%s" , PLATFORM_MODULE_PREFIX, |
1375 | pdev->name); |
1376 | return 0; |
1377 | } |
1378 | |
1379 | static int platform_probe(struct device *_dev) |
1380 | { |
1381 | struct platform_driver *drv = to_platform_driver(_dev->driver); |
1382 | struct platform_device *dev = to_platform_device(_dev); |
1383 | int ret; |
1384 | |
1385 | /* |
1386 | * A driver registered using platform_driver_probe() cannot be bound |
1387 | * again later because the probe function usually lives in __init code |
1388 | * and so is gone. For these drivers .probe is set to |
1389 | * platform_probe_fail in __platform_driver_probe(). Don't even prepare |
1390 | * clocks and PM domains for these to match the traditional behaviour. |
1391 | */ |
1392 | if (unlikely(drv->probe == platform_probe_fail)) |
1393 | return -ENXIO; |
1394 | |
1395 | ret = of_clk_set_defaults(node: _dev->of_node, clk_supplier: false); |
1396 | if (ret < 0) |
1397 | return ret; |
1398 | |
1399 | ret = dev_pm_domain_attach(dev: _dev, power_on: true); |
1400 | if (ret) |
1401 | goto out; |
1402 | |
1403 | if (drv->probe) { |
1404 | ret = drv->probe(dev); |
1405 | if (ret) |
1406 | dev_pm_domain_detach(dev: _dev, power_off: true); |
1407 | } |
1408 | |
1409 | out: |
1410 | if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { |
1411 | dev_warn(_dev, "probe deferral not supported\n" ); |
1412 | ret = -ENXIO; |
1413 | } |
1414 | |
1415 | return ret; |
1416 | } |
1417 | |
1418 | static void platform_remove(struct device *_dev) |
1419 | { |
1420 | struct platform_driver *drv = to_platform_driver(_dev->driver); |
1421 | struct platform_device *dev = to_platform_device(_dev); |
1422 | |
1423 | if (drv->remove_new) { |
1424 | drv->remove_new(dev); |
1425 | } else if (drv->remove) { |
1426 | int ret = drv->remove(dev); |
1427 | |
1428 | if (ret) |
1429 | dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n" ); |
1430 | } |
1431 | dev_pm_domain_detach(dev: _dev, power_off: true); |
1432 | } |
1433 | |
1434 | static void platform_shutdown(struct device *_dev) |
1435 | { |
1436 | struct platform_device *dev = to_platform_device(_dev); |
1437 | struct platform_driver *drv; |
1438 | |
1439 | if (!_dev->driver) |
1440 | return; |
1441 | |
1442 | drv = to_platform_driver(_dev->driver); |
1443 | if (drv->shutdown) |
1444 | drv->shutdown(dev); |
1445 | } |
1446 | |
1447 | static int platform_dma_configure(struct device *dev) |
1448 | { |
1449 | struct platform_driver *drv = to_platform_driver(dev->driver); |
1450 | struct fwnode_handle *fwnode = dev_fwnode(dev); |
1451 | enum dev_dma_attr attr; |
1452 | int ret = 0; |
1453 | |
1454 | if (is_of_node(fwnode)) { |
1455 | ret = of_dma_configure(dev, to_of_node(fwnode), force_dma: true); |
1456 | } else if (is_acpi_device_node(fwnode)) { |
1457 | attr = acpi_get_dma_attr(to_acpi_device_node(fwnode)); |
1458 | ret = acpi_dma_configure(dev, attr); |
1459 | } |
1460 | if (ret || drv->driver_managed_dma) |
1461 | return ret; |
1462 | |
1463 | ret = iommu_device_use_default_domain(dev); |
1464 | if (ret) |
1465 | arch_teardown_dma_ops(dev); |
1466 | |
1467 | return ret; |
1468 | } |
1469 | |
1470 | static void platform_dma_cleanup(struct device *dev) |
1471 | { |
1472 | struct platform_driver *drv = to_platform_driver(dev->driver); |
1473 | |
1474 | if (!drv->driver_managed_dma) |
1475 | iommu_device_unuse_default_domain(dev); |
1476 | } |
1477 | |
/* Generic runtime-PM plus the platform_pm_* system-sleep callbacks above. */
static const struct dev_pm_ops platform_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
	USE_PLATFORM_PM_SLEEP_OPS
};
1482 | |
/* The platform 'pseudo' bus itself: matching, uevents, probe/remove, PM. */
struct bus_type platform_bus_type = {
	.name		= "platform" ,
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.probe		= platform_probe,
	.remove		= platform_remove,
	.shutdown	= platform_shutdown,
	.dma_configure	= platform_dma_configure,
	.dma_cleanup	= platform_dma_cleanup,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
1496 | |
/*
 * Adapter for bus_find_device(): cast the opaque match data back to a
 * driver pointer and reuse platform_match().
 * Fix: removed the "drv:" inlay-hint artifact from the call expression.
 */
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}
1501 | |
1502 | /** |
1503 | * platform_find_device_by_driver - Find a platform device with a given |
1504 | * driver. |
1505 | * @start: The device to start the search from. |
1506 | * @drv: The device driver to look for. |
1507 | */ |
1508 | struct device *platform_find_device_by_driver(struct device *start, |
1509 | const struct device_driver *drv) |
1510 | { |
1511 | return bus_find_device(bus: &platform_bus_type, start, data: drv, |
1512 | match: __platform_match); |
1513 | } |
1514 | EXPORT_SYMBOL_GPL(platform_find_device_by_driver); |
1515 | |
/* Weak arch hook: clean up early platform devices before bus registration. */
void __weak __init early_platform_cleanup(void) { }
1517 | |
1518 | int __init platform_bus_init(void) |
1519 | { |
1520 | int error; |
1521 | |
1522 | early_platform_cleanup(); |
1523 | |
1524 | error = device_register(dev: &platform_bus); |
1525 | if (error) { |
1526 | put_device(dev: &platform_bus); |
1527 | return error; |
1528 | } |
1529 | error = bus_register(bus: &platform_bus_type); |
1530 | if (error) |
1531 | device_unregister(dev: &platform_bus); |
1532 | |
1533 | return error; |
1534 | } |
1535 | |