// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>

#include "base.h"
#include "power/power.h"

/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 */
struct resource *platform_get_resource(struct platform_device *dev,
				       unsigned int type, unsigned int num)
{
	int i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (type == resource_type(r) && num-- == 0)
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);

#ifdef CONFIG_HAS_IOMEM
/**
 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 *				    device
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);

/**
 * devm_platform_ioremap_resource_wc - write-combined variant of
 *				       devm_platform_ioremap_resource()
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
						unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource_wc(&pdev->dev, res);
}

/**
 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource() for
 *					   a platform device, retrieving the
 *					   resource by name
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @name: name of the resource
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
				      const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */

/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * For example:
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		return -ENXIO;
	return dev->archdata.irqs[num];
#else
	struct resource *r;
	int ret;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get(dev->dev.of_node, num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				return ret;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			return -ENXIO;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r)
		return r->start;

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and in those cases this
	 * fallback allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	return -ENXIO;
#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);

/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device and prints an error message if finding
 * the IRQ fails. Device drivers should check the return value for errors so
 * as to not pass a negative integer value to the request_irq() APIs.
 *
 * For example:
 *		int irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
	int ret;

	ret = platform_get_irq_optional(dev, num);
	if (ret < 0 && ret != -EPROBE_DEFER)
		dev_err(&dev->dev, "IRQ index %u not found\n", num);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq);
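
/*
 * Illustrative sketch (not part of this file): a typical platform driver
 * probe() combines the helpers above, mapping the first MEM resource and
 * requesting the first IRQ. The "foo" names and register offset below are
 * hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *base;
 *		int irq, ret;
 *
 *		base = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		ret = devm_request_irq(&pdev->dev, irq, foo_isr, 0,
 *				       dev_name(&pdev->dev), pdev);
 *		if (ret)
 *			return ret;
 *
 *		writel(0x1, base + 0x0);	// hypothetical enable register
 *		return 0;
 *	}
 */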

/**
 * platform_irq_count - Count the number of IRQs a platform device uses
 * @dev: platform device
 *
 * Return: Number of IRQs a platform device uses or EPROBE_DEFER
 */
int platform_irq_count(struct platform_device *dev)
{
	int ret, nr = 0;

	while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
		nr++;

	if (ret == -EPROBE_DEFER)
		return ret;

	return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);

/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
					      unsigned int type,
					      const char *name)
{
	int i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (unlikely(!r->name))
			continue;

		if (type == resource_type(r) && !strcmp(r->name, name))
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);

static int __platform_get_irq_byname(struct platform_device *dev,
				     const char *name)
{
	struct resource *r;
	int ret;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get_byname(dev->dev.of_node, name);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
	if (r)
		return r->start;

	return -ENXIO;
}

/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an IRQ like platform_get_irq(), but by name rather than by index.
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
	int ret;

	ret = __platform_get_irq_byname(dev, name);
	if (ret < 0 && ret != -EPROBE_DEFER)
		dev_err(&dev->dev, "IRQ %s not found\n", name);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);

/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an optional IRQ by name like platform_get_irq_byname(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname_optional(struct platform_device *dev,
				     const char *name)
{
	return __platform_get_irq_byname(dev, name);
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);

/**
 * platform_add_devices - add a number of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 *
 * Return: 0 on success, negative error number on failure.
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = platform_device_register(devs[i]);
		if (ret) {
			while (--i >= 0)
				platform_device_unregister(devs[i]);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
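
/*
 * Illustrative sketch (not part of this file): board code typically feeds
 * platform_add_devices() a static array of devices. The "foo"/"bar" devices
 * below are hypothetical.
 *
 *	static struct platform_device *board_devs[] __initdata = {
 *		&foo_device,
 *		&bar_device,
 *	};
 *
 *	static int __init board_init(void)
 *	{
 *		return platform_add_devices(board_devs,
 *					    ARRAY_SIZE(board_devs));
 *	}
 */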

struct platform_object {
	struct platform_device pdev;
	char name[];
};

/*
 * Set up default DMA masks for platform devices if the architecture or
 * firmware has not provided them already.
 */
static void setup_pdev_dma_masks(struct platform_device *pdev)
{
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	if (!pdev->dma_mask)
		pdev->dma_mask = DMA_BIT_MASK(32);
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dma_mask;
}

/**
 * platform_device_put - destroy a platform device
 * @pdev: platform device to free
 *
 * Free all memory associated with a platform device.  This function must
 * _only_ be externally called in error cases.  All other usage is a bug.
 */
void platform_device_put(struct platform_device *pdev)
{
	if (!IS_ERR_OR_NULL(pdev))
		put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);

static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_device_node_put(&pa->pdev.dev);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa->pdev.driver_override);
	kfree(pa);
}

/**
 * platform_device_alloc - create a platform device
 * @name: base name of the device we're adding
 * @id: instance id
 *
 * Create a platform device object which can have other objects attached
 * to it, and which will have attached objects freed when it is released.
 */
struct platform_device *platform_device_alloc(const char *name, int id)
{
	struct platform_object *pa;

	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
	if (pa) {
		strcpy(pa->name, name);
		pa->pdev.name = pa->name;
		pa->pdev.id = id;
		device_initialize(&pa->pdev.dev);
		pa->pdev.dev.release = platform_device_release;
		setup_pdev_dma_masks(&pa->pdev);
	}

	return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);

/**
 * platform_device_add_resources - add resources to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @res: set of resources that needs to be allocated for the device
 * @num: number of resources
 *
 * Add a copy of the resources to the platform device.  The memory
 * associated with the resources will be freed when the platform device is
 * released.
 */
int platform_device_add_resources(struct platform_device *pdev,
				  const struct resource *res, unsigned int num)
{
	struct resource *r = NULL;

	if (res) {
		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
		if (!r)
			return -ENOMEM;
	}

	kfree(pdev->resource);
	pdev->resource = r;
	pdev->num_resources = num;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);

/**
 * platform_device_add_data - add platform-specific data to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add data to
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Add a copy of platform specific data to the platform device's
 * platform_data pointer.  The memory associated with the platform data
 * will be freed when the platform device is released.
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
			     size_t size)
{
	void *d = NULL;

	if (data) {
		d = kmemdup(data, size, GFP_KERNEL);
		if (!d)
			return -ENOMEM;
	}

	kfree(pdev->dev.platform_data);
	pdev->dev.platform_data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);

/**
 * platform_device_add_properties - add built-in properties to a platform device
 * @pdev: platform device to add properties to
 * @properties: null terminated array of properties to add
 *
 * The function will take a deep copy of @properties and attach the copy to
 * the platform device. The memory associated with the properties will be
 * freed when the platform device is released.
 */
int platform_device_add_properties(struct platform_device *pdev,
				   const struct property_entry *properties)
{
	return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);

/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	int i, ret;

	if (!pdev)
		return -EINVAL;

	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	if (pdev->id_auto) {
		ida_simple_remove(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	while (--i >= 0) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource).  This function must
 * _only_ be externally called in error cases.  All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	int i;

	if (!IS_ERR_OR_NULL(pdev)) {
		device_del(&pdev->dev);

		if (pdev->id_auto) {
			ida_simple_remove(&platform_devid_ida, pdev->id);
			pdev->id = PLATFORM_DEVID_AUTO;
		}

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			if (r->parent)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove the device from the subsystem, then we drop the reference
 * count by calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);

/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret = -ENOMEM;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		/*
		 * This memory isn't freed when the device is put;
		 * conceptually, dma_mask in struct device should not be a
		 * pointer, so there is no good place to free it. Tell
		 * kmemleak not to report it as a leak.
		 */
		pdev->dev.dma_mask =
			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
		if (!pdev->dev.dma_mask)
			goto err;

		kmemleak_ignore(pdev->dev.dma_mask);

		*pdev->dev.dma_mask = pdevinfo->dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = platform_device_add_properties(pdev,
						     pdevinfo->properties);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		kfree(pdev->dev.dma_mask);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
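
/*
 * Illustrative sketch (not part of this file): a parent driver (MFD or bus
 * glue, for example) can create a child platform device in one call by
 * filling in a platform_device_info. The "foo" names and data below are
 * hypothetical.
 *
 *	static const struct foo_pdata pdata = { .mode = 1 };
 *
 *	struct platform_device_info info = {
 *		.parent		= &parent_pdev->dev,
 *		.name		= "foo-child",
 *		.id		= PLATFORM_DEVID_AUTO,
 *		.res		= foo_resources,
 *		.num_res	= ARRAY_SIZE(foo_resources),
 *		.data		= &pdata,
 *		.size_data	= sizeof(pdata),
 *		.dma_mask	= DMA_BIT_MASK(32),
 *	};
 *	struct platform_device *child = platform_device_register_full(&info);
 *
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 */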

static int platform_drv_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}

static int platform_drv_probe_fail(struct device *_dev)
{
	return -ENXIO;
}

static int platform_drv_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret = 0;

	if (drv->remove)
		ret = drv->remove(dev);
	dev_pm_domain_detach(_dev, true);

	return ret;
}

static void platform_drv_shutdown(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	if (drv->shutdown)
		drv->shutdown(dev);
}

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
			       struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;
	drv->driver.probe = platform_drv_probe;
	drv->driver.remove = platform_drv_remove;
	drv->driver.shutdown = platform_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);
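
/*
 * Illustrative sketch (not part of this file): drivers normally reach this
 * via the platform_driver_register() wrapper, most often through the
 * module_platform_driver() helper. The "foo" driver below is hypothetical.
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *		.driver	= {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *	};
 *	module_platform_driver(foo_driver);
 */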

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);

/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices.  Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = NULL;
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	drv->driver.probe = platform_drv_probe_fail;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);

/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);

/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			goto error;
		}
	}

	return 0;

error:
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);
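
/*
 * Illustrative sketch (not part of this file): a module that provides
 * several related drivers can register them as a group via the
 * platform_register_drivers() wrapper. The "foo" drivers below are
 * hypothetical.
 *
 *	static struct platform_driver * const foo_drivers[] = {
 *		&foo_core_driver,
 *		&foo_bar_driver,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_register_drivers(foo_drivers,
 *						 ARRAY_SIZE(foo_drivers));
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		platform_unregister_drivers(foo_drivers,
 *					    ARRAY_SIZE(foo_drivers));
 *	}
 */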

/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	while (count--) {
		pr_debug("unregistering platform driver %ps\n", drivers[count]);
		platform_driver_unregister(drivers[count]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);

/*
 * modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running:  "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running:  "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);

	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sprintf(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
ATTRIBUTE_GROUPS(platform_dev);

static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct platform_device *pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
		       pdev->name);
	return 0;
}

static const struct platform_device_id *platform_match_id(
			const struct platform_device_id *id,
			struct platform_device *pdev)
{
	while (id->name[0]) {
		if (strcmp(pdev->name, id->name) == 0) {
			pdev->id_entry = id;
			return id;
		}
		id++;
	}
	return NULL;
}

/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'.  Driver IDs are simply
 * "<name>".  So, extract the <name> from the platform_device structure,
 * compare it against the name of the driver, and return whether they match.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}

#ifdef CONFIG_PM_SLEEP

static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->suspend)
		ret = pdrv->suspend(pdev, mesg);

	return ret;
}

static int platform_legacy_resume(struct device *dev)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->resume)
		ret = pdrv->resume(pdev);

	return ret;
}

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND

int platform_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend)
			ret = drv->pm->suspend(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
	}

	return ret;
}

int platform_pm_resume(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume)
			ret = drv->pm->resume(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

int platform_pm_freeze(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze)
			ret = drv->pm->freeze(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
	}

	return ret;
}

int platform_pm_thaw(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw)
			ret = drv->pm->thaw(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

int platform_pm_poweroff(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff)
			ret = drv->pm->poweroff(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
	}

	return ret;
}

int platform_pm_restore(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore)
			ret = drv->pm->restore(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */

int platform_dma_configure(struct device *dev)
{
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev->of_node) {
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		ret = acpi_dma_configure(dev, attr);
	}

	return ret;
}

static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS
};

struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);

static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}

/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 */
struct device *platform_find_device_by_driver(struct device *start,
					      const struct device_driver *drv)
{
	return bus_find_device(&platform_bus_type, start, drv,
			       __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);

void __weak __init early_platform_cleanup(void) { }

int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		put_device(&platform_bus);
		return error;
	}
	error = bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);
	of_platform_register_reconfig_notifier();
	return error;
}