1
2
3
4
5
6
7
8
9
10
11
12#include <linux/string.h>
13#include <linux/platform_device.h>
14#include <linux/of_device.h>
15#include <linux/of_irq.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19#include <linux/ioport.h>
20#include <linux/dma-mapping.h>
21#include <linux/memblock.h>
22#include <linux/err.h>
23#include <linux/slab.h>
24#include <linux/pm_runtime.h>
25#include <linux/pm_domain.h>
26#include <linux/idr.h>
27#include <linux/acpi.h>
28#include <linux/clk/clk-conf.h>
29#include <linux/limits.h>
30#include <linux/property.h>
31#include <linux/kmemleak.h>
32#include <linux/types.h>
33
34#include "base.h"
35#include "power/power.h"
36
37
38static DEFINE_IDA(platform_devid_ida);
39
/*
 * The root "platform" pseudo-device.  Platform devices registered without an
 * explicit parent are reparented under this device (see platform_device_add()).
 */
struct device platform_bus = {
	.init_name = "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);
44
45
46
47
48
49
50
51
52
53struct resource *platform_get_resource(struct platform_device *dev,
54 unsigned int type, unsigned int num)
55{
56 u32 i;
57
58 for (i = 0; i < dev->num_resources; i++) {
59 struct resource *r = &dev->resource[i];
60
61 if (type == resource_type(r) && num-- == 0)
62 return r;
63 }
64 return NULL;
65}
66EXPORT_SYMBOL_GPL(platform_get_resource);
67
68struct resource *platform_get_mem_or_io(struct platform_device *dev,
69 unsigned int num)
70{
71 u32 i;
72
73 for (i = 0; i < dev->num_resources; i++) {
74 struct resource *r = &dev->resource[i];
75
76 if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
77 return r;
78 }
79 return NULL;
80}
81EXPORT_SYMBOL_GPL(platform_get_mem_or_io);
82
83#ifdef CONFIG_HAS_IOMEM
84
85
86
87
88
89
90
91
92
93
94
95
/**
 * devm_platform_get_and_ioremap_resource - ioremap a platform MEM resource
 *					    and optionally return it
 * @pdev: platform device providing the resource and owning the mapping
 * @index: index of the IORESOURCE_MEM resource to map
 * @res: optional out-parameter; if non-NULL, receives the looked-up resource
 *	 (may be set to NULL when the resource does not exist)
 *
 * Return: the remapped address, or an ERR_PTR() on failure.
 */
void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
				unsigned int index, struct resource **res)
{
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_MEM, index);
	if (res)
		*res = r;
	/* r may be NULL here; devm_ioremap_resource() is relied on to turn
	 * that into an ERR_PTR() — NOTE(review): confirmed by its contract. */
	return devm_ioremap_resource(&pdev->dev, r);
}
EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
108
109
110
111
112
113
114
115
116
117
118
119
/**
 * devm_platform_ioremap_resource - ioremap the @index'th MEM resource
 * @pdev: platform device owning the mapping
 * @index: index of the IORESOURCE_MEM resource to map
 *
 * Convenience wrapper around devm_platform_get_and_ioremap_resource() for
 * callers that do not need the struct resource back.
 *
 * Return: the remapped address, or an ERR_PTR() on failure.
 */
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
126
127
128
129
130
131
132
133
134
135
136
137
138
139void __iomem *
140devm_platform_ioremap_resource_byname(struct platform_device *pdev,
141 const char *name)
142{
143 struct resource *res;
144
145 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
146 return devm_ioremap_resource(&pdev->dev, res);
147}
148EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
149#endif
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Same lookup as platform_get_irq(), but without printing an error when
 * the IRQ cannot be obtained (platform_get_irq() wraps this function and
 * adds the message).
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
	int ret;
#ifdef CONFIG_SPARC
	/* sparc does not use IORESOURCE_IRQ resources; IRQs live in archdata */
	if (!dev || num >= dev->archdata.num_irqs)
		goto out_not_found;
	ret = dev->archdata.irqs[num];
	goto out;
#else
	struct resource *r;

	/* Prefer a device-tree supplied interrupt when one is available */
	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get(dev->dev.of_node, num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	/* On ACPI systems a disabled IRQ resource is (re)populated via ACPI */
	if (has_acpi_companion(&dev->dev)) {
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				goto out;
		}
	}

	/*
	 * The resource may carry trigger flags in its IORESOURCE_BITS;
	 * propagate them to the irq_data so the IRQ is configured with
	 * the requested trigger type before being handed to the driver.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			goto out_not_found;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r) {
		ret = r->start;
		goto out;
	}

	/*
	 * Last resort, for index 0 only: on ACPI systems fall back to a
	 * GpioInt resource.  Restricting this to num == 0 keeps the
	 * fallback unambiguous for devices exposing a single IRQ.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
		/* Only success or probe deferral short-circuit; other
		 * errors fall through to the -ENXIO below. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			goto out;
	}

#endif
out_not_found:
	ret = -ENXIO;
out:
	WARN(ret == 0, "0 is an invalid IRQ number\n");
	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Like platform_get_irq_optional(), but logs a probe error (via
 * dev_err_probe(), which stays silent for -EPROBE_DEFER) when the IRQ
 * cannot be obtained.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
	int ret;

	ret = platform_get_irq_optional(dev, num);
	if (ret < 0)
		return dev_err_probe(&dev->dev, ret,
				     "IRQ index %u not found\n", num);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq);
268
269
270
271
272
273
274
275int platform_irq_count(struct platform_device *dev)
276{
277 int ret, nr = 0;
278
279 while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
280 nr++;
281
282 if (ret == -EPROBE_DEFER)
283 return ret;
284
285 return nr;
286}
287EXPORT_SYMBOL_GPL(platform_irq_count);
288
/*
 * Devres payload for devm_platform_get_irqs_affinity(): the mapped IRQ
 * numbers, released by devm_platform_get_irqs_affinity_release().
 */
struct irq_affinity_devres {
	unsigned int count;	/* number of valid entries in irq[] */
	unsigned int irq[];	/* flexible array of mapped IRQ numbers */
};
293
294static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
295{
296 struct resource *r;
297
298 r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
299 if (r)
300 irqresource_disabled(r, 0);
301}
302
303static void devm_platform_get_irqs_affinity_release(struct device *dev,
304 void *res)
305{
306 struct irq_affinity_devres *ptr = res;
307 int i;
308
309 for (i = 0; i < ptr->count; i++) {
310 irq_dispose_mapping(ptr->irq[i]);
311
312 if (has_acpi_companion(dev))
313 platform_disable_acpi_irq(to_platform_device(dev), i);
314 }
315}
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
/**
 * devm_platform_get_irqs_affinity - devres-managed IRQ set with affinity
 * @dev: platform device
 * @affd: affinity descriptor used to spread the vectors
 * @minvec: minimum acceptable number of vectors
 * @maxvec: maximum number of vectors to take
 * @irqs: out-parameter; on success points at the array of IRQ numbers,
 *	  owned by devres (valid for the lifetime of @dev's binding)
 *
 * Return: number of vectors obtained on success, negative error on failure.
 */
int devm_platform_get_irqs_affinity(struct platform_device *dev,
				    struct irq_affinity *affd,
				    unsigned int minvec,
				    unsigned int maxvec,
				    int **irqs)
{
	struct irq_affinity_devres *ptr;
	struct irq_affinity_desc *desc;
	size_t size;
	int i, ret, nvec;

	if (!affd)
		return -EPERM;

	if (maxvec < minvec)
		return -ERANGE;

	nvec = platform_irq_count(dev);
	if (nvec < 0)
		return nvec;

	if (nvec < minvec)
		return -ENOSPC;

	/* Let the affinity code decide how many vectors it can spread */
	nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
	if (nvec < minvec)
		return -ENOSPC;

	if (nvec > maxvec)
		nvec = maxvec;

	/* struct plus flexible irq[] array of nvec entries */
	size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
	ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr->count = nvec;

	for (i = 0; i < nvec; i++) {
		int irq = platform_get_irq(dev, i);
		if (irq < 0) {
			ret = irq;
			goto err_free_devres;
		}
		ptr->irq[i] = irq;
	}

	desc = irq_create_affinity_masks(nvec, affd);
	if (!desc) {
		ret = -ENOMEM;
		goto err_free_devres;
	}

	for (i = 0; i < nvec; i++) {
		ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
		if (ret) {
			dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
				ptr->irq[i], ret);
			goto err_free_desc;
		}
	}

	devres_add(&dev->dev, ptr);

	/* desc was only needed to program the affinities; not kept */
	kfree(desc);

	*irqs = ptr->irq;

	return nvec;

err_free_desc:
	kfree(desc);
err_free_devres:
	devres_free(ptr);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);
409
410
411
412
413
414
415
416struct resource *platform_get_resource_byname(struct platform_device *dev,
417 unsigned int type,
418 const char *name)
419{
420 u32 i;
421
422 for (i = 0; i < dev->num_resources; i++) {
423 struct resource *r = &dev->resource[i];
424
425 if (unlikely(!r->name))
426 continue;
427
428 if (type == resource_type(r) && !strcmp(r->name, name))
429 return r;
430 }
431 return NULL;
432}
433EXPORT_SYMBOL_GPL(platform_get_resource_byname);
434
/*
 * Common lookup for platform_get_irq_byname{,_optional}(): try the device
 * tree first, then fall back to a named IORESOURCE_IRQ resource.
 * Returns the IRQ number, -EPROBE_DEFER, or -ENXIO when not found.
 */
static int __platform_get_irq_byname(struct platform_device *dev,
				     const char *name)
{
	struct resource *r;
	int ret;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get_byname(dev->dev.of_node, name);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
	if (r) {
		/* 0 is reported but flagged — callers treat it as invalid */
		WARN(r->start == 0, "0 is an invalid IRQ number\n");
		return r->start;
	}

	return -ENXIO;
}
455
456
457
458
459
460
461
462
463
464
/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Logs a probe error (silent for -EPROBE_DEFER) when the IRQ is missing.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
	int ret;

	ret = __platform_get_irq_byname(dev, name);
	if (ret < 0)
		return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n",
				     name);
	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
476
477
478
479
480
481
482
483
484
485
486
/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Same as platform_get_irq_byname(), but no error message is printed when
 * the lookup fails.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname_optional(struct platform_device *dev,
				     const char *name)
{
	return __platform_get_irq_byname(dev, name);
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
493
494
495
496
497
498
/**
 * platform_add_devices - add a number of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 *
 * On failure, all devices registered so far are unregistered again.
 *
 * Return: 0 on success, the failing platform_device_register() error
 * otherwise.
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		int ret = platform_device_register(devs[i]);

		if (ret) {
			/* roll back everything registered so far */
			while (i-- > 0)
				platform_device_unregister(devs[i]);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
515
/*
 * Allocation wrapper used by platform_device_alloc(): the device name is
 * stored in the trailing flexible array so device and name share one
 * allocation, freed together in platform_device_release().
 */
struct platform_object {
	struct platform_device pdev;
	char name[];	/* NUL-terminated copy of the device name */
};
520
521
522
523
524
525static void setup_pdev_dma_masks(struct platform_device *pdev)
526{
527 pdev->dev.dma_parms = &pdev->dma_parms;
528
529 if (!pdev->dev.coherent_dma_mask)
530 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
531 if (!pdev->dev.dma_mask) {
532 pdev->platform_dma_mask = DMA_BIT_MASK(32);
533 pdev->dev.dma_mask = &pdev->platform_dma_mask;
534 }
535};
536
537
538
539
540
541
542
543
544void platform_device_put(struct platform_device *pdev)
545{
546 if (!IS_ERR_OR_NULL(pdev))
547 put_device(&pdev->dev);
548}
549EXPORT_SYMBOL_GPL(platform_device_put);
550
/*
 * Release callback installed by platform_device_alloc(): frees everything
 * owned by the device (platform data, resources, overrides) and the
 * containing platform_object allocation itself.
 */
static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_node_put(pa->pdev.dev.of_node);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa->pdev.driver_override);
	kfree(pa);
}
563
564
565
566
567
568
569
570
571
572struct platform_device *platform_device_alloc(const char *name, int id)
573{
574 struct platform_object *pa;
575
576 pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
577 if (pa) {
578 strcpy(pa->name, name);
579 pa->pdev.name = pa->name;
580 pa->pdev.id = id;
581 device_initialize(&pa->pdev.dev);
582 pa->pdev.dev.release = platform_device_release;
583 setup_pdev_dma_masks(&pa->pdev);
584 }
585
586 return pa ? &pa->pdev : NULL;
587}
588EXPORT_SYMBOL_GPL(platform_device_alloc);
589
590
591
592
593
594
595
596
597
598
599
600int platform_device_add_resources(struct platform_device *pdev,
601 const struct resource *res, unsigned int num)
602{
603 struct resource *r = NULL;
604
605 if (res) {
606 r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
607 if (!r)
608 return -ENOMEM;
609 }
610
611 kfree(pdev->resource);
612 pdev->resource = r;
613 pdev->num_resources = num;
614 return 0;
615}
616EXPORT_SYMBOL_GPL(platform_device_add_resources);
617
618
619
620
621
622
623
624
625
626
627
628int platform_device_add_data(struct platform_device *pdev, const void *data,
629 size_t size)
630{
631 void *d = NULL;
632
633 if (data) {
634 d = kmemdup(data, size, GFP_KERNEL);
635 if (!d)
636 return -ENOMEM;
637 }
638
639 kfree(pdev->dev.platform_data);
640 pdev->dev.platform_data = d;
641 return 0;
642}
643EXPORT_SYMBOL_GPL(platform_device_add_data);
644
645
646
647
648
649
650
651
/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 *
 * Return: 0 on success, negative error number on failure.
 */
int platform_device_add(struct platform_device *pdev)
{
	u32 i;
	int ret;

	if (!pdev)
		return -EINVAL;

	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID.  Mark it as such so
		 * that it is freed again on removal, and use a ".auto"
		 * suffix to avoid colliding with explicit IDs.
		 */
		ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	/* Claim every resource, defaulting unparented MEM/IO resources
	 * into the global iomem/ioport trees. */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	/* Unwind: free an auto-allocated ID and release the resources
	 * claimed so far (i indexes the first unclaimed resource). */
	if (pdev->id_auto) {
		ida_free(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
733
734
735
736
737
738
739
740
741
742void platform_device_del(struct platform_device *pdev)
743{
744 u32 i;
745
746 if (!IS_ERR_OR_NULL(pdev)) {
747 device_del(&pdev->dev);
748
749 if (pdev->id_auto) {
750 ida_free(&platform_devid_ida, pdev->id);
751 pdev->id = PLATFORM_DEVID_AUTO;
752 }
753
754 for (i = 0; i < pdev->num_resources; i++) {
755 struct resource *r = &pdev->resource[i];
756 if (r->parent)
757 release_resource(r);
758 }
759 }
760}
761EXPORT_SYMBOL_GPL(platform_device_del);
762
763
764
765
766
767
768
769
770
/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 *
 * Initializes the embedded struct device, applies the default DMA masks,
 * and adds the device to the hierarchy.
 *
 * Return: 0 on success, negative error number on failure.
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);
778
779
780
781
782
783
784
785
786
/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps.  First we release all resources and
 * remove it from the subsystem, then we drop the reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
793
794
795
796
797
798
799
800
801
/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 * @pdevinfo: data used to create device
 *
 * Return: pointer to the new platform device, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	/* hold a reference on the OF node (if any) behind the fwnode;
	 * dropped in platform_device_release() */
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		pdev->platform_dma_mask = pdevinfo->dma_mask;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = device_create_managed_software_node(&pdev->dev,
					pdevinfo->properties, NULL);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		/* detach any ACPI companion before dropping the device */
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
851
852
853
854
855
856
/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 *
 * Return: 0 on success, negative error number on failure.
 */
int __platform_driver_register(struct platform_driver *drv,
			       struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);
866
867
868
869
870
/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);
876
/*
 * Probe stub installed by __platform_driver_probe() after the real (often
 * __init) probe routine has run, so late bind attempts always fail.
 */
static int platform_probe_fail(struct platform_device *pdev)
{
	return -ENXIO;
}
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Return: 0 if the driver registered and bound to a device, else a
 * negative error code with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * Probes must run synchronously: below we check whether any device
	 * actually bound and bail out with an error if none did.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent the driver from requesting probe deferral — the probe
	 * routine is about to be discarded, so retries would be futile.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure the driver gets no bind/unbind sysfs attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporarily install the (possibly __init) probe routine */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);
	if (retval)
		return retval;

	/*
	 * Swap the probe routine for the always-failing stub under the bus's
	 * driver-list lock, so nothing scanning the list can race a forced
	 * probe of a new device against the disappearing __init code.  Also
	 * check that at least one device bound.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = platform_probe_fail;
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Return: pointer to the new platform device, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_del;

	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: array of drivers to register
 * @count: number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array.  On failure to
 * register a driver, all previously registered drivers will be
 * unregistered.  Callers of this API should use
 * platform_unregister_drivers() to unregister drivers in the reverse
 * order.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			goto error;
		}
	}

	return 0;

error:
	/* unwind in reverse registration order */
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: array of drivers to unregister
 * @count: number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array.  This is typically
 * used to complement an earlier call to platform_register_drivers().
 * Drivers are unregistered in the reverse order in which they were
 * registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	unsigned int i = count;

	while (i) {
		i--;
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
1070
1071static const struct platform_device_id *platform_match_id(
1072 const struct platform_device_id *id,
1073 struct platform_device *pdev)
1074{
1075 while (id->name[0]) {
1076 if (strcmp(pdev->name, id->name) == 0) {
1077 pdev->id_entry = id;
1078 return id;
1079 }
1080 id++;
1081 }
1082 return NULL;
1083}
1084
1085#ifdef CONFIG_PM_SLEEP
1086
1087static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
1088{
1089 struct platform_driver *pdrv = to_platform_driver(dev->driver);
1090 struct platform_device *pdev = to_platform_device(dev);
1091 int ret = 0;
1092
1093 if (dev->driver && pdrv->suspend)
1094 ret = pdrv->suspend(pdev, mesg);
1095
1096 return ret;
1097}
1098
1099static int platform_legacy_resume(struct device *dev)
1100{
1101 struct platform_driver *pdrv = to_platform_driver(dev->driver);
1102 struct platform_device *pdev = to_platform_device(dev);
1103 int ret = 0;
1104
1105 if (dev->driver && pdrv->resume)
1106 ret = pdrv->resume(pdev);
1107
1108 return ret;
1109}
1110
1111#endif
1112
1113#ifdef CONFIG_SUSPEND
1114
1115int platform_pm_suspend(struct device *dev)
1116{
1117 struct device_driver *drv = dev->driver;
1118 int ret = 0;
1119
1120 if (!drv)
1121 return 0;
1122
1123 if (drv->pm) {
1124 if (drv->pm->suspend)
1125 ret = drv->pm->suspend(dev);
1126 } else {
1127 ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
1128 }
1129
1130 return ret;
1131}
1132
1133int platform_pm_resume(struct device *dev)
1134{
1135 struct device_driver *drv = dev->driver;
1136 int ret = 0;
1137
1138 if (!drv)
1139 return 0;
1140
1141 if (drv->pm) {
1142 if (drv->pm->resume)
1143 ret = drv->pm->resume(dev);
1144 } else {
1145 ret = platform_legacy_resume(dev);
1146 }
1147
1148 return ret;
1149}
1150
1151#endif
1152
1153#ifdef CONFIG_HIBERNATE_CALLBACKS
1154
1155int platform_pm_freeze(struct device *dev)
1156{
1157 struct device_driver *drv = dev->driver;
1158 int ret = 0;
1159
1160 if (!drv)
1161 return 0;
1162
1163 if (drv->pm) {
1164 if (drv->pm->freeze)
1165 ret = drv->pm->freeze(dev);
1166 } else {
1167 ret = platform_legacy_suspend(dev, PMSG_FREEZE);
1168 }
1169
1170 return ret;
1171}
1172
1173int platform_pm_thaw(struct device *dev)
1174{
1175 struct device_driver *drv = dev->driver;
1176 int ret = 0;
1177
1178 if (!drv)
1179 return 0;
1180
1181 if (drv->pm) {
1182 if (drv->pm->thaw)
1183 ret = drv->pm->thaw(dev);
1184 } else {
1185 ret = platform_legacy_resume(dev);
1186 }
1187
1188 return ret;
1189}
1190
1191int platform_pm_poweroff(struct device *dev)
1192{
1193 struct device_driver *drv = dev->driver;
1194 int ret = 0;
1195
1196 if (!drv)
1197 return 0;
1198
1199 if (drv->pm) {
1200 if (drv->pm->poweroff)
1201 ret = drv->pm->poweroff(dev);
1202 } else {
1203 ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
1204 }
1205
1206 return ret;
1207}
1208
1209int platform_pm_restore(struct device *dev)
1210{
1211 struct device_driver *drv = dev->driver;
1212 int ret = 0;
1213
1214 if (!drv)
1215 return 0;
1216
1217 if (drv->pm) {
1218 if (drv->pm->restore)
1219 ret = drv->pm->restore(dev);
1220 } else {
1221 ret = platform_legacy_resume(dev);
1222 }
1223
1224 return ret;
1225}
1226
1227#endif
1228
1229
1230
1231
1232
1233
1234
/*
 * sysfs "modalias": try an OF-derived alias first, then ACPI, and finally
 * fall back to the generic "platform:<name>" form.
 */
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "platform:%s\n", pdev->name);
}
static DEVICE_ATTR_RO(modalias);
1252
/* sysfs "numa_node": NUMA node of the device (hidden when NUMA_NO_NODE). */
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);
1259
/*
 * sysfs "driver_override" reader.  The device lock serializes against
 * driver_override_store() swapping/freeing the string.
 */
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);

	return len;
}
1272
/*
 * sysfs "driver_override" writer: force the device to bind only to the
 * named driver (see platform_match()).  Writing an empty string clears
 * the override.
 */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	/* strip a trailing newline from echo-style writes */
	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	/* swap the string under the device lock; free the old one after */
	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}
static DEVICE_ATTR_RW(driver_override);
1307
/* Default sysfs attributes for every platform device. */
static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
1314
1315static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
1316 int n)
1317{
1318 struct device *dev = container_of(kobj, typeof(*dev), kobj);
1319
1320 if (a == &dev_attr_numa_node.attr &&
1321 dev_to_node(dev) == NUMA_NO_NODE)
1322 return 0;
1323
1324 return a->mode;
1325}
1326
/* Attribute group wired into platform_bus_type via platform_dev_groups. */
static const struct attribute_group platform_dev_group = {
	.attrs = platform_dev_attrs,
	.is_visible = platform_dev_attrs_visible,
};
__ATTRIBUTE_GROUPS(platform_dev);
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Matching is tried in priority order: an explicit driver_override wins
 * outright, then OF, then ACPI, then the driver's id table, and finally a
 * plain name comparison.
 *
 * Return: non-zero if the device and driver match.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}
1371
/*
 * Emit the MODALIAS uevent variable, preferring OF, then ACPI, then the
 * generic "platform:<name>" fallback (mirrors modalias_show()).
 */
static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct platform_device *pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
		       pdev->name);
	return 0;
}
1390
/* Bus probe: set clock defaults, attach the PM domain, then call the
 * driver's probe routine, detaching again on failure. */
static int platform_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	/*
	 * A driver registered via __platform_driver_probe() has had its
	 * probe routine replaced with platform_probe_fail because the real
	 * one (typically __init code) is gone.  Bail out early without
	 * touching clocks or PM domains for such drivers.
	 */
	if (unlikely(drv->probe == platform_probe_fail))
		return -ENXIO;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	/* drivers registered via __platform_driver_probe() must not defer */
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}
1429
/* Bus remove: call the driver's remove routine (its return value is
 * ignored, with a warning) and detach the PM domain. */
static void platform_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	if (drv->remove) {
		int ret = drv->remove(dev);

		if (ret)
			dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
	}
	dev_pm_domain_detach(_dev, true);
}
1443
1444static void platform_shutdown(struct device *_dev)
1445{
1446 struct platform_device *dev = to_platform_device(_dev);
1447 struct platform_driver *drv;
1448
1449 if (!_dev->driver)
1450 return;
1451
1452 drv = to_platform_driver(_dev->driver);
1453 if (drv->shutdown)
1454 drv->shutdown(dev);
1455}
1456
1457
/* Configure DMA for the device from firmware data: OF properties when a
 * device-tree node exists, otherwise ACPI attributes when an ACPI
 * companion exists; devices with neither are left untouched (returns 0). */
int platform_dma_configure(struct device *dev)
{
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev->of_node) {
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		ret = acpi_dma_configure(dev, attr);
	}

	return ret;
}
1472
/* PM operations for the platform bus: generic runtime PM plus the
 * platform_pm_* sleep callbacks defined above (via the SLEEP_OPS macro). */
static const struct dev_pm_ops platform_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
	USE_PLATFORM_PM_SLEEP_OPS
};
1477
/* The platform bus itself, tying together all the callbacks above. */
struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.probe		= platform_probe,
	.remove		= platform_remove,
	.shutdown	= platform_shutdown,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
1490
/* bus_find_device() adapter: casts away the const that the device-match
 * callback signature imposes and defers to platform_match(). */
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}
1495
1496
1497
1498
1499
1500
1501
/**
 * platform_find_device_by_driver - Find a platform device with a given driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 *
 * Return: the matched device with its reference held, or NULL (per
 * bus_find_device() semantics).
 */
struct device *platform_find_device_by_driver(struct device *start,
					      const struct device_driver *drv)
{
	return bus_find_device(&platform_bus_type, start, drv,
			       __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
1509
/* Weak hook run from platform_bus_init(); architectures may override it to
 * tear down early platform devices — TODO confirm against arch users. */
void __weak __init early_platform_cleanup(void) { }
1511
/*
 * Register the root platform device and the platform bus type at boot,
 * then hook up the OF reconfiguration notifier.  On bus registration
 * failure the root device is unregistered again.
 */
int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		/* device_register() failure still requires a put */
		put_device(&platform_bus);
		return error;
	}
	error =  bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);
	of_platform_register_reconfig_notifier();
	return error;
}
1529