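// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/dd.c - The core device/driver interactions.
 *
 * This file contains the (sometimes tricky) code that controls the
 * interactions between devices and drivers, which primarily includes
 * driver binding and unbinding.
 */
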
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/devinfo.h>

#include "base.h"
#include "power/power.h"
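
/*
 * Deferred Probe infrastructure.
 *
 * Sometimes driver probe order matters, but the kernel doesn't always have
 * dependency information, which means some drivers will get probed before a
 * resource they depend on is available.  If a required resource is not
 * available yet, a driver can request probing to be deferred by returning
 * -EPROBE_DEFER from its probe hook.
 *
 * For example (an illustrative, hypothetical driver, not part of this file),
 * a probe hook can simply propagate -EPROBE_DEFER from a resource getter:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct gpio_desc *reset;
 *
 *		reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
 *		if (IS_ERR(reset))
 *			return PTR_ERR(reset);
 *		return 0;
 *	}
 *
 * devm_gpiod_get() returns ERR_PTR(-EPROBE_DEFER) when the GPIO provider has
 * not been probed yet, which lands the device on the pending list below.
 *
 * Deferred probe maintains two lists of devices, a pending list and an
 * active list.  A driver returning -EPROBE_DEFER causes the device to be
 * added to the pending list.  A successful driver probe will trigger moving
 * all devices from the pending to the active list so that the workqueue
 * will eventually retry them.
 *
 * The deferred_probe_mutex must be held any time the deferred_probe_*_list
 * or the deferred_probe_work_func() function is running.
 */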
static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
static bool initcalls_done;

/*
 * In some cases, like suspend to RAM or hibernation, it might be reasonable
 * to prohibit probing of devices as it could be unsafe.  Once defer_all_probes
 * is true, all driver probes will be forcibly deferred.
 */
static bool defer_all_probes;

/*
 * deferred_probe_debug() - Mirror initcall_debug for deferred probes: log
 * which device is being reprobed and how long the probe attempt took.
 */
static void deferred_probe_debug(struct device *dev)
{
	ktime_t calltime, delta, rettime;
	unsigned long long duration;

	printk(KERN_DEBUG "deferred probe %s @ %i\n", dev_name(dev),
	       task_pid_nr(current));
	calltime = ktime_get();
	bus_probe_device(dev);
	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
	printk(KERN_DEBUG "deferred probe %s returned after %lld usecs\n",
	       dev_name(dev), duration);
}

/*
 * deferred_probe_work_func() - Retry probing devices in the active list.
 */
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;
	/*
	 * This block processes every device in the deferred 'active' list.
	 * Each device is removed from the active list and passed to
	 * bus_probe_device() to re-attempt the probe.  The loop continues
	 * until every device in the active list is removed and retried.
	 *
	 * Note: Once the device is removed from the list and the mutex is
	 * released, it is possible for the device to be freed by another
	 * thread, so get/put_device() is used to ensure the device
	 * structure cannot disappear while it is being reprobed.
	 */
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		private = list_first_entry(&deferred_probe_active_list,
					typeof(*dev->p), deferred_probe);
		dev = private->device;
		list_del_init(&private->deferred_probe);

		get_device(dev);

		/*
		 * Drop the mutex while probing each device; the probe path
		 * may manipulate the deferred list.
		 */
		mutex_unlock(&deferred_probe_mutex);

		/*
		 * Force the device to the end of the dpm_list since the PM
		 * code assumes that the order devices are added to the list
		 * is a good order for suspend, but deferred probe makes that
		 * assumption unsafe.
		 */
		device_pm_move_to_tail(dev);

		dev_dbg(dev, "Retrying from deferred list\n");
		if (initcall_debug && !initcalls_done)
			deferred_probe_debug(dev);
		else
			bus_probe_device(dev);

		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);

static void driver_deferred_probe_add(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Added to deferred list\n");
		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
	}
	mutex_unlock(&deferred_probe_mutex);
}

void driver_deferred_probe_del(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (!list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Removed from deferred list\n");
		list_del_init(&dev->p->deferred_probe);
	}
	mutex_unlock(&deferred_probe_mutex);
}

static bool driver_deferred_probe_enable = false;
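
/**
 * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
 *
 * This function moves all devices from the pending list to the active list
 * and schedules the deferred probe workqueue to process them.  It should be
 * called any time a driver is successfully bound to a device.
 *
 * Note that there is a race in multi-threaded probe: one probe can complete
 * successfully while another is about to defer.  If the second depends on
 * the first, it may be put on the pending list after the trigger has already
 * fired.  The atomic deferred_trigger_count is used to detect whether a
 * trigger occurred in the midst of a probe, in which case deferred
 * processing is triggered again (see driver_deferred_probe_add_trigger()).
 */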
static void driver_deferred_probe_trigger(void)
{
	if (!driver_deferred_probe_enable)
		return;

	/*
	 * A successful probe means that all the devices in the pending list
	 * should be triggered to be reprobed.  Move all the deferred devices
	 * into the active list so they can be retried by the workqueue.
	 */
	mutex_lock(&deferred_probe_mutex);
	atomic_inc(&deferred_trigger_count);
	list_splice_tail_init(&deferred_probe_pending_list,
			      &deferred_probe_active_list);
	mutex_unlock(&deferred_probe_mutex);

	/*
	 * Kick the re-probe work.  It may already be scheduled, but it is
	 * safe to kick it again.
	 */
	schedule_work(&deferred_probe_work);
}
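
/**
 * device_block_probing() - Block/defer device's probes
 *
 * It will disable probing of devices and defer their probes instead.
 */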
void device_block_probing(void)
{
	defer_all_probes = true;
	/* wait for the known devices to complete their probing */
	wait_for_device_probe();
}
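
/**
 * device_unblock_probing() - Unblock/enable device's probes
 *
 * It will restore normal behavior and trigger re-probing of deferred
 * devices.
 */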
void device_unblock_probing(void)
{
	defer_all_probes = false;
	driver_deferred_probe_trigger();
}
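
/**
 * deferred_probe_initcall() - Enable probing of deferred devices
 *
 * We don't want to get in the way when the bulk of drivers are getting
 * probed.  Instead, this initcall makes sure that deferred probing is
 * delayed until late_initcall time.
 */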
static int deferred_probe_initcall(void)
{
	driver_deferred_probe_enable = true;
	driver_deferred_probe_trigger();
	/* Sort as many dependencies as possible before exiting initcalls */
	flush_work(&deferred_probe_work);
	initcalls_done = true;
	return 0;
}
late_initcall(deferred_probe_initcall);
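
/**
 * device_is_bound - Check if device is bound to a driver
 * @dev: device to check
 *
 * Returns true if passed device has already finished probing successfully
 * against a driver.
 *
 * This function must be called with the device lock held.
 */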
bool device_is_bound(struct device *dev)
{
	return dev->p && klist_node_attached(&dev->p->knode_driver);
}

static void driver_bound(struct device *dev)
{
	if (device_is_bound(dev)) {
		printk(KERN_WARNING "%s: device %s already bound\n",
			__func__, kobject_name(&dev->kobj));
		return;
	}

	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
		 __func__, dev_name(dev));

	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
	device_links_driver_bound(dev);

	device_pm_check_callbacks(dev);

	/*
	 * Make sure the device is no longer in one of the deferred lists and
	 * kick off retrying all pending devices.
	 */
	driver_deferred_probe_del(dev);
	driver_deferred_probe_trigger();

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BOUND_DRIVER, dev);

	kobject_uevent(&dev->kobj, KOBJ_BIND);
}

static ssize_t coredump_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	device_lock(dev);
	dev->driver->coredump(dev);
	device_unlock(dev);

	return count;
}
static DEVICE_ATTR_WO(coredump);

static int driver_sysfs_add(struct device *dev)
{
	int ret;

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BIND_DRIVER, dev);

	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
				kobject_name(&dev->kobj));
	if (ret)
		goto fail;

	ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
				"driver");
	if (ret)
		goto rm_dev;

	if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump ||
	    !device_create_file(dev, &dev_attr_coredump))
		return 0;

	sysfs_remove_link(&dev->kobj, "driver");

rm_dev:
	sysfs_remove_link(&dev->driver->p->kobj,
			  kobject_name(&dev->kobj));

fail:
	return ret;
}

static void driver_sysfs_remove(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv) {
		if (drv->coredump)
			device_remove_file(dev, &dev_attr_coredump);
		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
		sysfs_remove_link(&dev->kobj, "driver");
	}
}
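
/**
 * device_bind_driver - bind a driver to one device.
 * @dev: device.
 *
 * Allow manual attachment of a driver to a device.
 * Caller must have already set @dev->driver.
 *
 * Note that this does not modify the bus reference count,
 * nor take the bus's rwsem.  Please verify those are accounted
 * for before calling this.
 *
 * This function must be called with the device lock held.
 */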
int device_bind_driver(struct device *dev)
{
	int ret;

	ret = driver_sysfs_add(dev);
	if (!ret)
		driver_bound(dev);
	else if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
	return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);

static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);

static void driver_deferred_probe_add_trigger(struct device *dev,
					      int local_trigger_count)
{
	driver_deferred_probe_add(dev);

	/* Did a trigger occur while probing? Need to re-trigger if yes */
	if (local_trigger_count != atomic_read(&deferred_trigger_count))
		driver_deferred_probe_trigger();
}

static int really_probe(struct device *dev, struct device_driver *drv)
{
	int ret = -EPROBE_DEFER;
	int local_trigger_count = atomic_read(&deferred_trigger_count);
	bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
			   !drv->suppress_bind_attrs;

	if (defer_all_probes) {
		/*
		 * defer_all_probes is set only via device_block_probing(),
		 * so all probes are forcibly deferred while probing is
		 * blocked (e.g. around system suspend).
		 */
		dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
		driver_deferred_probe_add(dev);
		return ret;
	}

	ret = device_links_check_suppliers(dev);
	if (ret == -EPROBE_DEFER)
		driver_deferred_probe_add_trigger(dev, local_trigger_count);
	if (ret)
		return ret;

	atomic_inc(&probe_count);
	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
		 drv->bus->name, __func__, drv->name, dev_name(dev));
	WARN_ON(!list_empty(&dev->devres_head));

re_probe:
	dev->driver = drv;

	/* If using pinctrl, bind pins now before probing */
	ret = pinctrl_bind_pins(dev);
	if (ret)
		goto pinctrl_bind_failed;

	if (dev->bus->dma_configure) {
		ret = dev->bus->dma_configure(dev);
		if (ret)
			goto dma_failed;
	}

	if (driver_sysfs_add(dev)) {
		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
			__func__, dev_name(dev));
		goto probe_failed;
	}

	if (dev->pm_domain && dev->pm_domain->activate) {
		ret = dev->pm_domain->activate(dev);
		if (ret)
			goto probe_failed;
	}

	if (dev->bus->probe) {
		ret = dev->bus->probe(dev);
		if (ret)
			goto probe_failed;
	} else if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto probe_failed;
	}

	if (test_remove) {
		test_remove = false;

		if (dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);

		devres_release_all(dev);
		driver_sysfs_remove(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);
		pm_runtime_reinit(dev);

		goto re_probe;
	}

	pinctrl_init_done(dev);

	if (dev->pm_domain && dev->pm_domain->sync)
		dev->pm_domain->sync(dev);

	driver_bound(dev);
	ret = 1;
	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);
	goto done;

probe_failed:
	arch_teardown_dma_ops(dev);
dma_failed:
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
	device_links_no_driver(dev);
	devres_release_all(dev);
	driver_sysfs_remove(dev);
	dev->driver = NULL;
	dev_set_drvdata(dev, NULL);
	if (dev->pm_domain && dev->pm_domain->dismiss)
		dev->pm_domain->dismiss(dev);
	pm_runtime_reinit(dev);
	dev_pm_set_driver_flags(dev, 0);

	switch (ret) {
	case -EPROBE_DEFER:
		/* Driver requested deferred probing */
		dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
		driver_deferred_probe_add_trigger(dev, local_trigger_count);
		break;
	case -ENODEV:
	case -ENXIO:
		pr_debug("%s: probe of %s rejects match %d\n",
			 drv->name, dev_name(dev), ret);
		break;
	default:
		/* driver matched but the probe failed */
		printk(KERN_WARNING
		       "%s: probe of %s failed with error %d\n",
		       drv->name, dev_name(dev), ret);
	}
	/*
	 * Ignore errors returned by ->probe so that the next driver can try
	 * its luck.
	 */
	ret = 0;
done:
	atomic_dec(&probe_count);
	wake_up(&probe_waitqueue);
	return ret;
}
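
/**
 * driver_probe_done - determine if the probe sequence is finished or not.
 *
 * Returns 0 if all probes are done, -EBUSY if probes are still running.
 */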
int driver_probe_done(void)
{
	pr_debug("%s: probe_count = %d\n", __func__,
		 atomic_read(&probe_count));
	if (atomic_read(&probe_count))
		return -EBUSY;
	return 0;
}
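
/**
 * wait_for_device_probe
 * Wait for device probing to be completed.
 */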
void wait_for_device_probe(void)
{
	/* wait for the deferred probe workqueue to finish */
	flush_work(&deferred_probe_work);

	/* wait for the known devices to complete their probing */
	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
	async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
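
/**
 * driver_probe_device - attempt to bind device & driver together
 * @drv: driver to bind a device to
 * @dev: device to try to bind to the driver
 *
 * This function returns 1 if the device is bound successfully, -ENODEV if
 * the device is not registered, and 0 or a negative errno otherwise.
 *
 * This function must be called with @dev lock held.  When called for a
 * USB interface, @dev->parent lock must be held as well.
 *
 * If the device has a parent, the parent is runtime-resumed around the
 * probe attempt.
 */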
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
	int ret = 0;

	if (!device_is_registered(dev))
		return -ENODEV;

	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);

	pm_runtime_get_suppliers(dev);
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	pm_runtime_barrier(dev);
	ret = really_probe(dev, drv);
	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	pm_runtime_put_suppliers(dev);
	return ret;
}

bool driver_allows_async_probing(struct device_driver *drv)
{
	switch (drv->probe_type) {
	case PROBE_PREFER_ASYNCHRONOUS:
		return true;

	case PROBE_FORCE_SYNCHRONOUS:
		return false;

	default:
		if (module_requested_async_probing(drv->owner))
			return true;

		return false;
	}
}

struct device_attach_data {
	struct device *dev;

	/*
	 * Indicates whether we are considering asynchronous probing or
	 * not.  Only initial binding after device or driver registration
	 * (including deferral processing) may be done asynchronously; the
	 * rest is always synchronous, as it is expected to be done by
	 * request from userspace.
	 */
	bool check_async;

	/*
	 * Indicates whether this pass is binding synchronous or
	 * asynchronous drivers.  When asynchronous probing is allowed,
	 * two passes are made over the drivers: the first does synchronous
	 * probing and the second schedules asynchronous probing if the
	 * first did not succeed but found at least one driver that prefers
	 * probing asynchronously.
	 */
	bool want_async;

	/*
	 * Set to true if, while scanning for a matching driver, we
	 * encounter one that requests asynchronous probing.
	 */
	bool have_async;
};

static int __device_attach_driver(struct device_driver *drv, void *_data)
{
	struct device_attach_data *data = _data;
	struct device *dev = data->dev;
	bool async_allowed;
	int ret;

	/*
	 * Check if the device has already been claimed.  This may happen
	 * because driver loading, device discovery/registration, and
	 * deferred probe processing can all run at once in multiple
	 * threads.
	 */
	if (dev->driver)
		return -EBUSY;

	ret = driver_match_device(drv, dev);
	if (ret == 0) {
		/* no match */
		return 0;
	} else if (ret == -EPROBE_DEFER) {
		dev_dbg(dev, "Device match requests probe deferral\n");
		driver_deferred_probe_add(dev);
	} else if (ret < 0) {
		dev_dbg(dev, "Bus failed to match device: %d", ret);
		return ret;
	} /* ret > 0 means positive match */

	async_allowed = driver_allows_async_probing(drv);

	if (async_allowed)
		data->have_async = true;

	if (data->check_async && async_allowed != data->want_async)
		return 0;

	return driver_probe_device(drv, dev);
}

static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
{
	struct device *dev = _dev;
	struct device_attach_data data = {
		.dev = dev,
		.check_async = true,
		.want_async = true,
	};

	device_lock(dev);

	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
	dev_dbg(dev, "async probe completed\n");

	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	device_unlock(dev);

	put_device(dev);
}

static int __device_attach(struct device *dev, bool allow_async)
{
	int ret = 0;

	device_lock(dev);
	if (dev->driver) {
		if (device_is_bound(dev)) {
			ret = 1;
			goto out_unlock;
		}
		ret = device_bind_driver(dev);
		if (ret == 0)
			ret = 1;
		else {
			dev->driver = NULL;
			ret = 0;
		}
	} else {
		struct device_attach_data data = {
			.dev = dev,
			.check_async = allow_async,
			.want_async = false,
		};

		if (dev->parent)
			pm_runtime_get_sync(dev->parent);

		ret = bus_for_each_drv(dev->bus, NULL, &data,
					__device_attach_driver);
		if (!ret && allow_async && data.have_async) {
			/*
			 * We could not find an appropriate driver
			 * synchronously, async probes are allowed, and at
			 * least one driver prefers probing asynchronously,
			 * so schedule an async probe.
			 */
			dev_dbg(dev, "scheduling asynchronous probe\n");
			get_device(dev);
			async_schedule(__device_attach_async_helper, dev);
		} else {
			pm_request_idle(dev);
		}

		if (dev->parent)
			pm_runtime_put(dev->parent);
	}
out_unlock:
	device_unlock(dev);
	return ret;
}
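
/**
 * device_attach - try to attach device to a driver.
 * @dev: device.
 *
 * Walk the list of drivers that the bus has and call
 * driver_probe_device() for each pair.  If a compatible
 * pair is found, break out and return.
 *
 * Returns 1 if the device was bound to a driver;
 * 0 if no matching driver was found;
 * -ENODEV if the device is not registered.
 *
 * When called for a USB interface, @dev->parent lock must be held.
 */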
int device_attach(struct device *dev)
{
	return __device_attach(dev, false);
}
EXPORT_SYMBOL_GPL(device_attach);

void device_initial_probe(struct device *dev)
{
	__device_attach(dev, true);
}

static int __driver_attach(struct device *dev, void *data)
{
	struct device_driver *drv = data;
	int ret;

	/*
	 * Lock device and try to bind to it.  We drop the error
	 * here and always return 0, because we need to keep trying
	 * to bind to devices and some drivers will return an error
	 * simply if it didn't support the device.
	 *
	 * driver_probe_device() will spit a warning if there
	 * is an error.
	 */

	ret = driver_match_device(drv, dev);
	if (ret == 0) {
		/* no match */
		return 0;
	} else if (ret == -EPROBE_DEFER) {
		dev_dbg(dev, "Device match requests probe deferral\n");
		driver_deferred_probe_add(dev);
	} else if (ret < 0) {
		dev_dbg(dev, "Bus failed to match device: %d", ret);
		return ret;
	} /* ret > 0 means positive match */

	if (dev->parent && dev->bus->need_parent_lock)
		device_lock(dev->parent);
	device_lock(dev);
	if (!dev->driver)
		driver_probe_device(drv, dev);
	device_unlock(dev);
	if (dev->parent && dev->bus->need_parent_lock)
		device_unlock(dev->parent);

	return 0;
}
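
/**
 * driver_attach - try to bind driver to devices.
 * @drv: driver.
 *
 * Walk the list of devices that the bus has on it and try to
 * match the driver with each one.  If driver_probe_device()
 * returns 0 and the @dev->driver is set, we've found a
 * compatible pair.
 */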
int driver_attach(struct device_driver *drv)
{
	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);
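
/*
 * __device_release_driver() must be called with @dev lock held.
 * When called for a USB interface, @dev->parent lock must be held as well.
 */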
static void __device_release_driver(struct device *dev, struct device *parent)
{
	struct device_driver *drv;

	drv = dev->driver;
	if (drv) {
		if (driver_allows_async_probing(drv))
			async_synchronize_full();

		while (device_links_busy(dev)) {
			device_unlock(dev);
			if (parent)
				device_unlock(parent);

			device_links_unbind_consumers(dev);
			if (parent)
				device_lock(parent);

			device_lock(dev);

			/*
			 * A concurrent invocation of the same function might
			 * have released the driver successfully while this one
			 * was waiting, so check for that.
			 */
			if (dev->driver != drv)
				return;
		}

		pm_runtime_get_sync(dev);
		pm_runtime_clean_up_links(dev);

		driver_sysfs_remove(dev);

		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBIND_DRIVER,
						     dev);

		pm_runtime_put_sync(dev);

		if (dev->bus && dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);

		device_links_driver_cleanup(dev);

		devres_release_all(dev);
		arch_teardown_dma_ops(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);
		pm_runtime_reinit(dev);
		dev_pm_set_driver_flags(dev, 0);

		klist_remove(&dev->p->knode_driver);
		device_pm_check_callbacks(dev);
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBOUND_DRIVER,
						     dev);

		kobject_uevent(&dev->kobj, KOBJ_UNBIND);
	}
}

void device_release_driver_internal(struct device *dev,
				    struct device_driver *drv,
				    struct device *parent)
{
	if (parent && dev->bus->need_parent_lock)
		device_lock(parent);

	device_lock(dev);
	if (!drv || drv == dev->driver)
		__device_release_driver(dev, parent);

	device_unlock(dev);
	if (parent && dev->bus->need_parent_lock)
		device_unlock(parent);
}
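
/**
 * device_release_driver - manually detach device from driver.
 * @dev: device.
 *
 * Manually detach device from driver.
 * When called for a USB interface, @dev->parent lock must be held.
 */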
void device_release_driver(struct device *dev)
{
	/*
	 * If anyone calls device_release_driver() recursively from
	 * within their ->remove callback for the same device, they
	 * will deadlock waiting for themselves to finish their
	 * operations.
	 */
	device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
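
/**
 * driver_detach - detach driver from all devices it controls.
 * @drv: driver.
 */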
void driver_detach(struct device_driver *drv)
{
	struct device_private *dev_prv;
	struct device *dev;

	for (;;) {
		spin_lock(&drv->p->klist_devices.k_lock);
		if (list_empty(&drv->p->klist_devices.k_list)) {
			spin_unlock(&drv->p->klist_devices.k_lock);
			break;
		}
		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
				     struct device_private,
				     knode_driver.n_node);
		dev = dev_prv->device;
		get_device(dev);
		spin_unlock(&drv->p->klist_devices.k_lock);
		device_release_driver_internal(dev, drv, dev->parent);
		put_device(dev);
	}
}