/*
 * drivers/base/dd.c - The core device/driver interactions.
 *
 * This file contains the (sometimes tricky) code that controls the
 * interactions between devices and drivers, which primarily includes
 * driver binding and unbinding.
 *
 * All of this code used to exist in drivers/base/bus.c, but was
 * relocated to here in the name of compartmentalization (since it wasn't
 * strictly code just for the 'struct bus_type').
 *
 * Copyright (c) 2002-5 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2007-2009 Novell Inc.
 *
 * This file is released under the GPLv2
 */
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/devinfo.h>

#include "base.h"
#include "power/power.h"

/*
 * Deferred Probe infrastructure.
 *
 * Sometimes driver probe order matters, but the kernel doesn't always have
 * dependency information which means some drivers will get probed before a
 * resource it depends on is available.  For example, an SDHCI driver may
 * first need a GPIO line from an i2c GPIO controller before it can be
 * initialized.  If a required resource is not available yet, a driver can
 * request probing to be deferred by returning -EPROBE_DEFER from its probe
 * hook.
 *
 * Deferred probe maintains two lists of devices, a pending list and an
 * active list.  A driver returning -EPROBE_DEFER causes the device to be
 * added to the pending list.  A successful driver probe will trigger moving
 * all devices from the pending to the active list so that the workqueue
 * will eventually retry them.
 *
 * The deferred_probe_mutex must be held any time the deferred_probe_*_list
 * or driver_deferred_probe_enable are modified.
 */
static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static struct workqueue_struct *deferred_wq;
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
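
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * driver probe that defers itself.  The "foo" names and the reset GPIO are
 * assumptions for illustration; the block is compiled out via #if 0.
 */
#if 0
static int foo_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	/*
	 * If the controller supplying the "reset" GPIO has not probed yet,
	 * devm_gpiod_get() returns ERR_PTR(-EPROBE_DEFER).  Returning that
	 * error puts this device on deferred_probe_pending_list above, to
	 * be retried once some other driver binds successfully.
	 */
	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	return 0;
}
#endif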

/*
 * deferred_probe_work_func() - Retry probing devices in the active list.
 */
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;

	/*
	 * This block processes every device in the deferred 'active' list.
	 * Each device is removed from the active list and passed into
	 * bus_probe_device() to re-attempt to probe.  The loop continues
	 * until every device in the active list is removed and retried.
	 *
	 * Note: Once the device is removed from the list and the mutex is
	 * released, it is possible for the device to get freed by another
	 * thread and cause an illegal pointer dereference.  This code uses
	 * get/put_device() to ensure the device structure cannot disappear
	 * from under our feet.
	 */
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		private = list_first_entry(&deferred_probe_active_list,
					   typeof(*dev->p), deferred_probe);
		dev = private->device;
		list_del_init(&private->deferred_probe);

		get_device(dev);

		/*
		 * Drop the mutex while probing each device; the probe path may
		 * manipulate the deferred list
		 */
		mutex_unlock(&deferred_probe_mutex);

		/*
		 * Force the device to the end of the dpm_list since
		 * the PM code assumes that the order we add things to
		 * the list is a good order for suspend but deferred
		 * probe makes that very unsafe.
		 */
		device_pm_lock();
		device_pm_move_last(dev);
		device_pm_unlock();

		dev_dbg(dev, "Retrying from deferred list\n");
		bus_probe_device(dev);

		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);

static void driver_deferred_probe_add(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Added to deferred list\n");
		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
	}
	mutex_unlock(&deferred_probe_mutex);
}

void driver_deferred_probe_del(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (!list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Removed from deferred list\n");
		list_del_init(&dev->p->deferred_probe);
	}
	mutex_unlock(&deferred_probe_mutex);
}

static bool driver_deferred_probe_enable = false;

/**
 * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
 *
 * This function moves all devices from the pending list to the active
 * list and schedules the deferred probe workqueue to process them.  It
 * should be called anytime a driver is successfully bound to a device.
 *
 * Note, there is a race condition in multi-threaded probe. In the case where
 * more than one device is probing at the same time, it is possible for one
 * probe to complete successfully while another is about to defer. If the
 * second depends on the first, then it will get put on the pending list
 * after the trigger event has already occurred and will be stuck there.
 *
 * The atomic 'deferred_trigger_count' is used to determine if a successful
 * trigger has occurred in the midst of probing a driver. If the trigger
 * count changes in the midst of a probe, then deferred processing should be
 * triggered again.
 */
static void driver_deferred_probe_trigger(void)
{
	if (!driver_deferred_probe_enable)
		return;

	/*
	 * A successful probe means that all the devices in the pending list
	 * should be triggered to be reprobed.  Move all the deferred devices
	 * into the active list so they can be retried by the workqueue
	 */
	mutex_lock(&deferred_probe_mutex);
	atomic_inc(&deferred_trigger_count);
	list_splice_tail_init(&deferred_probe_pending_list,
			      &deferred_probe_active_list);
	mutex_unlock(&deferred_probe_mutex);

	/*
	 * Kick the re-probe thread.  It may already be scheduled, but it is
	 * safe to kick it again.
	 */
	queue_work(deferred_wq, &deferred_probe_work);
}

/**
 * deferred_probe_initcall() - Enable probing of deferred devices
 *
 * We don't want to get in the way when the bulk of drivers are being probed.
 * Instead, this initcall makes sure that deferred probing is delayed until
 * late_initcall time.
 */
static int deferred_probe_initcall(void)
{
	deferred_wq = create_singlethread_workqueue("deferwq");
	if (WARN_ON(!deferred_wq))
		return -ENOMEM;

	driver_deferred_probe_enable = true;
	driver_deferred_probe_trigger();
	/* Sort as many dependencies as possible before exiting initcalls */
	flush_workqueue(deferred_wq);
	return 0;
}
late_initcall(deferred_probe_initcall);

static void driver_bound(struct device *dev)
{
	if (klist_node_attached(&dev->p->knode_driver)) {
		printk(KERN_WARNING "%s: device %s already bound\n",
			__func__, kobject_name(&dev->kobj));
		return;
	}

	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
		 __func__, dev_name(dev));

	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);

	/*
	 * Make sure the device is no longer in one of the deferred lists and
	 * kick off retrying all pending devices
	 */
	driver_deferred_probe_del(dev);
	driver_deferred_probe_trigger();

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BOUND_DRIVER, dev);
}

static int driver_sysfs_add(struct device *dev)
{
	int ret;

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BIND_DRIVER, dev);

	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
				kobject_name(&dev->kobj));
	if (ret == 0) {
		ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
					"driver");
		if (ret)
			sysfs_remove_link(&dev->driver->p->kobj,
					  kobject_name(&dev->kobj));
	}
	return ret;
}

static void driver_sysfs_remove(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv) {
		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
		sysfs_remove_link(&dev->kobj, "driver");
	}
}

/**
 * device_bind_driver - bind a driver to one device.
 * @dev: device.
 *
 * Allow manual attachment of a driver to a device.
 * Caller must have already set @dev->driver.
 *
 * Note that this does not modify the bus reference count
 * nor take the bus's rwsem. Please verify that is accounted
 * for before calling this. (It is ok to call with no other effort
 * from a driver's probe() method.)
 *
 * This function must be called with the device lock held.
 */
int device_bind_driver(struct device *dev)
{
	int ret;

	ret = driver_sysfs_add(dev);
	if (!ret)
		driver_bound(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);

static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);

static int really_probe(struct device *dev, struct device_driver *drv)
{
	int ret = 0;
	int local_trigger_count = atomic_read(&deferred_trigger_count);

	atomic_inc(&probe_count);
	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
		 drv->bus->name, __func__, drv->name, dev_name(dev));
	WARN_ON(!list_empty(&dev->devres_head));

	dev->driver = drv;

	/* If using pinctrl, bind pins now before probing */
	ret = pinctrl_bind_pins(dev);
	if (ret)
		goto probe_failed;

	if (driver_sysfs_add(dev)) {
		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
			__func__, dev_name(dev));
		goto probe_failed;
	}

	if (dev->pm_domain && dev->pm_domain->activate) {
		ret = dev->pm_domain->activate(dev);
		if (ret)
			goto probe_failed;
	}

	/*
	 * Ensure devices are listed in devices_kset in correct order
	 * It's important to move the device to the end of devices_kset
	 * before calling .probe, because it could be recursive and affect
	 * dpm_list, which is a sorted copy of devices_kset.
	 */
	devices_kset_move_last(dev);

	if (dev->bus->probe) {
		ret = dev->bus->probe(dev);
		if (ret)
			goto probe_failed;
	} else if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto probe_failed;
	}

	pinctrl_init_done(dev);

	if (dev->pm_domain && dev->pm_domain->sync)
		dev->pm_domain->sync(dev);

	driver_bound(dev);
	ret = 1;
	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);
	goto done;

probe_failed:
	devres_release_all(dev);
	driver_sysfs_remove(dev);
	dev->driver = NULL;
	dev_set_drvdata(dev, NULL);
	if (dev->pm_domain && dev->pm_domain->dismiss)
		dev->pm_domain->dismiss(dev);

	switch (ret) {
	case -EPROBE_DEFER:
		/* Driver requested deferred probing */
		dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
		driver_deferred_probe_add(dev);
		/* Did a trigger occur while probing? Need to re-trigger if yes */
		if (local_trigger_count != atomic_read(&deferred_trigger_count))
			driver_deferred_probe_trigger();
		break;
	case -ENODEV:
	case -ENXIO:
		pr_debug("%s: probe of %s rejects match %d\n",
			 drv->name, dev_name(dev), ret);
		break;
	default:
		/* driver matched but the probe failed */
		printk(KERN_WARNING
		       "%s: probe of %s failed with error %d\n",
		       drv->name, dev_name(dev), ret);
	}
	/*
	 * Ignore errors returned by ->probe so that the next driver can try
	 * its luck.
	 */
	ret = 0;
done:
	atomic_dec(&probe_count);
	wake_up(&probe_waitqueue);
	return ret;
}

/**
 * driver_probe_done
 * Determine if the probe sequence is finished or not.
 *
 * Should somehow figure out how to use a semaphore, not an atomic variable...
 */
int driver_probe_done(void)
{
	pr_debug("%s: probe_count = %d\n", __func__,
		 atomic_read(&probe_count));
	if (atomic_read(&probe_count))
		return -EBUSY;
	return 0;
}

/**
 * wait_for_device_probe
 * Wait for device probing to be completed.
 */
void wait_for_device_probe(void)
{
	/* wait for the known devices to complete their probing */
	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
	async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
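
/*
 * Illustrative sketch only (assumption, not part of the original file):
 * late boot code that must not run until outstanding probes have settled
 * can synchronize with wait_for_device_probe().  The "foo" names are made
 * up; the block is compiled out via #if 0.
 */
#if 0
static int __init foo_late_setup(void)
{
	/* Block until probe_count reaches zero and async probes finish */
	wait_for_device_probe();
	/* ...registered devices have now had a chance to bind... */
	return 0;
}
late_initcall_sync(foo_late_setup);
#endif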

/**
 * driver_probe_device - attempt to bind device & driver together
 * @drv: driver to bind a device to
 * @dev: device to try to bind to the driver
 *
 * This function returns -ENODEV if the device is not registered,
 * 1 if the device is bound successfully and 0 otherwise.
 *
 * This function must be called with @dev lock held.  When called for a
 * USB interface, @dev->parent lock must be held as well.
 *
 * If the device has a parent, runtime-resume the parent before probing.
 */
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
	int ret = 0;

	if (!device_is_registered(dev))
		return -ENODEV;

	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);

	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	pm_runtime_barrier(dev);
	ret = really_probe(dev, drv);
	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	return ret;
}

bool driver_allows_async_probing(struct device_driver *drv)
{
	switch (drv->probe_type) {
	case PROBE_PREFER_ASYNCHRONOUS:
		return true;

	case PROBE_FORCE_SYNCHRONOUS:
		return false;

	default:
		if (module_requested_async_probing(drv->owner))
			return true;

		return false;
	}
}
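
/*
 * Illustrative sketch only (assumption, not part of the original file):
 * a driver opting in to asynchronous probing by setting probe_type in its
 * struct device_driver.  The "foo" names are made up; compiled out via #if 0.
 */
#if 0
static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name		= "foo",
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};
module_platform_driver(foo_driver);
#endif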

struct device_attach_data {
	struct device *dev;

	/*
	 * Indicates whether we are considering asynchronous probing or
	 * not. Only initial binding after device or driver registration
	 * (including deferral processing) may be done asynchronously, the
	 * rest is always synchronous, as we expect it is being done by
	 * request from userspace.
	 */
	bool check_async;

	/*
	 * Indicates if we are binding synchronous or asynchronous drivers.
	 * When asynchronous probing is enabled we'll execute 2 passes
	 * over drivers: first pass doing synchronous probing and second
	 * doing asynchronous probing (if synchronous did not succeed -
	 * most likely because there was no driver requiring synchronous
	 * probing - and we found asynchronous driver during first pass).
	 * The 2 passes are done because we can't shoot asynchronous
	 * probe for given device and driver from bus_for_each_drv() since
	 * driver pointer is not guaranteed to stay valid once
	 * bus_for_each_drv() iterates to the next driver on the bus.
	 */
	bool want_async;

	/*
	 * We'll set have_async to true if, while scanning for matching
	 * driver, we'll encounter one that requests asynchronous probing.
	 */
	bool have_async;
};

static int __device_attach_driver(struct device_driver *drv, void *_data)
{
	struct device_attach_data *data = _data;
	struct device *dev = data->dev;
	bool async_allowed;

	/*
	 * Check if device has already been claimed. This may
	 * happen with driver loading, device discovery/registration,
	 * and deferred probe processing happening all at once with
	 * multiple threads.
	 */
	if (dev->driver)
		return -EBUSY;

	if (!driver_match_device(drv, dev))
		return 0;

	async_allowed = driver_allows_async_probing(drv);

	if (async_allowed)
		data->have_async = true;

	if (data->check_async && async_allowed != data->want_async)
		return 0;

	return driver_probe_device(drv, dev);
}

static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
{
	struct device *dev = _dev;
	struct device_attach_data data = {
		.dev = dev,
		.check_async = true,
		.want_async = true,
	};

	device_lock(dev);

	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
	dev_dbg(dev, "async probe completed\n");

	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	device_unlock(dev);

	put_device(dev);
}

static int __device_attach(struct device *dev, bool allow_async)
{
	int ret = 0;

	device_lock(dev);
	if (dev->driver) {
		if (klist_node_attached(&dev->p->knode_driver)) {
			ret = 1;
			goto out_unlock;
		}
		ret = device_bind_driver(dev);
		if (ret == 0)
			ret = 1;
		else {
			dev->driver = NULL;
			ret = 0;
		}
	} else {
		struct device_attach_data data = {
			.dev = dev,
			.check_async = allow_async,
			.want_async = false,
		};

		if (dev->parent)
			pm_runtime_get_sync(dev->parent);

		ret = bus_for_each_drv(dev->bus, NULL, &data,
				       __device_attach_driver);
		if (!ret && allow_async && data.have_async) {
			/*
			 * If we could not find an appropriate driver
			 * synchronously and we are allowed to do
			 * async probes and there are drivers that
			 * want to probe asynchronously, we'll
			 * schedule a probe.
			 */
			dev_dbg(dev, "scheduling asynchronous probe\n");
			get_device(dev);
			async_schedule(__device_attach_async_helper, dev);
		} else {
			pm_request_idle(dev);
		}

		if (dev->parent)
			pm_runtime_put(dev->parent);
	}
out_unlock:
	device_unlock(dev);
	return ret;
}

/**
 * device_attach - try to attach device to a driver.
 * @dev: device.
 *
 * Walk the list of drivers that the bus has and call
 * driver_probe_device() for each pair. If a compatible
 * pair is found, break out and return.
 *
 * Returns 1 if the device was bound to a driver;
 * 0 if no matching driver was found;
 * -ENODEV if the device is not registered.
 *
 * When called for a USB interface, @dev->parent lock must be held.
 */
int device_attach(struct device *dev)
{
	return __device_attach(dev, false);
}
EXPORT_SYMBOL_GPL(device_attach);
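
/*
 * Illustrative sketch only (assumption, not part of the original file): a
 * bus that wants to retry binding of unbound devices can walk them and call
 * device_attach() on each one, much like bus_rescan_devices() does.  The
 * "foo" names are made up; the block is compiled out via #if 0.
 */
#if 0
static int foo_rescan_one(struct device *dev, void *data)
{
	if (!dev->driver)
		device_attach(dev);
	return 0;
}

static void foo_bus_rescan(struct bus_type *bus)
{
	bus_for_each_dev(bus, NULL, NULL, foo_rescan_one);
}
#endif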

void device_initial_probe(struct device *dev)
{
	__device_attach(dev, true);
}

static int __driver_attach(struct device *dev, void *data)
{
	struct device_driver *drv = data;

	/*
	 * Lock device and try to bind to it. We drop the error
	 * here and always return 0, because we need to keep trying
	 * to bind to devices and some drivers will return an error
	 * simply if it didn't support the device.
	 *
	 * driver_probe_device() will spit a warning if there
	 * is an error.
	 */
	if (!driver_match_device(drv, dev))
		return 0;

	if (dev->parent)
		device_lock(dev->parent);
	device_lock(dev);
	if (!dev->driver)
		driver_probe_device(drv, dev);
	device_unlock(dev);
	if (dev->parent)
		device_unlock(dev->parent);

	return 0;
}

/**
 * driver_attach - try to bind driver to devices.
 * @drv: driver.
 *
 * Walk the list of devices that the bus has on it and try to
 * match the driver with each one.  If driver_probe_device()
 * returns 0 and the @dev->driver is set, we've found a
 * compatible pair.
 */
int driver_attach(struct device_driver *drv)
{
	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);

/*
 * __device_release_driver() must be called with @dev lock held.
 * When called for a USB interface, @dev->parent lock must be held as well.
 */
static void __device_release_driver(struct device *dev)
{
	struct device_driver *drv;

	drv = dev->driver;
	if (drv) {
		if (driver_allows_async_probing(drv))
			async_synchronize_full();

		pm_runtime_get_sync(dev);

		driver_sysfs_remove(dev);

		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBIND_DRIVER,
						     dev);

		pm_runtime_put_sync(dev);

		if (dev->bus && dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);
		devres_release_all(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);

		klist_remove(&dev->p->knode_driver);
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBOUND_DRIVER,
						     dev);
	}
}

/**
 * device_release_driver - manually detach device from driver.
 * @dev: device.
 *
 * Manually detach device from driver.
 * When called for a USB interface, @dev->parent lock must be held.
 */
void device_release_driver(struct device *dev)
{
	/*
	 * If anyone calls device_release_driver() recursively from
	 * within their ->remove callback for the same device, they
	 * will deadlock right here.
	 */
	device_lock(dev);
	__device_release_driver(dev);
	device_unlock(dev);
}
EXPORT_SYMBOL_GPL(device_release_driver);

/**
 * driver_detach - detach driver from all devices it controls.
 * @drv: driver.
 */
void driver_detach(struct device_driver *drv)
{
	struct device_private *dev_prv;
	struct device *dev;

	for (;;) {
		spin_lock(&drv->p->klist_devices.k_lock);
		if (list_empty(&drv->p->klist_devices.k_list)) {
			spin_unlock(&drv->p->klist_devices.k_lock);
			break;
		}
		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
				     struct device_private,
				     knode_driver.n_node);
		dev = dev_prv->device;
		get_device(dev);
		spin_unlock(&drv->p->klist_devices.k_lock);

		if (dev->parent)
			device_lock(dev->parent);
		device_lock(dev);
		if (dev->driver == drv)
			__device_release_driver(dev);
		device_unlock(dev);
		if (dev->parent)
			device_unlock(dev->parent);
		put_device(dev);
	}
}