1
2
3
4
5
6
7
8
9
10
11#include <linux/acpi.h>
12#include <linux/cpufreq.h>
13#include <linux/device.h>
14#include <linux/err.h>
15#include <linux/fwnode.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/kdev_t.h>
21#include <linux/notifier.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/genhd.h>
25#include <linux/mutex.h>
26#include <linux/pm_runtime.h>
27#include <linux/netdevice.h>
28#include <linux/sched/signal.h>
29#include <linux/sched/mm.h>
30#include <linux/sysfs.h>
31#include <linux/dma-map-ops.h>
32
33#include "base.h"
34#include "power/power.h"
35
36#ifdef CONFIG_SYSFS_DEPRECATED
37#ifdef CONFIG_SYSFS_DEPRECATED_V2
38long sysfs_deprecated = 1;
39#else
40long sysfs_deprecated = 0;
41#endif
/* Parse the "sysfs.deprecated" boot parameter into the sysfs_deprecated knob. */
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
46early_param("sysfs.deprecated", sysfs_deprecated_setup);
47#endif
48
49
50static LIST_HEAD(deferred_sync);
51static unsigned int defer_sync_state_count = 1;
52static DEFINE_MUTEX(fwnode_link_lock);
53static bool fw_devlink_is_permissive(void);
54static bool fw_devlink_drv_reg_done;
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
74{
75 struct fwnode_link *link;
76 int ret = 0;
77
78 mutex_lock(&fwnode_link_lock);
79
80 list_for_each_entry(link, &sup->consumers, s_hook)
81 if (link->consumer == con)
82 goto out;
83
84 link = kzalloc(sizeof(*link), GFP_KERNEL);
85 if (!link) {
86 ret = -ENOMEM;
87 goto out;
88 }
89
90 link->supplier = sup;
91 INIT_LIST_HEAD(&link->s_hook);
92 link->consumer = con;
93 INIT_LIST_HEAD(&link->c_hook);
94
95 list_add(&link->s_hook, &sup->consumers);
96 list_add(&link->c_hook, &con->suppliers);
97out:
98 mutex_unlock(&fwnode_link_lock);
99
100 return ret;
101}
102
103
104
105
106
107
108
109static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
110{
111 struct fwnode_link *link, *tmp;
112
113 mutex_lock(&fwnode_link_lock);
114 list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
115 list_del(&link->s_hook);
116 list_del(&link->c_hook);
117 kfree(link);
118 }
119 mutex_unlock(&fwnode_link_lock);
120}
121
122
123
124
125
126
127
128static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
129{
130 struct fwnode_link *link, *tmp;
131
132 mutex_lock(&fwnode_link_lock);
133 list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
134 list_del(&link->s_hook);
135 list_del(&link->c_hook);
136 kfree(link);
137 }
138 mutex_unlock(&fwnode_link_lock);
139}
140
141
142
143
144
145
146
/**
 * fwnode_links_purge - Delete all supplier and consumer links of @fwnode.
 * @fwnode: fwnode whose links need to be deleted.
 */
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
	fwnode_links_purge_suppliers(fwnode);
	fwnode_links_purge_consumers(fwnode);
}
152
/**
 * fw_devlink_purge_absent_suppliers - Mark fwnodes that never became devices.
 * @fwnode: fwnode (sub)tree to process.
 *
 * For @fwnode and all its descendants that do not have a struct device
 * attached, set FWNODE_FLAG_NOT_DEVICE and drop their consumer links so that
 * consumers stop waiting on them.
 */
void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;

	/* A fwnode with a device attached is a real supplier; leave it alone. */
	if (fwnode->dev)
		return;

	fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
	fwnode_links_purge_consumers(fwnode);

	fwnode_for_each_available_child_node(fwnode, child)
		fw_devlink_purge_absent_suppliers(child);
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
168
#ifdef CONFIG_SRCU
/*
 * Device links locking, SRCU flavor: writers serialize on a mutex while
 * readers use SRCU, so link lists can be walked without blocking writers.
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}

/* Wait for all SRCU readers before a removed link can be freed. */
static void device_link_synchronize_removal(void)
{
	synchronize_srcu(&device_links_srcu);
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
}
#else
/*
 * Fallback when SRCU is not available: a plain rwsem guards both readers
 * and writers; the read-lock "index" is unused.
 */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int device_links_read_lock_held(void)
{
	return lockdep_is_held(&device_links_lock);
}
#endif

/* No deferred grace period needed: removal happens under the write lock. */
static inline void device_link_synchronize_removal(void)
{
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del(&link->s_node);
	list_del(&link->c_node);
}
#endif
249
250static bool device_is_ancestor(struct device *dev, struct device *target)
251{
252 while (target->parent) {
253 target = target->parent;
254 if (dev == target)
255 return true;
256 }
257 return false;
258}
259
260
261
262
263
264
265
266
267
/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child
 * or its consumer etc).  Return 1 if that is the case or 0 otherwise.
 */
int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	/*
	 * The "ancestors" check is needed to catch the case when the target
	 * device has not been completely initialized yet and is still missing
	 * from the list of children of its parent device.
	 */
	if (dev == target || device_is_ancestor(dev, target))
		return 1;

	/* Recurse into children; device_for_each_child() stops on non-zero. */
	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links impose no probe ordering. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}
299
/*
 * Set the initial state of a new device link from the current driver-binding
 * status of its supplier and consumer.
 */
static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probe yet as long as it
			 * knows that the supplier is already functional (for
			 * example, it has just acquired some resources from
			 * the supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}
343
/*
 * Move @dev - and, recursively, its children and consumers - to the ends of
 * the device and dpm lists so that it sorts after everything it depends on.
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links impose no ordering. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}
368
369
370
371
372
373
374
375
376
377
/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the target device to the end of the list along with all of its
 * children and consumers.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}
388
/* Map a device embedded in a struct device_link back to the link. */
#define to_devlink(dev)	container_of((dev), struct device_link, link_dev)

/* sysfs "status" attribute: human-readable device link state. */
static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	const char *output;

	switch (to_devlink(dev)->status) {
	case DL_STATE_NONE:
		output = "not tracked";
		break;
	case DL_STATE_DORMANT:
		output = "dormant";
		break;
	case DL_STATE_AVAILABLE:
		output = "available";
		break;
	case DL_STATE_CONSUMER_PROBE:
		output = "consumer probing";
		break;
	case DL_STATE_ACTIVE:
		output = "active";
		break;
	case DL_STATE_SUPPLIER_UNBIND:
		output = "supplier unbinding";
		break;
	default:
		output = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(status);
423
424static ssize_t auto_remove_on_show(struct device *dev,
425 struct device_attribute *attr, char *buf)
426{
427 struct device_link *link = to_devlink(dev);
428 const char *output;
429
430 if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
431 output = "supplier unbind";
432 else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
433 output = "consumer unbind";
434 else
435 output = "never";
436
437 return sysfs_emit(buf, "%s\n", output);
438}
439static DEVICE_ATTR_RO(auto_remove_on);
440
441static ssize_t runtime_pm_show(struct device *dev,
442 struct device_attribute *attr, char *buf)
443{
444 struct device_link *link = to_devlink(dev);
445
446 return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
447}
448static DEVICE_ATTR_RO(runtime_pm);
449
450static ssize_t sync_state_only_show(struct device *dev,
451 struct device_attribute *attr, char *buf)
452{
453 struct device_link *link = to_devlink(dev);
454
455 return sysfs_emit(buf, "%d\n",
456 !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
457}
458static DEVICE_ATTR_RO(sync_state_only);
459
/* Default sysfs attributes exposed by every device link device. */
static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);
468
/* Workqueue callback that finally frees a device link. */
static void device_link_release_fn(struct work_struct *work)
{
	struct device_link *link = container_of(work, struct device_link, rm_work);

	/* Ensure that all references to the link object have been dropped. */
	device_link_synchronize_removal();

	/*
	 * Drop all outstanding runtime-PM references the link still holds on
	 * the supplier (rpm_active is left at 1, its initial value).
	 */
	while (refcount_dec_not_one(&link->rpm_active))
		pm_runtime_put(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}
483
/* Class release callback: defer the actual teardown to a workqueue. */
static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	INIT_WORK(&link->rm_work, device_link_release_fn);
	/*
	 * It may take a while to complete this work because of the SRCU
	 * synchronization in device_link_release_fn() and if the consumer or
	 * supplier devices get deleted when it runs, so put it into the "long"
	 * workqueue.
	 */
	queue_work(system_long_wq, &link->rm_work);
}
497
/* The "devlink" device class under which all link devices are registered. */
static struct class devlink_class = {
	.name = "devlink",
	.owner = THIS_MODULE,
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};
504
/*
 * Create the four sysfs symlinks that tie a link device to its endpoints:
 * "supplier"/"consumer" under the link device, and
 * "consumer:<bus>:<name>"/"supplier:<bus>:<name>" under the endpoints.
 * On failure, previously created links are removed in reverse order.
 */
static int devlink_add_symlinks(struct device *dev,
				struct class_interface *class_intf)
{
	int ret;
	size_t len;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	/*
	 * Size the scratch buffer for the longer of the two
	 * "<prefix>:<bus>:<name>" strings ("supplier:" is the longer prefix;
	 * the extra ":" and NUL are accounted for explicitly).
	 */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_con_dev;

	snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_sup_dev;

	goto out;

err_sup_dev:
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	kfree(buf);
	return ret;
}
554
/* Remove the sysfs symlinks created by devlink_add_symlinks(). */
static void devlink_remove_symlinks(struct device *dev,
				   struct class_interface *class_intf)
{
	struct device_link *link = to_devlink(dev);
	size_t len;
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	/* Same buffer sizing as in devlink_add_symlinks(). */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		/* Nothing sane to do without the name buffer; leak the links. */
		WARN(1, "Unable to properly free device link symlinks!\n");
		return;
	}

	snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
	sysfs_remove_link(&con->kobj, buf);
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
	kfree(buf);
}
583
/* Class interface used to (un)populate symlinks as link devices come and go. */
static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};
589
590static int __init devlink_class_init(void)
591{
592 int ret;
593
594 ret = class_register(&devlink_class);
595 if (ret)
596 return ret;
597
598 ret = class_interface_register(&devlink_class_intf);
599 if (ret)
600 class_unregister(&devlink_class);
601
602 return ret;
603}
604postcore_initcall(devlink_class_init);
605
/* Flags that only make sense on managed (non-stateless) links. */
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER | \
			       DL_FLAG_SYNC_STATE_ONLY | \
			       DL_FLAG_INFERRED)

/* Full set of flags callers may pass to device_link_add(). */
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * Creates (or reuses) a link that makes @consumer depend on @supplier for
 * probe ordering, unbinding and (with DL_FLAG_PM_RUNTIME) runtime PM.
 * DL_FLAG_STATELESS links are refcounted and must be dropped explicitly with
 * device_link_del()/device_link_remove(); all other links are managed by the
 * driver core.  DL_FLAG_SYNC_STATE_ONLY links only affect sync_state()
 * behavior and impose no probe/suspend ordering.
 *
 * Returns the link on success, NULL on invalid flag combinations, allocation
 * failure, or when the link would create a dependency cycle.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/* Reject invalid and mutually exclusive flag combinations up front. */
	if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
	     (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse dependency between the consumer and the supplier already in
	 * the graph, refuse to create the link - it would make the dependency
	 * graph cyclic.  SYNC_STATE_ONLY links do not add real dependencies,
	 * so the cycle check is skipped for them.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		  device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * SYNC_STATE_ONLY links are useless once a consumer device has
	 * probed, so only create them while the consumer is still unbound
	 * or probing.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
	    consumer->links.status != DL_DEV_NO_DRIVER &&
	    consumer->links.status != DL_DEV_PROBING) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER, so the former takes
	 * precedence when both are requested.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	/* Reuse an existing link between this pair if there is one. */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		/* An explicit request upgrades an inferred link. */
		if (link->flags & DL_FLAG_INFERRED &&
		    !(flags & DL_FLAG_INFERRED))
			link->flags &= ~DL_FLAG_INFERRED;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
			    !(link->flags & DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the life time of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s:%s--%s:%s",
		     dev_bus_name(supplier), dev_name(supplier),
		     dev_bus_name(consumer), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		put_device(consumer);
		put_device(supplier);
		kfree(link);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Some callers expect the link creation during consumer driver probe
	 * to resume the supplier even without DL_FLAG_RPM_ACTIVE.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	/* Balance the pm_runtime_get_sync() above on failure. */
	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
869
/*
 * Final kref release: detach the link from both endpoints' lists and
 * unregister its device (freeing continues in devlink_dev_release()).
 * Called with the device links write lock held.
 */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_dbg(link->consumer, "Dropping the link to %s\n",
		dev_name(link->supplier));

	pm_runtime_drop_link(link);

	device_link_remove_from_lists(link);
	device_unregister(&link->link_dev);
}
882
883static void device_link_put_kref(struct device_link *link)
884{
885 if (link->flags & DL_FLAG_STATELESS)
886 kref_put(&link->kref, __device_link_del);
887 else
888 WARN(1, "Unable to drop a managed device link reference\n");
889}
890
891
892
893
894
895
896
897
898
899
/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.  The link is dropped under the device links write lock; if this was the
 * last reference, the link goes away.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
907
908
909
910
911
912
913
914
915
916void device_link_remove(void *consumer, struct device *supplier)
917{
918 struct device_link *link;
919
920 if (WARN_ON(consumer == supplier))
921 return;
922
923 device_links_write_lock();
924
925 list_for_each_entry(link, &supplier->links.consumers, s_node) {
926 if (link->consumer == consumer) {
927 device_link_put_kref(link);
928 break;
929 }
930 }
931
932 device_links_write_unlock();
933}
934EXPORT_SYMBOL_GPL(device_link_remove);
935
/*
 * Roll back links that were moved to "consumer probe" by a probe attempt that
 * is being deferred because some supplier is not ready.
 */
static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->status != DL_STATE_CONSUMER_PROBE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			/* Only SYNC_STATE_ONLY links may probe ahead of the supplier. */
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}
}
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers.  Walk the list of the
 * device's links to suppliers and see if all of them are available.  If not,
 * return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here.  It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers.  This means we need to
 * mark the link as "consumer probe in progress" to make the supplier removal
 * wait for us to complete (or bad things may happen).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;

	/*
	 * Device waiting for supplier to become available is not allowed to
	 * probe.
	 */
	mutex_lock(&fwnode_link_lock);
	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
	    !fw_devlink_is_permissive()) {
		dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
			list_first_entry(&dev->fwnode->suppliers,
					 struct fwnode_link,
					 c_hook)->supplier);
		mutex_unlock(&fwnode_link_lock);
		return -EPROBE_DEFER;
	}
	mutex_unlock(&fwnode_link_lock);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE &&
		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
			device_links_missing_supplier(dev);
			dev_dbg(dev, "probe deferral - supplier %s not ready\n",
				dev_name(link->supplier));
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
/**
 * __device_links_queue_sync_state - Queue a device for sync_state() callback
 * @dev: Device to call sync_state() on
 * @list: List head to queue the @dev on
 *
 * Queues a device for a sync_state() callback when the device links write lock
 * isn't held.  This allows the sync_state() execution flow to use device
 * links APIs.  The caller must hold the device links write lock.
 *
 * The device is queued only once all its managed consumers are active.
 */
static void __device_links_queue_sync_state(struct device *dev,
					    struct list_head *list)
{
	struct device_link *link;

	if (!dev_has_sync_state(dev))
		return;
	if (dev->state_synced)
		return;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;
		if (link->status != DL_STATE_ACTIVE)
			return;
	}

	/*
	 * Set the flag here to avoid adding the same device to a list more
	 * than once.  This can happen if new consumers get added to the device
	 * and probed before the list is flushed.
	 */
	dev->state_synced = true;

	if (WARN_ON(!list_empty(&dev->links.defer_sync)))
		return;

	get_device(dev);
	list_add_tail(&dev->links.defer_sync, list);
}
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
/**
 * device_links_flush_sync_list - Call sync_state() on a list of devices
 * @list: List of devices to call sync_state() on
 * @dont_lock_dev: Device for which lock is already held by the caller
 *
 * Calls sync_state() on all the devices that have been queued for it.  This
 * function is used in conjunction with __device_links_queue_sync_state().
 * The @dont_lock_dev parameter is useful when this function is called from a
 * context where a device lock is already held.
 */
static void device_links_flush_sync_list(struct list_head *list,
					 struct device *dont_lock_dev)
{
	struct device *dev, *tmp;

	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
		list_del_init(&dev->links.defer_sync);

		if (dev != dont_lock_dev)
			device_lock(dev);

		/* Bus-level sync_state() takes precedence over the driver's. */
		if (dev->bus->sync_state)
			dev->bus->sync_state(dev);
		else if (dev->driver && dev->driver->sync_state)
			dev->driver->sync_state(dev);

		if (dev != dont_lock_dev)
			device_unlock(dev);

		/* Drops the reference taken by __device_links_queue_sync_state(). */
		put_device(dev);
	}
}
1093
/* Increase the depth of sync_state() deferral; pairs with _resume(). */
void device_links_supplier_sync_state_pause(void)
{
	device_links_write_lock();
	defer_sync_state_count++;
	device_links_write_unlock();
}
1100
/*
 * Decrease the sync_state() deferral depth and, once it drops to zero,
 * queue and run the sync_state() callbacks deferred in the meantime.
 */
void device_links_supplier_sync_state_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(sync_list);

	device_links_write_lock();
	if (!defer_sync_state_count) {
		WARN(true, "Unmatched sync_state pause/resume!");
		goto out;
	}
	defer_sync_state_count--;
	if (defer_sync_state_count)
		goto out;

	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
		/*
		 * Delete from deferred_sync list before queuing it to
		 * sync_list because defer_sync is used for both lists.
		 */
		list_del_init(&dev->links.defer_sync);
		__device_links_queue_sync_state(dev, &sync_list);
	}
out:
	device_links_write_unlock();

	/* Must run without the device links lock held. */
	device_links_flush_sync_list(&sync_list, NULL);
}
1128
/* Release the boot-time sync_state() pause (defer_sync_state_count init = 1). */
static int sync_state_resume_initcall(void)
{
	device_links_supplier_sync_state_resume();
	return 0;
}
late_initcall(sync_state_resume_initcall);
1135
/* Queue @sup on the deferred_sync list unless it is already queued. */
static void __device_links_supplier_defer_sync(struct device *sup)
{
	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
		list_add_tail(&sup->links.defer_sync, &deferred_sync);
}
1141
/* Demote a managed link and drop the reference the driver core held on it. */
static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}
1148
1149static ssize_t waiting_for_supplier_show(struct device *dev,
1150 struct device_attribute *attr,
1151 char *buf)
1152{
1153 bool val;
1154
1155 device_lock(dev);
1156 val = !list_empty(&dev->fwnode->suppliers);
1157 device_unlock(dev);
1158 return sysfs_emit(buf, "%u\n", val);
1159}
1160static DEVICE_ATTR_RO(waiting_for_supplier);
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
/**
 * device_links_force_bind - Prepares device to be force bound
 * @dev: Consumer device.
 *
 * device_bind_driver() force binds a device to a driver without calling any
 * driver probe functions.  So the consumer really isn't going to wait for any
 * supplier before it's bound to the driver.  We still want the device link
 * states to be sensible when this happens.
 *
 * In preparation for device_bind_driver(), this function goes through each
 * supplier device links and checks if the supplier is bound.  If it is, we
 * set the link state to CONSUMER_PROBE.  Otherwise, we drop the link.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_force_bind(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE) {
			device_link_drop_managed(link);
			continue;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
}
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".
 *
 * Also change the status of @dev's links to suppliers to "active" and queue
 * sync_state() callbacks for suppliers that no longer have pending consumers.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link, *ln;
	LIST_HEAD(sync_list);

	/*
	 * If a device binds successfully, it's expected to not care about
	 * fwnode links anymore.  So, fw_devlink no longer needs to create
	 * device links to any of the device's suppliers.
	 *
	 * Also, if a child firmware node of this bound device is not added as
	 * a device by now, assume it is never going to be added and make sure
	 * other devices don't defer probe indefinitely by waiting for such a
	 * child device.
	 */
	if (dev->fwnode && dev->fwnode->dev == dev) {
		struct fwnode_handle *child;
		fwnode_links_purge_suppliers(dev->fwnode);
		fwnode_for_each_available_child_node(dev->fwnode, child)
			fw_devlink_purge_absent_suppliers(child);
	}
	device_remove_file(dev, &dev_attr_waiting_for_supplier);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * Links created during consumer probe may be in the "consumer
		 * probe" state to start with if the supplier is still probing
		 * when they are created and they may become "active" if the
		 * consumer probe returns first.  Skip them here.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	if (defer_sync_state_count)
		__device_links_supplier_defer_sync(dev);
	else
		__device_links_queue_sync_state(dev, &sync_list);

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		struct device *supplier;

		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		supplier = link->supplier;
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
			/*
			 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
			 * other DL_MANAGED_LINK_FLAGS have been set.  So, it's
			 * save to drop the managed link completely.
			 */
			device_link_drop_managed(link);
		} else {
			WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
			WRITE_ONCE(link->status, DL_STATE_ACTIVE);
		}

		/*
		 * This needs to be done even for the deleted
		 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
		 * device link that was preventing the supplier from getting a
		 * sync_state() call.
		 */
		if (defer_sync_state_count)
			__device_links_supplier_defer_sync(supplier);
		else
			__device_links_queue_sync_state(supplier, &sync_list);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();

	/* Run the queued sync_state() callbacks without the links lock held. */
	device_links_flush_sync_list(&sync_list, dev);
}
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
/*
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
			device_link_drop_managed(link);
			continue;
		}

		if (link->status != DL_STATE_CONSUMER_PROBE &&
		    link->status != DL_STATE_ACTIVE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			/* Only SYNC_STATE_ONLY links can be active here. */
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver has just failed to probe.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * The probe has failed, so if the status of the link is
		 * "consumer probe" or "active", it must have been added by
		 * a probing consumer while this device was still probing.
		 * Change its state to "dormant", as it represents a valid
		 * relationship, but it is not functionally meaningful.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant" and
 * invoke %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

		/*
		 * autoremove the links between this @dev and its consumer
		 * devices that are not active, i.e. where the link state
		 * has moved to DL_STATE_SUPPLIER_UNBIND.
		 */
		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
			device_link_drop_managed(link);

		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	list_del_init(&dev->links.defer_sync);
	__device_links_no_driver(dev);

	device_links_write_unlock();
}
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430bool device_links_busy(struct device *dev)
1431{
1432 struct device_link *link;
1433 bool ret = false;
1434
1435 device_links_write_lock();
1436
1437 list_for_each_entry(link, &dev->links.consumers, s_node) {
1438 if (!(link->flags & DL_FLAG_MANAGED))
1439 continue;
1440
1441 if (link->status == DL_STATE_CONSUMER_PROBE
1442 || link->status == DL_STATE_ACTIVE) {
1443 ret = true;
1444 break;
1445 }
1446 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
1447 }
1448
1449 dev->links.status = DL_DEV_UNBINDING;
1450
1451 device_links_write_unlock();
1452 return ret;
1453}
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev and if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to complete
 * and start over.
 *
 * If that's not the case, change the status of the link to "supplier unbind"
 * and check if the link was in the "active" state.  If so, force the consumer
 * driver to unbind and start over (the consumer will not re-probe as we have
 * changed the state of the link already).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (!(link->flags & DL_FLAG_MANAGED) ||
		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			/* Cannot unbind while probing; wait and retry. */
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			get_device(consumer);

			/* Must drop the lock before unbinding the consumer. */
			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}
1508
1509
1510
1511
1512
/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	/* Link devices themselves never carry links of their own. */
	if (dev->class == &devlink_class)
		return;

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}
1539
/* Device-link flag sets used for the "fw_devlink=" boot-parameter modes. */
#define FW_DEVLINK_FLAGS_PERMISSIVE	(DL_FLAG_INFERRED | \
					 DL_FLAG_SYNC_STATE_ONLY)
#define FW_DEVLINK_FLAGS_ON	(DL_FLAG_INFERRED | \
				 DL_FLAG_AUTOPROBE_CONSUMER)
#define FW_DEVLINK_FLAGS_RPM		(FW_DEVLINK_FLAGS_ON | \
					 DL_FLAG_PM_RUNTIME)
1546
1547static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
1548static int __init fw_devlink_setup(char *arg)
1549{
1550 if (!arg)
1551 return -EINVAL;
1552
1553 if (strcmp(arg, "off") == 0) {
1554 fw_devlink_flags = 0;
1555 } else if (strcmp(arg, "permissive") == 0) {
1556 fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
1557 } else if (strcmp(arg, "on") == 0) {
1558 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
1559 } else if (strcmp(arg, "rpm") == 0) {
1560 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
1561 }
1562 return 0;
1563}
1564early_param("fw_devlink", fw_devlink_setup);
1565
1566static bool fw_devlink_strict;
1567static int __init fw_devlink_strict_setup(char *arg)
1568{
1569 return strtobool(arg, &fw_devlink_strict);
1570}
1571early_param("fw_devlink.strict", fw_devlink_strict_setup);
1572
/* Device-link flags that fw_devlink should use when creating links. */
u32 fw_devlink_get_flags(void)
{
	return fw_devlink_flags;
}
1577
/* True when fw_devlink is running in "permissive" mode. */
static bool fw_devlink_is_permissive(void)
{
	return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
}
1582
/* True when fw_devlink.strict is set and fw_devlink isn't permissive. */
bool fw_devlink_is_strict(void)
{
	return fw_devlink_strict && !fw_devlink_is_permissive();
}
1587
/*
 * Invoke the fwnode's ->add_links() op once to create fwnode links for
 * @fwnode; the FWNODE_FLAG_LINKS_ADDED flag makes this idempotent.
 */
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
{
	if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
		return;

	fwnode_call_int_op(fwnode, add_links);
	fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
}
1596
1597static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
1598{
1599 struct fwnode_handle *child = NULL;
1600
1601 fw_devlink_parse_fwnode(fwnode);
1602
1603 while ((child = fwnode_get_next_available_child_node(fwnode, child)))
1604 fw_devlink_parse_fwtree(child);
1605}
1606
/*
 * Downgrade an fw_devlink-inferred device link to a SYNC_STATE_ONLY
 * (permissive) link so it no longer blocks the consumer's probing.
 * Caller must hold the device links write lock.
 */
static void fw_devlink_relax_link(struct device_link *link)
{
	/* Only links inferred by fw_devlink may be relaxed. */
	if (!(link->flags & DL_FLAG_INFERRED))
		return;

	/* Already fully relaxed; nothing to do. */
	if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
		return;

	pm_runtime_drop_link(link);
	link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
	dev_dbg(link->consumer, "Relaxing link with %s\n",
		dev_name(link->supplier));
}
1620
1621static int fw_devlink_no_driver(struct device *dev, void *data)
1622{
1623 struct device_link *link = to_devlink(dev);
1624
1625 if (!link->supplier->can_match)
1626 fw_devlink_relax_link(link);
1627
1628 return 0;
1629}
1630
/*
 * Called once no more built-in drivers are expected to register.  Relaxes
 * every inferred device link whose supplier cannot match any driver, so
 * those consumers stop waiting.
 */
void fw_devlink_drivers_done(void)
{
	fw_devlink_drv_reg_done = true;
	device_links_write_lock();
	class_for_each_device(&devlink_class, NULL, NULL,
			      fw_devlink_no_driver);
	device_links_write_unlock();
}
1639
/*
 * Relax all inferred device links where @dev is the supplier, so its
 * consumers are no longer blocked from probing.  No-op when fw_devlink is
 * off or already permissive.
 */
static void fw_devlink_unblock_consumers(struct device *dev)
{
	struct device_link *link;

	if (!fw_devlink_flags || fw_devlink_is_permissive())
		return;

	device_links_write_lock();
	list_for_each_entry(link, &dev->links.consumers, s_node)
		fw_devlink_relax_link(link);
	device_links_write_unlock();
}
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
/**
 * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
 * @con: Device to check dependencies on.
 * @sup: Device to check against (void * so this can double as a
 *	 device_for_each_child() callback).
 *
 * Check if @sup depends on @con or any device dependent on it (its child or
 * its consumer, etc).  When such a cyclic dependency is found, convert all
 * device links created solely by fw_devlink into SYNC_STATE_ONLY device
 * links.  This is the equivalent of applying fw_devlink=permissive just
 * between the devices in the cycle.
 *
 * Returns 1 when a cycle through @sup was found, 0 otherwise.  The caller
 * must hold the device links write lock.
 */
static int fw_devlink_relax_cycle(struct device *con, void *sup)
{
	struct device_link *link;
	int ret;

	/* Reached the target supplier: a dependency cycle exists. */
	if (con == sup)
		return 1;

	/* Recurse through the children of @con. */
	ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
	if (ret)
		return ret;

	list_for_each_entry(link, &con->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links are already relaxed. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (!fw_devlink_relax_cycle(link->consumer, sup))
			continue;

		ret = 1;

		fw_devlink_relax_link(link);
	}
	return ret;
}
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
/**
 * fw_devlink_create_devlink - Create a device link from a consumer to the
 *			       device of a supplier fwnode
 * @con: Consumer device.
 * @sup_handle: Supplier fwnode.
 * @flags: Device-link flags to use.
 *
 * Returns 0 on success, -EAGAIN when the link cannot be created yet (retry
 * later), or -EINVAL when the fwnode link should be treated as invalid.
 */
static int fw_devlink_create_devlink(struct device *con,
				     struct fwnode_handle *sup_handle, u32 flags)
{
	struct device *sup_dev;
	int ret = 0;

	sup_dev = get_dev_from_fwnode(sup_handle);
	if (sup_dev) {
		/*
		 * If it's one of those drivers that don't actually bind to
		 * the device using driver core, then don't wait on this
		 * supplier device indefinitely.
		 */
		if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
		    sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
			ret = -EINVAL;
			goto out;
		}

		/*
		 * If this fails, it is due to cycles in device links.  Just
		 * give up on this link and treat it as invalid.
		 */
		if (!device_link_add(con, sup_dev, flags) &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			dev_info(con, "Fixing up cyclic dependency with %s\n",
				 dev_name(sup_dev));
			device_links_write_lock();
			fw_devlink_relax_cycle(con, sup_dev);
			device_links_write_unlock();
			device_link_add(con, sup_dev,
					FW_DEVLINK_FLAGS_PERMISSIVE);
			ret = -EINVAL;
		}

		goto out;
	}

	/* Supplier that's already initialized without a struct device. */
	if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
		return -EINVAL;

	/*
	 * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
	 * cycles.  So cycle detection isn't necessary and shouldn't be
	 * done.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY)
		return -EAGAIN;

	/*
	 * If we can't find the supplier device from its fwnode, it might be
	 * due to a cyclic dependency between fwnodes.  If the supplier's
	 * parent is dependent on the consumer, the consumer and supplier have
	 * a cyclic dependency.  Since fw_devlink can't tell which of the
	 * inferred dependencies are incorrect, don't enforce probe ordering
	 * between any of the devices in this cycle: treat the fwnode link
	 * between the consumer and the supplier as an invalid dependency.
	 */
	sup_dev = fwnode_get_next_parent_dev(sup_handle);
	if (sup_dev && device_is_dependent(con, sup_dev)) {
		dev_dbg(con, "Not linking to %pfwP - False link\n",
			sup_handle);
		ret = -EINVAL;
	} else {
		/*
		 * Can't check for cycles or no cycles.  So let's try
		 * again later.
		 */
		ret = -EAGAIN;
	}

out:
	/* put_device() tolerates a NULL sup_dev here. */
	put_device(sup_dev);
	return ret;
}
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
/**
 * __fw_devlink_link_to_consumers - Create device links to consumers of a device
 * @dev: Device that needs to be linked to its consumers
 *
 * Looks at all the consumer fwnodes of @dev and creates device links between
 * the consumer device and @dev.  Once a device link is created, the fwnode
 * link is deleted.  Caller must hold fwnode_link_lock.
 */
static void __fw_devlink_link_to_consumers(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;
	struct fwnode_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
		u32 dl_flags = fw_devlink_get_flags();
		struct device *con_dev;
		bool own_link = true;
		int ret;

		con_dev = get_dev_from_fwnode(link->consumer);
		/*
		 * If the consumer device is not available yet, make a
		 * "proxy" permissive link from the closest ancestor device
		 * of the consumer fwnode to @dev, so @dev doesn't get a
		 * sync_state() callback before the real consumer can link
		 * to it.
		 */
		if (!con_dev) {
			con_dev = fwnode_get_next_parent_dev(link->consumer);
			/*
			 * However, if that ancestor is also an ancestor of
			 * the supplier, a link from it to its own descendant
			 * would be an impossible dependency; skip it.
			 */
			if (con_dev &&
			    fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
				put_device(con_dev);
				con_dev = NULL;
			} else {
				own_link = false;
				dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
			}
		}

		if (!con_dev)
			continue;

		ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
		put_device(con_dev);
		/* Keep the fwnode link for proxy links or retriable errors. */
		if (!own_link || ret == -EAGAIN)
			continue;

		list_del(&link->s_hook);
		list_del(&link->c_hook);
		kfree(link);
	}
}
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
/**
 * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
 * @dev: The consumer device that needs to be linked to its suppliers
 * @fwnode: Root of the fwnode tree that is used to create device links
 *
 * Creates device links from @dev to the suppliers listed in @fwnode and in
 * all its descendant fwnodes (descendant links are made permissive proxies).
 * Own fwnode links are deleted once a real device link is created or the
 * link is deemed invalid.  Caller must hold fwnode_link_lock.
 */
static void __fw_devlink_link_to_suppliers(struct device *dev,
					   struct fwnode_handle *fwnode)
{
	bool own_link = (dev->fwnode == fwnode);
	struct fwnode_link *link, *tmp;
	struct fwnode_handle *child = NULL;
	u32 dl_flags;

	if (own_link)
		dl_flags = fw_devlink_get_flags();
	else
		dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;

	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
		int ret;
		struct device *sup_dev;
		struct fwnode_handle *sup = link->supplier;

		ret = fw_devlink_create_devlink(dev, sup, dl_flags);
		if (!own_link || ret == -EAGAIN)
			continue;

		list_del(&link->s_hook);
		list_del(&link->c_hook);
		kfree(link);

		/* No device link was created; nothing more to do. */
		if (ret)
			continue;

		/*
		 * A device link to the supplier was created, so the supplier
		 * device must exist (fw_devlink_create_devlink() returned 0).
		 * Now try to link that supplier to its own suppliers, so its
		 * fwnode links are converted before it probes.
		 */
		sup_dev = get_dev_from_fwnode(sup);
		__fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
		put_device(sup_dev);
	}

	/*
	 * Make proxy links from @dev for the supplier needs of all the
	 * available descendant fwnodes of @fwnode.
	 */
	while ((child = fwnode_get_next_available_child_node(fwnode, child)))
		__fw_devlink_link_to_suppliers(dev, child);
}
1945
/*
 * Create device links for a newly added device, in both directions:
 * to its waiting consumers and to its suppliers.
 */
static void fw_devlink_link_device(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;

	if (!fw_devlink_flags)
		return;

	fw_devlink_parse_fwtree(fwnode);

	mutex_lock(&fwnode_link_lock);
	__fw_devlink_link_to_consumers(dev);
	__fw_devlink_link_to_suppliers(dev, fwnode);
	mutex_unlock(&fwnode_link_lock);
}
1960
1961
1962
/* Optional platform hooks invoked on device addition/removal. */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
/* /sys/dev/char and /sys/dev/block kobjects. */
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;

static DEFINE_MUTEX(device_hotplug_lock);
1970
/* Serialize device hotplug operations (e.g. online/offline). */
void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}
1975
/* Counterpart to lock_device_hotplug(). */
void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}
1980
/*
 * Try to take device_hotplug_lock from a sysfs callback.  If it is
 * contended, sleep briefly and restart the syscall instead of blocking,
 * which avoids deadlocks between sysfs writers and hotplug operations.
 */
int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}
1990
#ifdef CONFIG_BLOCK
/* A partition device has dev->type == &part_type; anything else is not one. */
static inline int device_is_not_partition(struct device *dev)
{
	return dev->type != &part_type;
}
#else
/* Without block support nothing can be a partition. */
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif
2002
2003static int
2004device_platform_notify(struct device *dev, enum kobject_action action)
2005{
2006 int ret;
2007
2008 ret = acpi_platform_notify(dev, action);
2009 if (ret)
2010 return ret;
2011
2012 ret = software_node_notify(dev, action);
2013 if (ret)
2014 return ret;
2015
2016 if (platform_notify && action == KOBJ_ADD)
2017 platform_notify(dev);
2018 else if (platform_notify_remove && action == KOBJ_REMOVE)
2019 platform_notify_remove(dev);
2020 return 0;
2021}
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
/**
 * dev_driver_string - Return a device's driver name, if at all possible
 * @dev: struct device to get the name of
 *
 * Will return the device's driver's name if it is bound to a device.  If
 * the device is not bound to a driver, it will return the name of the bus
 * it is attached to (via dev_bus_name()).
 */
const char *dev_driver_string(const struct device *dev)
{
	struct device_driver *drv;

	/*
	 * dev->driver can change to NULL underneath us because of unbinding,
	 * so be careful about accessing it.
	 */
	drv = READ_ONCE(dev->driver);
	return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
2044
2045#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
2046
2047static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
2048 char *buf)
2049{
2050 struct device_attribute *dev_attr = to_dev_attr(attr);
2051 struct device *dev = kobj_to_dev(kobj);
2052 ssize_t ret = -EIO;
2053
2054 if (dev_attr->show)
2055 ret = dev_attr->show(dev, dev_attr, buf);
2056 if (ret >= (ssize_t)PAGE_SIZE) {
2057 printk("dev_attr_show: %pS returned bad count\n",
2058 dev_attr->show);
2059 }
2060 return ret;
2061}
2062
2063static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
2064 const char *buf, size_t count)
2065{
2066 struct device_attribute *dev_attr = to_dev_attr(attr);
2067 struct device *dev = kobj_to_dev(kobj);
2068 ssize_t ret = -EIO;
2069
2070 if (dev_attr->store)
2071 ret = dev_attr->store(dev, dev_attr, buf, count);
2072 return ret;
2073}
2074
/* sysfs ops for device attributes: dispatch to the attribute callbacks. */
static const struct sysfs_ops dev_sysfs_ops = {
	.show = dev_attr_show,
	.store = dev_attr_store,
};
2079
2080#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
2081
2082ssize_t device_store_ulong(struct device *dev,
2083 struct device_attribute *attr,
2084 const char *buf, size_t size)
2085{
2086 struct dev_ext_attribute *ea = to_ext_attr(attr);
2087 int ret;
2088 unsigned long new;
2089
2090 ret = kstrtoul(buf, 0, &new);
2091 if (ret)
2092 return ret;
2093 *(unsigned long *)(ea->var) = new;
2094
2095 return size;
2096}
2097EXPORT_SYMBOL_GPL(device_store_ulong);
2098
2099ssize_t device_show_ulong(struct device *dev,
2100 struct device_attribute *attr,
2101 char *buf)
2102{
2103 struct dev_ext_attribute *ea = to_ext_attr(attr);
2104 return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
2105}
2106EXPORT_SYMBOL_GPL(device_show_ulong);
2107
2108ssize_t device_store_int(struct device *dev,
2109 struct device_attribute *attr,
2110 const char *buf, size_t size)
2111{
2112 struct dev_ext_attribute *ea = to_ext_attr(attr);
2113 int ret;
2114 long new;
2115
2116 ret = kstrtol(buf, 0, &new);
2117 if (ret)
2118 return ret;
2119
2120 if (new > INT_MAX || new < INT_MIN)
2121 return -EINVAL;
2122 *(int *)(ea->var) = new;
2123
2124 return size;
2125}
2126EXPORT_SYMBOL_GPL(device_store_int);
2127
2128ssize_t device_show_int(struct device *dev,
2129 struct device_attribute *attr,
2130 char *buf)
2131{
2132 struct dev_ext_attribute *ea = to_ext_attr(attr);
2133
2134 return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
2135}
2136EXPORT_SYMBOL_GPL(device_show_int);
2137
2138ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
2139 const char *buf, size_t size)
2140{
2141 struct dev_ext_attribute *ea = to_ext_attr(attr);
2142
2143 if (strtobool(buf, ea->var) < 0)
2144 return -EINVAL;
2145
2146 return size;
2147}
2148EXPORT_SYMBOL_GPL(device_store_bool);
2149
2150ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
2151 char *buf)
2152{
2153 struct dev_ext_attribute *ea = to_ext_attr(attr);
2154
2155 return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
2156}
2157EXPORT_SYMBOL_GPL(device_show_bool);
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167static void device_release(struct kobject *kobj)
2168{
2169 struct device *dev = kobj_to_dev(kobj);
2170 struct device_private *p = dev->p;
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181 devres_release_all(dev);
2182
2183 kfree(dev->dma_range_map);
2184
2185 if (dev->release)
2186 dev->release(dev);
2187 else if (dev->type && dev->type->release)
2188 dev->type->release(dev);
2189 else if (dev->class && dev->class->dev_release)
2190 dev->class->dev_release(dev);
2191 else
2192 WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
2193 dev_name(dev));
2194 kfree(p);
2195}
2196
2197static const void *device_namespace(struct kobject *kobj)
2198{
2199 struct device *dev = kobj_to_dev(kobj);
2200 const void *ns = NULL;
2201
2202 if (dev->class && dev->class->ns_type)
2203 ns = dev->class->namespace(dev);
2204
2205 return ns;
2206}
2207
2208static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
2209{
2210 struct device *dev = kobj_to_dev(kobj);
2211
2212 if (dev->class && dev->class->get_ownership)
2213 dev->class->get_ownership(dev, uid, gid);
2214}
2215
/* kobject type backing every struct device embedded kobject. */
static struct kobj_type device_ktype = {
	.release = device_release,
	.sysfs_ops = &dev_sysfs_ops,
	.namespace = device_namespace,
	.get_ownership = device_get_ownership,
};
2222
2223
2224static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
2225{
2226 struct kobj_type *ktype = get_ktype(kobj);
2227
2228 if (ktype == &device_ktype) {
2229 struct device *dev = kobj_to_dev(kobj);
2230 if (dev->bus)
2231 return 1;
2232 if (dev->class)
2233 return 1;
2234 }
2235 return 0;
2236}
2237
2238static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
2239{
2240 struct device *dev = kobj_to_dev(kobj);
2241
2242 if (dev->bus)
2243 return dev->bus->name;
2244 if (dev->class)
2245 return dev->class->name;
2246 return NULL;
2247}
2248
/*
 * Fill in the uevent environment for a device: device node info, devtype,
 * driver name, OF properties, then bus-, class- and type-specific keys.
 */
static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* add device node properties if present */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

	/* have the bus specific function add its stuff */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}
2315
/* kset uevent operations for the devices kset. */
static const struct kset_uevent_ops device_uevent_ops = {
	.filter = dev_uevent_filter,
	.name = dev_uevent_name,
	.uevent = dev_uevent,
};
2321
/*
 * Show the environment variables the device's uevent would carry, one
 * KEY=value pair per line.
 */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* search the kset, the device belongs to */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect filter */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* copy keys to file */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	kfree(env);
	return len;
}
2364
2365static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
2366 const char *buf, size_t count)
2367{
2368 int rc;
2369
2370 rc = kobject_synth_uevent(&dev->kobj, buf, count);
2371
2372 if (rc) {
2373 dev_err(dev, "uevent: failed to send synthetic uevent\n");
2374 return rc;
2375 }
2376
2377 return count;
2378}
2379static DEVICE_ATTR_RW(uevent);
2380
2381static ssize_t online_show(struct device *dev, struct device_attribute *attr,
2382 char *buf)
2383{
2384 bool val;
2385
2386 device_lock(dev);
2387 val = !dev->offline;
2388 device_unlock(dev);
2389 return sysfs_emit(buf, "%u\n", val);
2390}
2391
2392static ssize_t online_store(struct device *dev, struct device_attribute *attr,
2393 const char *buf, size_t count)
2394{
2395 bool val;
2396 int ret;
2397
2398 ret = strtobool(buf, &val);
2399 if (ret < 0)
2400 return ret;
2401
2402 ret = lock_device_hotplug_sysfs();
2403 if (ret)
2404 return ret;
2405
2406 ret = val ? device_online(dev) : device_offline(dev);
2407 unlock_device_hotplug();
2408 return ret < 0 ? ret : count;
2409}
2410static DEVICE_ATTR_RW(online);
2411
/* Create the given sysfs attribute groups under the device's kobject. */
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);
2417
/* Remove sysfs attribute groups previously added by device_add_groups(). */
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);
2424
/* devres payload: either a single attribute group or a NULL-terminated list. */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};
2429
2430static int devm_attr_group_match(struct device *dev, void *res, void *data)
2431{
2432 return ((union device_attr_group_devres *)res)->group == data;
2433}
2434
2435static void devm_attr_group_remove(struct device *dev, void *res)
2436{
2437 union device_attr_group_devres *devres = res;
2438 const struct attribute_group *group = devres->group;
2439
2440 dev_dbg(dev, "%s: removing group %p\n", __func__, group);
2441 sysfs_remove_group(&dev->kobj, group);
2442}
2443
2444static void devm_attr_groups_remove(struct device *dev, void *res)
2445{
2446 union device_attr_group_devres *devres = res;
2447 const struct attribute_group **groups = devres->groups;
2448
2449 dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
2450 sysfs_remove_groups(&dev->kobj, groups);
2451}
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
2464{
2465 union device_attr_group_devres *devres;
2466 int error;
2467
2468 devres = devres_alloc(devm_attr_group_remove,
2469 sizeof(*devres), GFP_KERNEL);
2470 if (!devres)
2471 return -ENOMEM;
2472
2473 error = sysfs_create_group(&dev->kobj, grp);
2474 if (error) {
2475 devres_free(devres);
2476 return error;
2477 }
2478
2479 devres->group = grp;
2480 devres_add(dev, devres);
2481 return 0;
2482}
2483EXPORT_SYMBOL_GPL(devm_device_add_group);
2484
2485
2486
2487
2488
2489
2490
2491
2492
/**
 * devm_device_remove_group: remove a managed group from a device
 * @dev: device to remove the group from
 * @grp: group to remove
 *
 * Removes the group added with devm_device_add_group() and releases the
 * corresponding devres entry; warns if no matching entry is found.
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515int devm_device_add_groups(struct device *dev,
2516 const struct attribute_group **groups)
2517{
2518 union device_attr_group_devres *devres;
2519 int error;
2520
2521 devres = devres_alloc(devm_attr_groups_remove,
2522 sizeof(*devres), GFP_KERNEL);
2523 if (!devres)
2524 return -ENOMEM;
2525
2526 error = sysfs_create_groups(&dev->kobj, groups);
2527 if (error) {
2528 devres_free(devres);
2529 return error;
2530 }
2531
2532 devres->groups = groups;
2533 devres_add(dev, devres);
2534 return 0;
2535}
2536EXPORT_SYMBOL_GPL(devm_device_add_groups);
2537
2538
2539
2540
2541
2542
2543
2544
2545
/**
 * devm_device_remove_groups - remove a list of managed groups
 * @dev: device to remove the groups from
 * @groups: NULL-terminated list of groups
 *
 * Counterpart of devm_device_add_groups(); warns if no matching devres
 * entry is found.
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);
2554
/*
 * Create all sysfs attributes for a device being added: class groups, type
 * groups, per-device groups, the "online" attribute (for offlinable
 * devices) and the "waiting_for_supplier" attribute (when fw_devlink can
 * block probing).  On error, everything created so far is torn down in
 * reverse order.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
		if (error)
			goto err_remove_dev_online;
	}

	return 0;

 err_remove_dev_online:
	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}
2604
/* Remove everything created by device_add_attrs(), in reverse order. */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_waiting_for_supplier);
	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}
2620
/* The "dev" attribute: print the device's major:minor numbers. */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);
2627
2628
/* /sys/devices/ */
struct kset *devices_kset;
2630
2631
2632
2633
2634
2635
/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
2646
2647
2648
2649
2650
2651
/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
2662
2663
2664
2665
2666
/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}
2676
2677
2678
2679
2680
2681
2682int device_create_file(struct device *dev,
2683 const struct device_attribute *attr)
2684{
2685 int error = 0;
2686
2687 if (dev) {
2688 WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
2689 "Attribute %s: write permission without 'store'\n",
2690 attr->attr.name);
2691 WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
2692 "Attribute %s: read permission without 'show'\n",
2693 attr->attr.name);
2694 error = sysfs_create_file(&dev->kobj, &attr->attr);
2695 }
2696
2697 return error;
2698}
2699EXPORT_SYMBOL_GPL(device_create_file);
2700
2701
2702
2703
2704
2705
2706void device_remove_file(struct device *dev,
2707 const struct device_attribute *attr)
2708{
2709 if (dev)
2710 sysfs_remove_file(&dev->kobj, &attr->attr);
2711}
2712EXPORT_SYMBOL_GPL(device_remove_file);
2713
2714
2715
2716
2717
2718
2719
2720
2721bool device_remove_file_self(struct device *dev,
2722 const struct device_attribute *attr)
2723{
2724 if (dev)
2725 return sysfs_remove_file_self(&dev->kobj, &attr->attr);
2726 else
2727 return false;
2728}
2729EXPORT_SYMBOL_GPL(device_remove_file_self);
2730
2731
2732
2733
2734
2735
2736int device_create_bin_file(struct device *dev,
2737 const struct bin_attribute *attr)
2738{
2739 int error = -EINVAL;
2740 if (dev)
2741 error = sysfs_create_bin_file(&dev->kobj, attr);
2742 return error;
2743}
2744EXPORT_SYMBOL_GPL(device_create_bin_file);
2745
2746
2747
2748
2749
2750
2751void device_remove_bin_file(struct device *dev,
2752 const struct bin_attribute *attr)
2753{
2754 if (dev)
2755 sysfs_remove_bin_file(&dev->kobj, attr);
2756}
2757EXPORT_SYMBOL_GPL(device_remove_bin_file);
2758
2759static void klist_children_get(struct klist_node *n)
2760{
2761 struct device_private *p = to_device_private_parent(n);
2762 struct device *dev = p->device;
2763
2764 get_device(dev);
2765}
2766
2767static void klist_children_put(struct klist_node *n)
2768{
2769 struct device_private *p = to_device_private_parent(n);
2770 struct device *dev = p->device;
2771
2772 put_device(dev);
2773}
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing
 * its fields.  It is the first half of device_register(), if called by
 * that function, though it can also be called separately, so one may
 * use @dev's fields.  In particular, get_device()/put_device() may be
 * used for reference counting of @dev after calling this function.
 *
 * NOTE: Use put_device() to give up your reference instead of freeing
 * @dev directly once you have called this function.
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
#ifdef CONFIG_PROVE_LOCKING
	mutex_init(&dev->lockdep_mutex);
#endif
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	INIT_LIST_HEAD(&dev->links.defer_sync);
	dev->links.status = DL_DEV_NO_DRIVER;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	dev->dma_coherent = dma_default_coherent;
#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
2823
/*
 * Return the /sys/devices/virtual directory kobject, creating it on first
 * use.  NOTE(review): the lazy creation isn't locked here; presumably
 * callers are serialized elsewhere — confirm before relying on it.
 */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}
2834
/* A "glue" directory kobject created per (class, parent) combination. */
struct class_dir {
	struct kobject kobj;
	struct class *class;
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
2841
/* kobject release: free the class_dir container. */
static void class_dir_release(struct kobject *kobj)
{
	kfree(to_class_dir(kobj));
}
2847
2848static const
2849struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
2850{
2851 struct class_dir *dir = to_class_dir(kobj);
2852 return dir->class->ns_type;
2853}
2854
/* kobject type for class "glue" directories. */
static struct kobj_type class_dir_ktype = {
	.release	= class_dir_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.child_ns_type	= class_dir_child_ns_type
};
2860
/*
 * Create a "glue" directory named after @class under @parent_kobj and add
 * it to the class's glue_dirs kset.  Returns the new kobject or an
 * ERR_PTR on failure (kobject_put() handles freeing on the error path).
 */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}
2883
/* Serializes glue-dir lookup/creation against cleanup_glue_dir(). */
static DEFINE_MUTEX(gdp_mutex);

/*
 * Determine the parent kobject under which @dev's kobject will be added,
 * creating or reusing a class "glue" directory when needed.
 */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".
		 * Class-devices with a non class-device as parent, live
		 * in a "glue" directory to prevent namespace collisions.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}
2945
2946static inline bool live_in_glue_dir(struct kobject *kobj,
2947 struct device *dev)
2948{
2949 if (!kobj || !dev->class ||
2950 kobj->kset != &dev->class->p->glue_dirs)
2951 return false;
2952 return true;
2953}
2954
/* The glue dir, if any, is simply the device kobject's parent. */
static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}
2959
2960
2961
2962
2963
2964
/*
 * Make sure cleaning up the glue dir is done as the last step: we need the
 * kobject's .release handler to run while holding the global gdp_mutex.
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	unsigned int ref;

	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/*
	 * There is a race between removing a glue directory here and
	 * get_device_parent() adding a new device under it: another CPU can
	 * grab a reference to an about-to-be-emptied glue dir, and a later
	 * kobject_add() under it would then fail because the directory's
	 * sysfs entry has already been deleted.  Holding gdp_mutex across
	 * both paths, and only deleting the glue dir below when it has no
	 * children and we hold its last reference, closes that window.
	 *
	 * The refcount is read with kref_read() and decremented locally
	 * (rather than kobject_put() first) so the check and the
	 * kobject_del() happen atomically with respect to gdp_mutex.
	 */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}
3028
/*
 * Create the conventional sysfs links for a newly added device:
 * "of_node" (when a DT node is attached), and for class devices
 * "subsystem", "device" (to the parent), plus the named link back from
 * the class directory.  Undone by device_remove_class_symlinks().
 */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n",error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* deprecated block layout: disks have directories, not class links */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}
3080
/* Undo device_add_class_symlinks(); mirrors its conditional structure. */
static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	/* no per-device class link was created in the deprecated block layout */
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}
3098
3099
3100
3101
3102
3103
3104int dev_set_name(struct device *dev, const char *fmt, ...)
3105{
3106 va_list vargs;
3107 int err;
3108
3109 va_start(vargs, fmt);
3110 err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
3111 va_end(vargs);
3112 return err;
3113}
3114EXPORT_SYMBOL_GPL(dev_set_name);
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127static struct kobject *device_to_dev_kobj(struct device *dev)
3128{
3129 struct kobject *kobj;
3130
3131 if (dev->class)
3132 kobj = dev->class->dev_kobj;
3133 else
3134 kobj = sysfs_dev_char_kobj;
3135
3136 return kobj;
3137}
3138
3139static int device_create_sys_dev_entry(struct device *dev)
3140{
3141 struct kobject *kobj = device_to_dev_kobj(dev);
3142 int error = 0;
3143 char devt_str[15];
3144
3145 if (kobj) {
3146 format_dev_t(devt_str, dev->devt);
3147 error = sysfs_create_link(kobj, &dev->kobj, devt_str);
3148 }
3149
3150 return error;
3151}
3152
3153static void device_remove_sys_dev_entry(struct device *dev)
3154{
3155 struct kobject *kobj = device_to_dev_kobj(dev);
3156 char devt_str[15];
3157
3158 if (kobj) {
3159 format_dev_t(devt_str, dev->devt);
3160 sysfs_remove_link(kobj, devt_str);
3161 }
3162}
3163
3164static int device_private_init(struct device *dev)
3165{
3166 dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
3167 if (!dev->p)
3168 return -ENOMEM;
3169 dev->p->device = dev;
3170 klist_init(&dev->p->klist_children, klist_children_get,
3171 klist_children_put);
3172 INIT_LIST_HEAD(&dev->p->deferred_probe);
3173 return 0;
3174}
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
/**
 * device_add - add device to device hierarchy.
 * @dev: device.
 *
 * This is part 2 of device_register(), though may be called separately
 * iff device_initialize() has been called separately.
 *
 * This adds @dev to the kobject hierarchy via kobject_add(), adds it to
 * the global and sibling lists for the device, then adds it to the other
 * relevant subsystems of the driver model.
 *
 * Do not call this routine or device_register() more than once for any
 * device structure.  If device_add() succeeds, call device_del() to get
 * rid of the device; if it has *not* succeeded, use *only* put_device()
 * to drop the reference count.
 */
int device_add(struct device *dev)
{
	struct device *parent;
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
	struct kobject *glue_dir = NULL;

	dev = get_device(dev);
	if (!dev)
		goto done;

	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * For statically allocated devices the name may be set via
	 * init_name; consume it here and force use of dev_name() after.
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	/* subsystems can specify simple device enumeration */
	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	parent = get_device(dev->parent);
	kobj = get_device_parent(dev, parent);
	if (IS_ERR(kobj)) {
		error = PTR_ERR(kobj);
		goto parent_error;
	}
	if (kobj)
		dev->kobj.parent = kobj;

	/* use parent numa_node */
	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
		set_dev_node(dev, dev_to_node(parent));

	/*
	 * First, register with the generic kobject layer.  The name must
	 * already be set, so pass NULL for the format.
	 */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error) {
		glue_dir = get_glue_dir(dev);
		goto Error;
	}

	/* notify platform of device entry */
	error = device_platform_notify(dev, KOBJ_ADD);
	if (error)
		goto platform_error;

	error = device_create_file(dev, &dev_attr_uevent);
	if (error)
		goto attrError;

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &dev_attr_dev);
		if (error)
			goto DevAttrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto SysEntryError;

		devtmpfs_create_node(dev);
	}

	/*
	 * Notify clients of device addition.  This call must come after
	 * dpm_sysfs_add() and before kobject_uevent().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);

	/*
	 * Link waiting consumers to this supplier.  Must happen after the
	 * device is registered, but before bus_probe_device() so consumers
	 * can link to it before a driver is bound.
	 */
	if (dev->fwnode && !dev->fwnode->dev) {
		dev->fwnode->dev = dev;
		fw_devlink_link_device(dev);
	}

	bus_probe_device(dev);

	/*
	 * If all driver registration is done and the newly added device
	 * didn't match any driver, don't block its consumers from probing -
	 * they may be able to operate without this supplier.
	 */
	if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
		fw_devlink_unblock_consumers(dev);

	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->p->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
	/* error unwinding: labels undo the steps above in reverse order */
 SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	device_platform_notify(dev, KOBJ_REMOVE);
platform_error:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
	cleanup_glue_dir(dev, glue_dir);
parent_error:
	put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
EXPORT_SYMBOL_GPL(device_add);
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * Shorthand for device_initialize() followed by device_add().
 *
 * NOTE: regardless of the return value, the reference initialized here
 * must be released with put_device() - never kfree(@dev) directly.
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418struct device *get_device(struct device *dev)
3419{
3420 return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
3421}
3422EXPORT_SYMBOL_GPL(get_device);
3423
3424
3425
3426
3427
3428void put_device(struct device *dev)
3429{
3430
3431 if (dev)
3432 kobject_put(&dev->kobj);
3433}
3434EXPORT_SYMBOL_GPL(put_device);
3435
/*
 * kill_device - mark a device as irrevocably dead.
 *
 * Must be called with the device lock held; the lock serializes
 * concurrent killers so exactly one caller sees 'true'.
 *
 * Returns false if the device was already marked dead.
 */
bool kill_device(struct device *dev)
{
	/*
	 * The device lock protects the 'dead' flag against concurrent
	 * setters/readers (see also device_del()).
	 */
	lockdep_assert_held(&dev->mutex);

	if (dev->p->dead)
		return false;
	dev->p->dead = true;
	return true;
}
EXPORT_SYMBOL_GPL(kill_device);
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * First part of device unregistration: removes the device from the
 * lists we control, has it removed from the other driver-model
 * subsystems it was added to in device_add(), and removes it from the
 * kobject hierarchy.
 *
 * NOTE: this should be called manually _iff_ device_add() was also
 * called manually.
 */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;
	unsigned int noio_flag;

	/* mark dead under the device lock (see kill_device()) */
	device_lock(dev);
	kill_device(dev);
	device_unlock(dev);

	if (dev->fwnode && dev->fwnode->dev == dev)
		dev->fwnode->dev = NULL;

	/*
	 * Run the teardown in NOIO context, and notify clients of removal
	 * before dpm_sysfs_remove().
	 */
	noio_flag = memalloc_noio_save();
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);

	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &dev_attr_dev);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->mutex);
		/* notify any interfaces that the device is now gone */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* remove the device from the class list */
		klist_del(&dev->p->knode_class);
		mutex_unlock(&dev->class->p->mutex);
	}
	device_remove_file(dev, &dev_attr_uevent);
	device_remove_attrs(dev);
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_platform_notify(dev, KOBJ_REMOVE);
	device_remove_properties(dev);
	device_links_purge(dev);

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
	cleanup_glue_dir(dev, glue_dir);
	memalloc_noio_restore(noio_flag);
	put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * Two parts, mirroring device_register(): first remove the device from
 * all subsystems with device_del(), then drop the reference taken at
 * registration with put_device().  If that was the final reference the
 * device's release callback frees it; otherwise the structure sticks
 * around until the last reference is dropped.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);
3549
3550static struct device *prev_device(struct klist_iter *i)
3551{
3552 struct klist_node *n = klist_prev(i);
3553 struct device *dev = NULL;
3554 struct device_private *p;
3555
3556 if (n) {
3557 p = to_device_private_parent(n);
3558 dev = p->device;
3559 }
3560 return dev;
3561}
3562
3563static struct device *next_device(struct klist_iter *i)
3564{
3565 struct klist_node *n = klist_next(i);
3566 struct device *dev = NULL;
3567 struct device_private *p;
3568
3569 if (n) {
3570 p = to_device_private_parent(n);
3571 dev = p->device;
3572 }
3573 return dev;
3574}
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
/**
 * device_get_devnode - path of device node file
 * @dev: device
 * @mode: returned file access mode
 * @uid: returned file owner
 * @gid: returned file group
 * @tmp: possibly allocated string
 *
 * Return the relative path of a possible device node.  Non-default
 * names may need an allocation; that memory is returned through @tmp
 * and must be freed by the caller.
 */
const char *device_get_devnode(struct device *dev,
			       umode_t *mode, kuid_t *uid, kgid_t *gid,
			       const char **tmp)
{
	char *s;

	*tmp = NULL;

	/* the device type may provide a specific name */
	if (dev->type && dev->type->devnode)
		*tmp = dev->type->devnode(dev, mode, uid, gid);
	if (*tmp)
		return *tmp;

	/* the class may provide a specific name */
	if (dev->class && dev->class->devnode)
		*tmp = dev->class->devnode(dev, mode);
	if (*tmp)
		return *tmp;

	/* return name without allocation, tmp == NULL */
	if (strchr(dev_name(dev), '!') == NULL)
		return dev_name(dev);

	/* replace '!' in the name with '/' */
	s = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!s)
		return NULL;
	strreplace(s, '!', '/');
	return *tmp = s;
}
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633int device_for_each_child(struct device *parent, void *data,
3634 int (*fn)(struct device *dev, void *data))
3635{
3636 struct klist_iter i;
3637 struct device *child;
3638 int error = 0;
3639
3640 if (!parent->p)
3641 return 0;
3642
3643 klist_iter_init(&parent->p->klist_children, &i);
3644 while (!error && (child = next_device(&i)))
3645 error = fn(child, data);
3646 klist_iter_exit(&i);
3647 return error;
3648}
3649EXPORT_SYMBOL_GPL(device_for_each_child);
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663int device_for_each_child_reverse(struct device *parent, void *data,
3664 int (*fn)(struct device *dev, void *data))
3665{
3666 struct klist_iter i;
3667 struct device *child;
3668 int error = 0;
3669
3670 if (!parent->p)
3671 return 0;
3672
3673 klist_iter_init(&parent->p->klist_children, &i);
3674 while ((child = prev_device(&i)) && !error)
3675 error = fn(child, data);
3676 klist_iter_exit(&i);
3677 return error;
3678}
3679EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698struct device *device_find_child(struct device *parent, void *data,
3699 int (*match)(struct device *dev, void *data))
3700{
3701 struct klist_iter i;
3702 struct device *child;
3703
3704 if (!parent)
3705 return NULL;
3706
3707 klist_iter_init(&parent->p->klist_children, &i);
3708 while ((child = next_device(&i)))
3709 if (match(child, data) && get_device(child))
3710 break;
3711 klist_iter_exit(&i);
3712 return child;
3713}
3714EXPORT_SYMBOL_GPL(device_find_child);
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726struct device *device_find_child_by_name(struct device *parent,
3727 const char *name)
3728{
3729 struct klist_iter i;
3730 struct device *child;
3731
3732 if (!parent)
3733 return NULL;
3734
3735 klist_iter_init(&parent->p->klist_children, &i);
3736 while ((child = next_device(&i)))
3737 if (sysfs_streq(dev_name(child), name) && get_device(child))
3738 break;
3739 klist_iter_exit(&i);
3740 return child;
3741}
3742EXPORT_SYMBOL_GPL(device_find_child_by_name);
3743
/* Create the top-level /sys/devices kset and /sys/dev/{block,char} dirs. */
int __init devices_init(void)
{
	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
	if (!devices_kset)
		return -ENOMEM;
	dev_kobj = kobject_create_and_add("dev", NULL);
	if (!dev_kobj)
		goto dev_kobj_err;
	sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
	if (!sysfs_dev_block_kobj)
		goto block_kobj_err;
	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
	if (!sysfs_dev_char_kobj)
		goto char_kobj_err;

	return 0;

	/* unwind in reverse creation order */
 char_kobj_err:
	kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
	kobject_put(dev_kobj);
 dev_kobj_err:
	kset_unregister(devices_kset);
	return -ENOMEM;
}
3769
3770static int device_check_offline(struct device *dev, void *not_used)
3771{
3772 int ret;
3773
3774 ret = device_for_each_child(dev, NULL, device_check_offline);
3775 if (ret)
3776 return ret;
3777
3778 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
3779}
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
/**
 * device_offline - Prepare the device for hot-removal.
 * @dev: Device to be put offline.
 *
 * Execute the device bus type's .offline() callback, if present, to
 * prepare the device for a subsequent hot-removal.
 *
 * Return: 1 if the device is already offline, 0 on success, -EPERM if
 * offlining is disabled for the device, or a negative error code from
 * the descendants check / the .offline() callback.
 */
int device_offline(struct device *dev)
{
	int ret;

	if (dev->offline_disabled)
		return -EPERM;

	/* refuse when a descendant still needs to be taken offline first */
	ret = device_for_each_child(dev, NULL, device_check_offline);
	if (ret)
		return ret;

	device_lock(dev);
	if (device_supports_offline(dev)) {
		if (dev->offline) {
			ret = 1;
		} else {
			ret = dev->bus->offline(dev);
			if (!ret) {
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
				dev->offline = true;
			}
		}
	}
	device_unlock(dev);

	return ret;
}
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830int device_online(struct device *dev)
3831{
3832 int ret = 0;
3833
3834 device_lock(dev);
3835 if (device_supports_offline(dev)) {
3836 if (dev->offline) {
3837 ret = dev->bus->online(dev);
3838 if (!ret) {
3839 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
3840 dev->offline = false;
3841 }
3842 } else {
3843 ret = 1;
3844 }
3845 }
3846 device_unlock(dev);
3847
3848 return ret;
3849}
3850
/*
 * A "root" device: an anchor for otherwise parentless devices, created
 * by __root_device_register() and freed by root_device_release().
 */
struct root_device {
	struct device dev;
	struct module *owner;	/* module the "module" symlink points at */
};
3855
/* Convert an embedded struct device back to its root_device wrapper. */
static inline struct root_device *to_root_device(struct device *d)
{
	return container_of(d, struct root_device, dev);
}
3860
/* ->release callback: root_devices are kzalloc()ed, so just kfree. */
static void root_device_release(struct device *dev)
{
	kfree(to_root_device(dev));
}
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
/**
 * __root_device_register - allocate and register a root device
 * @name: root device name
 * @owner: owner module of the root device, usually THIS_MODULE
 *
 * Allocates a root device (an anchor for otherwise parentless devices)
 * and registers it.  For a non-NULL @owner a "module" symlink to the
 * module's kobject is created as well.
 *
 * Returns &root->dev on success, or ERR_PTR() on failure.  Release with
 * root_device_unregister().
 */
struct device *__root_device_register(const char *name, struct module *owner)
{
	struct root_device *root;
	int err = -ENOMEM;

	root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
	if (!root)
		return ERR_PTR(err);

	err = dev_set_name(&root->dev, "%s", name);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	root->dev.release = root_device_release;

	err = device_register(&root->dev);
	if (err) {
		/* the registration reference now owns root; release frees it */
		put_device(&root->dev);
		return ERR_PTR(err);
	}

#ifdef CONFIG_MODULES
	if (owner) {
		struct module_kobject *mk = &owner->mkobj;

		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
		if (err) {
			device_unregister(&root->dev);
			return ERR_PTR(err);
		}
		root->owner = owner;
	}
#endif

	return &root->dev;
}
EXPORT_SYMBOL_GPL(__root_device_register);
3927
3928
3929
3930
3931
3932
3933
3934
3935void root_device_unregister(struct device *dev)
3936{
3937 struct root_device *root = to_root_device(dev);
3938
3939 if (root->owner)
3940 sysfs_remove_link(&root->dev.kobj, "module");
3941
3942 device_unregister(dev);
3943}
3944EXPORT_SYMBOL_GPL(root_device_unregister);
3945
3946
/* ->release for devices allocated by device_create_groups_vargs(). */
static void device_create_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}
3952
/*
 * Common worker for device_create() and device_create_with_groups():
 * allocate a struct device, fill it in, name it from @fmt/@args and
 * register it.  On any failure the (possibly partially set up) device
 * is released with put_device() and an ERR_PTR() is returned.
 */
static __printf(6, 0) struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
			   dev_t devt, void *drvdata,
			   const struct attribute_group **groups,
			   const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	device_initialize(dev);
	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	/* put_device(NULL) is a no-op, so this covers the early failure too */
	put_device(dev);
	return ERR_PTR(retval);
}
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014
4015
4016
4017
4018struct device *device_create(struct class *class, struct device *parent,
4019 dev_t devt, void *drvdata, const char *fmt, ...)
4020{
4021 va_list vargs;
4022 struct device *dev;
4023
4024 va_start(vargs, fmt);
4025 dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
4026 fmt, vargs);
4027 va_end(vargs);
4028 return dev;
4029}
4030EXPORT_SYMBOL_GPL(device_create);
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059struct device *device_create_with_groups(struct class *class,
4060 struct device *parent, dev_t devt,
4061 void *drvdata,
4062 const struct attribute_group **groups,
4063 const char *fmt, ...)
4064{
4065 va_list vargs;
4066 struct device *dev;
4067
4068 va_start(vargs, fmt);
4069 dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
4070 fmt, vargs);
4071 va_end(vargs);
4072 return dev;
4073}
4074EXPORT_SYMBOL_GPL(device_create_with_groups);
4075
4076
4077
4078
4079
4080
4081
4082
4083
/**
 * device_destroy - removes a device that was created with device_create()
 * @class: pointer to the struct class that this device was registered with
 * @devt: the dev_t of the device that was previously registered
 *
 * Looks up the class device by @devt and unregisters it.
 */
void device_destroy(struct class *class, dev_t devt)
{
	struct device *dev;

	dev = class_find_device_by_devt(class, devt);
	if (dev) {
		/*
		 * Drop the lookup reference first; the registration
		 * reference is released inside device_unregister().
		 */
		put_device(dev);
		device_unregister(dev);
	}
}
EXPORT_SYMBOL_GPL(device_destroy);
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
/**
 * device_rename - renames a device
 * @dev: the pointer to the struct device to be renamed
 * @new_name: the new name of the device
 *
 * Renames the class symlink (for class devices) and the device's
 * kobject.  Note that the two renames are not atomic with respect to
 * concurrent sysfs lookups.
 */
int device_rename(struct device *dev, const char *new_name)
{
	struct kobject *kobj = &dev->kobj;
	char *old_device_name = NULL;
	int error;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	dev_dbg(dev, "renaming to %s\n", new_name);

	/* keep the old name around for the class-symlink rename */
	old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!old_device_name) {
		error = -ENOMEM;
		goto out;
	}

	if (dev->class) {
		error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
					     kobj, old_device_name,
					     new_name, kobject_namespace(kobj));
		if (error)
			goto out;
	}

	error = kobject_rename(kobj, new_name);
	if (error)
		goto out;

out:
	put_device(dev);

	kfree(old_device_name);

	return error;
}
EXPORT_SYMBOL_GPL(device_rename);
4173
4174static int device_move_class_links(struct device *dev,
4175 struct device *old_parent,
4176 struct device *new_parent)
4177{
4178 int error = 0;
4179
4180 if (old_parent)
4181 sysfs_remove_link(&dev->kobj, "device");
4182 if (new_parent)
4183 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
4184 "device");
4185 return error;
4186}
4187
4188
4189
4190
4191
4192
4193
/**
 * device_move - moves a device to a new parent
 * @dev: the pointer to the struct device to be moved
 * @new_parent: the new parent of the device (can be NULL)
 * @dpm_order: how to reorder the dpm list
 *
 * Moves @dev under @new_parent in sysfs and in the internal child
 * lists; on a class-link failure everything is rolled back.
 */
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order)
{
	int error;
	struct device *old_parent;
	struct kobject *new_parent_kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	device_pm_lock();
	new_parent = get_device(new_parent);
	new_parent_kobj = get_device_parent(dev, new_parent);
	if (IS_ERR(new_parent_kobj)) {
		error = PTR_ERR(new_parent_kobj);
		put_device(new_parent);
		goto out;
	}

	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
	error = kobject_move(&dev->kobj, new_parent_kobj);
	if (error) {
		cleanup_glue_dir(dev, new_parent_kobj);
		put_device(new_parent);
		goto out;
	}
	old_parent = dev->parent;
	dev->parent = new_parent;
	if (old_parent)
		klist_remove(&dev->p->knode_parent);
	if (new_parent) {
		klist_add_tail(&dev->p->knode_parent,
			       &new_parent->p->klist_children);
		set_dev_node(dev, dev_to_node(new_parent));
	}

	if (dev->class) {
		error = device_move_class_links(dev, old_parent, new_parent);
		if (error) {
			/* roll back; errors during cleanup are ignored */
			device_move_class_links(dev, new_parent, old_parent);
			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
				if (new_parent)
					klist_remove(&dev->p->knode_parent);
				dev->parent = old_parent;
				if (old_parent) {
					klist_add_tail(&dev->p->knode_parent,
						       &old_parent->p->klist_children);
					set_dev_node(dev, dev_to_node(old_parent));
				}
			}
			cleanup_glue_dir(dev, new_parent_kobj);
			put_device(new_parent);
			goto out;
		}
	}
	switch (dpm_order) {
	case DPM_ORDER_NONE:
		break;
	case DPM_ORDER_DEV_AFTER_PARENT:
		device_pm_move_after(dev, new_parent);
		devices_kset_move_after(dev, new_parent);
		break;
	case DPM_ORDER_PARENT_BEFORE_DEV:
		device_pm_move_before(new_parent, dev);
		devices_kset_move_before(new_parent, dev);
		break;
	case DPM_ORDER_DEV_LAST:
		device_pm_move_last(dev);
		devices_kset_move_last(dev);
		break;
	}

	put_device(old_parent);
out:
	device_pm_unlock();
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_move);
4276
/*
 * Change the owner of all sysfs attribute groups visible on @dev:
 * class dev_groups, device-type groups, the device's own groups, and
 * the "online" attribute when offline/online is supported.
 */
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
				     kgid_t kgid)
{
	struct kobject *kobj = &dev->kobj;
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		/* class dev_groups -> @kuid/@kgid */
		error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	if (type) {
		/* device-type groups -> @kuid/@kgid */
		error = sysfs_groups_change_owner(kobj, type->groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	/* the device's own groups -> @kuid/@kgid */
	error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
	if (error)
		return error;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		/* the "online" attribute -> @kuid/@kgid */
		error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
						kuid, kgid);
		if (error)
			return error;
	}

	return 0;
}
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
4336{
4337 int error;
4338 struct kobject *kobj = &dev->kobj;
4339
4340 dev = get_device(dev);
4341 if (!dev)
4342 return -EINVAL;
4343
4344
4345
4346
4347
4348 error = sysfs_change_owner(kobj, kuid, kgid);
4349 if (error)
4350 goto out;
4351
4352
4353
4354
4355
4356
4357 error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
4358 kgid);
4359 if (error)
4360 goto out;
4361
4362
4363
4364
4365
4366
4367 error = device_attrs_change_owner(dev, kuid, kgid);
4368 if (error)
4369 goto out;
4370
4371 error = dpm_sysfs_change_owner(dev, kuid, kgid);
4372 if (error)
4373 goto out;
4374
4375#ifdef CONFIG_BLOCK
4376 if (sysfs_deprecated && dev->class == &block_class)
4377 goto out;
4378#endif
4379
4380
4381
4382
4383
4384
4385
4386 error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
4387 dev_name(dev), kuid, kgid);
4388 if (error)
4389 goto out;
4390
4391out:
4392 put_device(dev);
4393 return error;
4394}
4395EXPORT_SYMBOL_GPL(device_change_owner);
4396
4397
4398
4399
/**
 * device_shutdown - call ->shutdown() on each device to shut it down.
 */
void device_shutdown(void)
{
	struct device *dev, *parent;

	wait_for_device_probe();
	device_block_probing();

	cpufreq_suspend();

	spin_lock(&devices_kset->list_lock);
	/*
	 * Walk the devices list backward (children before parents),
	 * shutting each device down in turn.
	 */
	while (!list_empty(&devices_kset->list)) {
		dev = list_entry(devices_kset->list.prev, struct device,
				kobj.entry);

		/*
		 * Hold a reference on the parent as well - its lock is
		 * taken below, so it must not go away under us.
		 */
		parent = get_device(dev->parent);
		get_device(dev);
		/*
		 * Make sure the device is off the kset list, in case
		 * dev->*->shutdown() doesn't remove it.
		 */
		list_del_init(&dev->kobj.entry);
		spin_unlock(&devices_kset->list_lock);

		/* lock parent then child, consistent with probe/release */
		if (parent)
			device_lock(parent);
		device_lock(dev);

		/* don't allow any more runtime suspends */
		pm_runtime_get_noresume(dev);
		pm_runtime_barrier(dev);

		if (dev->class && dev->class->shutdown_pre) {
			if (initcall_debug)
				dev_info(dev, "shutdown_pre\n");
			dev->class->shutdown_pre(dev);
		}
		if (dev->bus && dev->bus->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->bus->shutdown(dev);
		} else if (dev->driver && dev->driver->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->driver->shutdown(dev);
		}

		device_unlock(dev);
		if (parent)
			device_unlock(parent);

		put_device(dev);
		put_device(parent);

		spin_lock(&devices_kset->list_lock);
	}
	spin_unlock(&devices_kset->list_lock);
}
4468
4469
4470
4471
4472
4473#ifdef CONFIG_PRINTK
/*
 * Fill a dev_printk_info record with the device's subsystem name and a
 * device identifier string for structured logging.  Devices with
 * neither a class nor a bus leave the record zeroed.
 */
static void
set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
{
	const char *subsys;

	memset(dev_info, 0, sizeof(*dev_info));

	if (dev->class)
		subsys = dev->class->name;
	else if (dev->bus)
		subsys = dev->bus->name;
	else
		return;

	strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));

	/*
	 * Device identifier formats produced below:
	 *   b12:8         - block dev_t
	 *   c127:3        - char dev_t
	 *   n8            - netdev ifindex
	 *   +sound:card0  - subsystem:devname
	 */
	if (MAJOR(dev->devt)) {
		char c;

		if (strcmp(subsys, "block") == 0)
			c = 'b';
		else
			c = 'c';

		snprintf(dev_info->device, sizeof(dev_info->device),
			 "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
	} else if (strcmp(subsys, "net") == 0) {
		struct net_device *net = to_net_dev(dev);

		snprintf(dev_info->device, sizeof(dev_info->device),
			 "n%u", net->ifindex);
	} else {
		snprintf(dev_info->device, sizeof(dev_info->device),
			 "+%s:%s", subsys, dev_name(dev));
	}
}
4517
4518int dev_vprintk_emit(int level, const struct device *dev,
4519 const char *fmt, va_list args)
4520{
4521 struct dev_printk_info dev_info;
4522
4523 set_dev_info(dev, &dev_info);
4524
4525 return vprintk_emit(0, level, &dev_info, fmt, args);
4526}
4527EXPORT_SYMBOL(dev_vprintk_emit);
4528
4529int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
4530{
4531 va_list args;
4532 int r;
4533
4534 va_start(args, fmt);
4535
4536 r = dev_vprintk_emit(level, dev, fmt, args);
4537
4538 va_end(args);
4539
4540 return r;
4541}
4542EXPORT_SYMBOL(dev_printk_emit);
4543
/*
 * Common printing back-end: route through dev_printk_emit() when a
 * device is available, otherwise fall back to a plain printk().
 */
static void __dev_printk(const char *level, const struct device *dev,
			 struct va_format *vaf)
{
	if (!dev) {
		printk("%s(NULL device *): %pV", level, vaf);
		return;
	}

	dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
			dev_driver_string(dev), dev_name(dev), vaf);
}
4553
4554void dev_printk(const char *level, const struct device *dev,
4555 const char *fmt, ...)
4556{
4557 struct va_format vaf;
4558 va_list args;
4559
4560 va_start(args, fmt);
4561
4562 vaf.fmt = fmt;
4563 vaf.va = &args;
4564
4565 __dev_printk(level, dev, &vaf);
4566
4567 va_end(args);
4568}
4569EXPORT_SYMBOL(dev_printk);
4570
/*
 * Generate the _dev_<level>() helpers: each packs its varargs into a
 * va_format and forwards to __dev_printk() at the given log level.
 */
#define define_dev_printk_level(func, kern_level)		\
void func(const struct device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__dev_printk(kern_level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
4595
4596#endif
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
/**
 * dev_err_probe - probe error check and log helper
 * @dev: the pointer to the struct device
 * @err: error value to test
 * @fmt: printf-style format string
 *
 * Logs an error message unless @err is -EPROBE_DEFER, in which case the
 * message is recorded as the deferred-probe reason and only emitted at
 * debug level.  Returns @err unchanged, so callers can write
 * "return dev_err_probe(dev, err, ...);".
 */
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (err != -EPROBE_DEFER) {
		dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	} else {
		/* deferral is expected - stash the reason, log quietly */
		device_set_deferred_probe_reason(dev, &vaf);
		dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	}

	va_end(args);

	return err;
}
EXPORT_SYMBOL_GPL(dev_err_probe);
4646
4647static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
4648{
4649 return fwnode && !IS_ERR(fwnode->secondary);
4650}
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
/**
 * set_primary_fwnode - Change the primary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New primary firmware node of the device.
 *
 * Set the device's firmware node pointer to @fwnode, preserving any
 * existing secondary node by chaining it behind the new primary.
 * Passing NULL demotes the device back to its secondary node (if any).
 */
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	struct device *parent = dev->parent;
	struct fwnode_handle *fn = dev->fwnode;

	if (fwnode) {
		/* if the current node is a primary, keep only its secondary */
		if (fwnode_is_primary(fn))
			fn = fn->secondary;

		if (fn) {
			WARN_ON(fwnode->secondary);
			fwnode->secondary = fn;
		}
		dev->fwnode = fwnode;
	} else {
		if (fwnode_is_primary(fn)) {
			dev->fwnode = fn->secondary;
			/*
			 * Leave fn->secondary in place when the parent still
			 * uses fn as its own fwnode; otherwise detach it.
			 */
			if (!(parent && fn == parent->fwnode))
				fn->secondary = NULL;
		} else {
			dev->fwnode = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
/**
 * set_secondary_fwnode - Change the secondary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New secondary firmware node of the device.
 *
 * If a primary firmware node of the device is present, set its secondary
 * pointer to @fwnode.  Otherwise, set the device's firmware node pointer
 * to @fwnode.  Secondary nodes are tagged with ERR_PTR(-ENODEV) in their
 * own ->secondary so that fwnode_is_primary() can distinguish them.
 */
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	if (fwnode)
		fwnode->secondary = ERR_PTR(-ENODEV);

	if (fwnode_is_primary(dev->fwnode))
		dev->fwnode->secondary = fwnode;
	else
		dev->fwnode = fwnode;
}
EXPORT_SYMBOL_GPL(set_secondary_fwnode);
4713
4714
4715
4716
4717
4718
4719
4720
4721
/**
 * device_set_of_node_from_dev - reuse device-tree node of another device
 * @dev: device whose device-tree node is being set
 * @dev2: device whose device-tree node is being reused
 *
 * Takes a reference to @dev2's node, drops the reference @dev previously
 * held, and marks the node as reused on @dev.
 */
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
	of_node_put(dev->of_node);
	dev->of_node = of_node_get(dev2->of_node);
	dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
4729
/* Match helper: compare the device name against @name (sysfs-style). */
int device_match_name(struct device *dev, const void *name)
{
	return sysfs_streq(dev_name(dev), name);
}
EXPORT_SYMBOL_GPL(device_match_name);
4735
/* Match helper: compare the device's OF node pointer against @np. */
int device_match_of_node(struct device *dev, const void *np)
{
	return dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
4741
/* Match helper: compare the device's fwnode against @fwnode. */
int device_match_fwnode(struct device *dev, const void *fwnode)
{
	return dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
4747
/* Match helper: compare the device's dev_t against *@pdevt. */
int device_match_devt(struct device *dev, const void *pdevt)
{
	return dev->devt == *(dev_t *)pdevt;
}
EXPORT_SYMBOL_GPL(device_match_devt);
4753
/* Match helper: compare the device's ACPI companion against @adev. */
int device_match_acpi_dev(struct device *dev, const void *adev)
{
	return ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
4759
/* Match helper that matches every device. */
int device_match_any(struct device *dev, const void *unused)
{
	return 1;
}
EXPORT_SYMBOL_GPL(device_match_any);
4765