1
2
3
4
5
6
7
8
9
10
11#include <linux/acpi.h>
12#include <linux/cpufreq.h>
13#include <linux/device.h>
14#include <linux/err.h>
15#include <linux/fwnode.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/kdev_t.h>
21#include <linux/notifier.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/genhd.h>
25#include <linux/mutex.h>
26#include <linux/pm_runtime.h>
27#include <linux/netdevice.h>
28#include <linux/sched/signal.h>
29#include <linux/sched/mm.h>
30#include <linux/swiotlb.h>
31#include <linux/sysfs.h>
32#include <linux/dma-map-ops.h>
33
34#include "base.h"
35#include "power/power.h"
36
#ifdef CONFIG_SYSFS_DEPRECATED
/* Default for the deprecated sysfs layout comes from Kconfig. */
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/* "sysfs.deprecated=<0|1>" on the kernel command line overrides the default. */
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
49
50
/* Devices whose sync_state() call has been deferred (see defer_sync below). */
static LIST_HEAD(deferred_sync);
/*
 * Starts at 1 so that sync_state() calls stay paused until the late initcall
 * resume (sync_state_resume_initcall) drops the initial count.
 */
static unsigned int defer_sync_state_count = 1;
/* Serializes all accesses to the fwnode suppliers/consumers link lists. */
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
static bool fw_devlink_drv_reg_done;
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
/**
 * fwnode_link_add - Create a link between two fwnode_handles.
 * @con: Consumer end of the link.
 * @sup: Supplier end of the link.
 *
 * Creates a fwnode link between fwnode handles @con and @sup.  The fwnode link
 * is tracked in both @sup's consumers list (via s_hook) and @con's suppliers
 * list (via c_hook).  Adding a duplicate link is a no-op that returns 0.
 *
 * Returns 0 on success, or -ENOMEM if the link could not be allocated.
 */
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
{
	struct fwnode_link *link;
	int ret = 0;

	mutex_lock(&fwnode_link_lock);

	/* A link between this pair may already exist; don't add it twice. */
	list_for_each_entry(link, &sup->consumers, s_hook)
		if (link->consumer == con)
			goto out;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}

	link->supplier = sup;
	INIT_LIST_HEAD(&link->s_hook);
	link->consumer = con;
	INIT_LIST_HEAD(&link->c_hook);

	list_add(&link->s_hook, &sup->consumers);
	list_add(&link->c_hook, &con->suppliers);
	pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
		 con, sup);
out:
	mutex_unlock(&fwnode_link_lock);

	return ret;
}
105
106
107
108
109
110
111
/**
 * __fwnode_link_del - Delete a link between two fwnode_handles.
 * @link: the fwnode_link to be deleted
 *
 * The fwnode_link_lock needs to be held when this function is called.
 * Unhooks @link from both endpoint lists and frees it.
 */
static void __fwnode_link_del(struct fwnode_link *link)
{
	pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
		 link->consumer, link->supplier);
	list_del(&link->s_hook);
	list_del(&link->c_hook);
	kfree(link);
}
120
121
122
123
124
125
126
127static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
128{
129 struct fwnode_link *link, *tmp;
130
131 mutex_lock(&fwnode_link_lock);
132 list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
133 __fwnode_link_del(link);
134 mutex_unlock(&fwnode_link_lock);
135}
136
137
138
139
140
141
142
143static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
144{
145 struct fwnode_link *link, *tmp;
146
147 mutex_lock(&fwnode_link_lock);
148 list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
149 __fwnode_link_del(link);
150 mutex_unlock(&fwnode_link_lock);
151}
152
153
154
155
156
157
158
/**
 * fwnode_links_purge - Delete all links connected to a fwnode_handle.
 * @fwnode: fwnode whose links need to be deleted
 *
 * Deletes all links connecting directly to @fwnode, both as supplier
 * and as consumer.
 */
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
	fwnode_links_purge_suppliers(fwnode);
	fwnode_links_purge_consumers(fwnode);
}
164
void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;

	/* Don't purge consumer links of an added child */
	if (fwnode->dev)
		return;

	/* Mark the node as never going to have a struct device for it. */
	fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
	fwnode_links_purge_consumers(fwnode);

	/* Recurse into the available children of this fwnode. */
	fwnode_for_each_available_child_node(fwnode, child)
		fw_devlink_purge_absent_suppliers(child);
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
180
#ifdef CONFIG_SRCU
/*
 * Device link list manipulation: writers take a mutex; readers use SRCU so
 * that link lists can be walked from contexts that must not sleep on the
 * writer lock.  Removal must wait for an SRCU grace period before freeing.
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}

/* Wait until all current SRCU readers are done before freeing a link. */
static void device_link_synchronize_removal(void)
{
	synchronize_srcu(&device_links_srcu);
}

static void device_link_remove_from_lists(struct device_link *link)
{
	/* RCU-safe deletion: readers may still be traversing the lists. */
	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
}
#else /* !CONFIG_SRCU */
/* Without SRCU, fall back to a plain rwsem for both readers and writers. */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int device_links_read_lock_held(void)
{
	return lockdep_is_held(&device_links_lock);
}
#endif

/* Readers are excluded by the rwsem, so nothing to wait for on removal. */
static inline void device_link_synchronize_removal(void)
{
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del(&link->s_node);
	list_del(&link->c_node);
}
#endif /* !CONFIG_SRCU */
261
262static bool device_is_ancestor(struct device *dev, struct device *target)
263{
264 while (target->parent) {
265 target = target->parent;
266 if (dev == target)
267 return true;
268 }
269 return false;
270}
271
272
273
274
275
276
277
278
279
/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child or
 * its consumer etc).  Return 1 if that is the case or 0 otherwise.
 */
int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	/*
	 * The "ancestors" check is needed to catch the case when the target
	 * device has not been completely initialized yet and it is still
	 * missing from the list of children of its parent device.
	 */
	if (dev == target || device_is_ancestor(dev, target))
		return 1;

	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links don't create real dependencies. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}
311
312static void device_link_init_status(struct device_link *link,
313 struct device *consumer,
314 struct device *supplier)
315{
316 switch (supplier->links.status) {
317 case DL_DEV_PROBING:
318 switch (consumer->links.status) {
319 case DL_DEV_PROBING:
320
321
322
323
324
325
326
327 link->status = DL_STATE_CONSUMER_PROBE;
328 break;
329 default:
330 link->status = DL_STATE_DORMANT;
331 break;
332 }
333 break;
334 case DL_DEV_DRIVER_BOUND:
335 switch (consumer->links.status) {
336 case DL_DEV_PROBING:
337 link->status = DL_STATE_CONSUMER_PROBE;
338 break;
339 case DL_DEV_DRIVER_BOUND:
340 link->status = DL_STATE_ACTIVE;
341 break;
342 default:
343 link->status = DL_STATE_AVAILABLE;
344 break;
345 }
346 break;
347 case DL_DEV_UNBINDING:
348 link->status = DL_STATE_SUPPLIER_UNBIND;
349 break;
350 default:
351 link->status = DL_STATE_DORMANT;
352 break;
353 }
354}
355
/*
 * Move @dev and all of its descendants and consumers to the ends of the
 * device list and the PM list, so suppliers always precede their consumers
 * in both orderings.
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links don't constrain ordering. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}
380
381
382
383
384
385
386
387
388
389
/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the device_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}
400
401#define to_devlink(dev) container_of((dev), struct device_link, link_dev)
402
403static ssize_t status_show(struct device *dev,
404 struct device_attribute *attr, char *buf)
405{
406 const char *output;
407
408 switch (to_devlink(dev)->status) {
409 case DL_STATE_NONE:
410 output = "not tracked";
411 break;
412 case DL_STATE_DORMANT:
413 output = "dormant";
414 break;
415 case DL_STATE_AVAILABLE:
416 output = "available";
417 break;
418 case DL_STATE_CONSUMER_PROBE:
419 output = "consumer probing";
420 break;
421 case DL_STATE_ACTIVE:
422 output = "active";
423 break;
424 case DL_STATE_SUPPLIER_UNBIND:
425 output = "supplier unbinding";
426 break;
427 default:
428 output = "unknown";
429 break;
430 }
431
432 return sysfs_emit(buf, "%s\n", output);
433}
434static DEVICE_ATTR_RO(status);
435
436static ssize_t auto_remove_on_show(struct device *dev,
437 struct device_attribute *attr, char *buf)
438{
439 struct device_link *link = to_devlink(dev);
440 const char *output;
441
442 if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
443 output = "supplier unbind";
444 else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
445 output = "consumer unbind";
446 else
447 output = "never";
448
449 return sysfs_emit(buf, "%s\n", output);
450}
451static DEVICE_ATTR_RO(auto_remove_on);
452
453static ssize_t runtime_pm_show(struct device *dev,
454 struct device_attribute *attr, char *buf)
455{
456 struct device_link *link = to_devlink(dev);
457
458 return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
459}
460static DEVICE_ATTR_RO(runtime_pm);
461
462static ssize_t sync_state_only_show(struct device *dev,
463 struct device_attribute *attr, char *buf)
464{
465 struct device_link *link = to_devlink(dev);
466
467 return sysfs_emit(buf, "%d\n",
468 !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
469}
470static DEVICE_ATTR_RO(sync_state_only);
471
/* Attributes exposed under each /sys/class/devlink/... device. */
static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);
480
/* Deferred final teardown of a device link, run from a workqueue. */
static void device_link_release_fn(struct work_struct *work)
{
	struct device_link *link = container_of(work, struct device_link, rm_work);

	/* Ensure that all references to the link object have been dropped. */
	device_link_synchronize_removal();

	/* Drop every runtime-PM reference the link still holds on the supplier. */
	while (refcount_dec_not_one(&link->rpm_active))
		pm_runtime_put(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}
495
/* Class release callback: defer the real cleanup to process context. */
static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	INIT_WORK(&link->rm_work, device_link_release_fn);
	/*
	 * It may take a while to complete this work because of the SRCU
	 * synchronization in device_link_release_fn() and if the consumer or
	 * supplier devices get deleted when it runs, so put it into the "long"
	 * workqueue.
	 */
	queue_work(system_long_wq, &link->rm_work);
}
509
/* Device class backing /sys/class/devlink entries for device links. */
static struct class devlink_class = {
	.name = "devlink",
	.owner = THIS_MODULE,
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};
516
/*
 * Create the four symlinks describing a device link:
 *  - <link>/supplier and <link>/consumer pointing at the endpoint devices,
 *  - "consumer:<bus>:<name>" in the supplier's directory,
 *  - "supplier:<bus>:<name>" in the consumer's directory.
 * On failure, unwinds whatever was created (classic goto cleanup chain).
 */
static int devlink_add_symlinks(struct device *dev,
				struct class_interface *class_intf)
{
	int ret;
	size_t len;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	/* Size the scratch buffer for the longer of the two link names. */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_con_dev;

	snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_sup_dev;

	goto out;

err_sup_dev:
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	kfree(buf);
	return ret;
}
566
/* Remove the symlinks created by devlink_add_symlinks(). */
static void devlink_remove_symlinks(struct device *dev,
				    struct class_interface *class_intf)
{
	struct device_link *link = to_devlink(dev);
	size_t len;
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	/* Must match the buffer sizing used in devlink_add_symlinks(). */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		WARN(1, "Unable to properly free device link symlinks!\n");
		return;
	}

	/*
	 * The consumer may already be unregistered by the time its link goes
	 * away; only touch its directory if it is still registered.
	 * NOTE(review): the supplier's symlink is removed unconditionally —
	 * confirm the supplier cannot be unregistered before its links here.
	 */
	if (device_is_registered(con)) {
		snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
		sysfs_remove_link(&con->kobj, buf);
	}
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
	kfree(buf);
}
597
/* Hook symlink creation/removal into devlink class device add/remove. */
static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};
603
604static int __init devlink_class_init(void)
605{
606 int ret;
607
608 ret = class_register(&devlink_class);
609 if (ret)
610 return ret;
611
612 ret = class_interface_register(&devlink_class_intf);
613 if (ret)
614 class_unregister(&devlink_class);
615
616 return ret;
617}
618postcore_initcall(devlink_class_init);
619
/* Flags that are only meaningful for managed (non-stateless) links. */
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER | \
			       DL_FLAG_SYNC_STATE_ONLY | \
			       DL_FLAG_INFERRED)

/* Full set of flags callers may pass to device_link_add(). */
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * Return: the link on success, NULL on invalid flag combinations, dependency
 * cycles, allocation or registration failure.
 *
 * If a link between the two devices already exists, it is reused (its
 * reference count and flags are updated as requested); otherwise a new link
 * device is allocated and registered in the devlink class.  The consumer
 * (and its descendants/consumers) is moved to the end of the device lists so
 * that suppliers always precede consumers, except for SYNC_STATE_ONLY links
 * which impose no such ordering.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/*
	 * Reject: missing/identical endpoints, unknown flags, stateless links
	 * combined with managed-only flags, SYNC_STATE_ONLY combined with
	 * anything but INFERRED, and AUTOPROBE_CONSUMER combined with either
	 * autoremove flag.
	 */
	if (!consumer || !supplier || consumer == supplier ||
	    flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
	     (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	/* RPM_ACTIVE requires the supplier to be resumed up front. */
	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
	 * the supplier already in the graph, refuse to create the link: it
	 * would either leave a dangling link or introduce a dependency cycle.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * SYNC_STATE_ONLY links are useless once a consumer device has probed.
	 * So, only create it if the consumer hasn't probed yet.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
	    consumer->links.status != DL_DEV_NO_DRIVER &&
	    consumer->links.status != DL_DEV_PROBING) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER, so it takes precedence.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	/* Reuse an existing link between this pair if there is one. */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		/* An explicit request clears an inferred link's marker. */
		if (link->flags & DL_FLAG_INFERRED &&
		    !(flags & DL_FLAG_INFERRED))
			link->flags &= ~DL_FLAG_INFERRED;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			/*
			 * Upgrading a SYNC_STATE_ONLY link to stateless now
			 * creates a real ordering constraint, so reorder.
			 */
			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
			    !(link->flags & DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the life time of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		/* A stateless-only link becoming managed needs a valid state. */
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		/* Losing SYNC_STATE_ONLY adds an ordering constraint. */
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s:%s--%s:%s",
		     dev_bus_name(supplier), dev_name(supplier),
		     dev_bus_name(consumer), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		/* device_register() failure path: put_device() frees it. */
		put_device(&link->link_dev);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Some callers expect the link creation during consumer driver probe
	 * to resume the supplier even without DL_FLAG_RPM_ACTIVE.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	/* Drop the up-front RPM reference if no link came out of it. */
	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
882
/*
 * Final kref release for a device link: detach it from both endpoint lists
 * and unregister its class device (actual freeing is deferred to the class
 * release callback).  Called with the device links write lock held.
 */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_dbg(link->consumer, "Dropping the link to %s\n",
		dev_name(link->supplier));

	pm_runtime_drop_link(link);

	device_link_remove_from_lists(link);
	device_unregister(&link->link_dev);
}
895
/*
 * Drop one reference to @link.  Only stateless links may be dropped by
 * arbitrary callers; a managed link may only be force-dropped while its
 * consumer is being unregistered — anything else is a caller bug.
 */
static void device_link_put_kref(struct device_link *link)
{
	if (link->flags & DL_FLAG_STATELESS)
		kref_put(&link->kref, __device_link_del);
	else if (!device_is_registered(link->consumer))
		__device_link_del(&link->kref);
	else
		WARN(1, "Unable to drop a managed device link reference\n");
}
905
906
907
908
909
910
911
912
913
914
/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.  If the link was added multiple times, it needs to be deleted as often.
 * Care is required for hotplugged devices: their links are purged when they
 * are unregistered.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
922
923
924
925
926
927
928
929
930
931void device_link_remove(void *consumer, struct device *supplier)
932{
933 struct device_link *link;
934
935 if (WARN_ON(consumer == supplier))
936 return;
937
938 device_links_write_lock();
939
940 list_for_each_entry(link, &supplier->links.consumers, s_node) {
941 if (link->consumer == consumer) {
942 device_link_put_kref(link);
943 break;
944 }
945 }
946
947 device_links_write_unlock();
948}
949EXPORT_SYMBOL_GPL(device_link_remove);
950
/*
 * A probe attempt failed because a supplier is missing: roll every link that
 * was in CONSUMER_PROBE back to a pre-probe state.  Links to a bound supplier
 * go back to AVAILABLE; the rest (which must be SYNC_STATE_ONLY) go DORMANT.
 */
static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->status != DL_STATE_CONSUMER_PROBE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			/* Only SYNC_STATE_ONLY links may have an unbound supplier here. */
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}
}
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers.  Walk the list of the
 * device's links to suppliers and see if all of them are available.  If not,
 * return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here.  It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers.  This means we need to
 * mark the link as "consumer probe in progress" to make the supplier removal
 * wait for us to complete (or bad things may happen).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;
	struct fwnode_handle *sup_fw;

	/*
	 * Device waiting for supplier to become available is not allowed to
	 * probe.
	 */
	mutex_lock(&fwnode_link_lock);
	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
	    !fw_devlink_is_permissive()) {
		sup_fw = list_first_entry(&dev->fwnode->suppliers,
					  struct fwnode_link,
					  c_hook)->supplier;
		dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n",
			      sup_fw);
		mutex_unlock(&fwnode_link_lock);
		return -EPROBE_DEFER;
	}
	mutex_unlock(&fwnode_link_lock);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE &&
		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
			/* Roll back any links already marked CONSUMER_PROBE. */
			device_links_missing_supplier(dev);
			dev_err_probe(dev, -EPROBE_DEFER,
				      "supplier %s not ready\n",
				      dev_name(link->supplier));
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
/**
 * __device_links_queue_sync_state - Queue a device for sync_state() callback
 * @dev: Device to call sync_state() on
 * @list: List head to queue the @dev on
 *
 * Queues a device for a sync_state() callback when the device links write lock
 * isn't held.  This allows the sync_state() execution flow to use device links
 * APIs.  The caller must ensure this function is called with
 * device_links_write_lock() held.
 *
 * This function does a get_device() to make sure the device is not freed while
 * on this list.  The caller is responsible for doing a put_device() after
 * processing the list (device_links_flush_sync_list() does this).
 */
static void __device_links_queue_sync_state(struct device *dev,
					    struct list_head *list)
{
	struct device_link *link;

	if (!dev_has_sync_state(dev))
		return;
	if (dev->state_synced)
		return;

	/* All managed consumers must be fully bound before syncing state. */
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;
		if (link->status != DL_STATE_ACTIVE)
			return;
	}

	/*
	 * Set the flag here to avoid adding the same device to a list more
	 * than once.  This can happen if new consumers get added to the device
	 * and probed before the list is flushed.
	 */
	dev->state_synced = true;

	if (WARN_ON(!list_empty(&dev->links.defer_sync)))
		return;

	get_device(dev);
	list_add_tail(&dev->links.defer_sync, list);
}
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
/**
 * device_links_flush_sync_list - Call sync_state() on a list of devices
 * @list: List of devices to call sync_state() on
 * @dont_lock_dev: Device for which lock is already held by the caller
 *
 * Calls sync_state() on all the devices that have been queued for it.  This
 * function is used in conjunction with __device_links_queue_sync_state().  The
 * @dont_lock_dev parameter is useful when this function is called from a
 * context where a device lock is already held.
 */
static void device_links_flush_sync_list(struct list_head *list,
					 struct device *dont_lock_dev)
{
	struct device *dev, *tmp;

	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
		list_del_init(&dev->links.defer_sync);

		if (dev != dont_lock_dev)
			device_lock(dev);

		/* Bus-level sync_state() takes precedence over the driver's. */
		if (dev->bus->sync_state)
			dev->bus->sync_state(dev);
		else if (dev->driver && dev->driver->sync_state)
			dev->driver->sync_state(dev);

		if (dev != dont_lock_dev)
			device_unlock(dev);

		/* Drop the reference taken by __device_links_queue_sync_state(). */
		put_device(dev);
	}
}
1111
/* Pause sync_state() callbacks; pairs with *_sync_state_resume(). */
void device_links_supplier_sync_state_pause(void)
{
	device_links_write_lock();
	defer_sync_state_count++;
	device_links_write_unlock();
}
1118
/*
 * Undo one device_links_supplier_sync_state_pause().  When the pause count
 * drops to zero, queue and then run sync_state() for every device that was
 * deferred while paused.
 */
void device_links_supplier_sync_state_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(sync_list);

	device_links_write_lock();
	if (!defer_sync_state_count) {
		WARN(true, "Unmatched sync_state pause/resume!");
		goto out;
	}
	defer_sync_state_count--;
	if (defer_sync_state_count)
		goto out;

	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
		/*
		 * Delete from deferred_sync list before queuing it to
		 * sync_list because defer_sync is used for both lists.
		 */
		list_del_init(&dev->links.defer_sync);
		__device_links_queue_sync_state(dev, &sync_list);
	}
out:
	device_links_write_unlock();

	/* Run the callbacks without holding the write lock. */
	device_links_flush_sync_list(&sync_list, NULL);
}
1146
/*
 * Drop the initial pause (defer_sync_state_count starts at 1) once late
 * initcalls run, releasing any sync_state() calls deferred during boot.
 */
static int sync_state_resume_initcall(void)
{
	device_links_supplier_sync_state_resume();
	return 0;
}
late_initcall(sync_state_resume_initcall);
1153
1154static void __device_links_supplier_defer_sync(struct device *sup)
1155{
1156 if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
1157 list_add_tail(&sup->links.defer_sync, &deferred_sync);
1158}
1159
/*
 * Convert a managed link back to untracked and drop the reference held on
 * behalf of the DL_FLAG_MANAGED state (which may free the link).
 */
static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}
1166
1167static ssize_t waiting_for_supplier_show(struct device *dev,
1168 struct device_attribute *attr,
1169 char *buf)
1170{
1171 bool val;
1172
1173 device_lock(dev);
1174 val = !list_empty(&dev->fwnode->suppliers);
1175 device_unlock(dev);
1176 return sysfs_emit(buf, "%u\n", val);
1177}
1178static DEVICE_ATTR_RO(waiting_for_supplier);
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
/**
 * device_links_force_bind - Prepares device to be force bound
 * @dev: Consumer device.
 *
 * device_bind_driver() force binds a device to a driver without calling any
 * driver probe functions.  So the consumer really isn't going to wait for any
 * supplier before it's bound to the driver.  We still want the device link
 * states to be sensible when this happens.
 *
 * In preparation for device_bind_driver(), this function goes through each
 * supplier device links and checks if the supplier is bound.  If it is, then
 * the device link status is set to CONSUMER_PROBE.  Otherwise, the device link
 * is dropped.  Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_force_bind(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE) {
			device_link_drop_managed(link);
			continue;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
}
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".
 *
 * Also change the status of @dev's links to suppliers to "active".
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link, *ln;
	LIST_HEAD(sync_list);

	/*
	 * If a device binds successfully, it's expected to have created all
	 * the device links it needs to or make new device links as it needs
	 * them.  So, fw_devlink no longer needs to create device links to any
	 * of the device's suppliers.
	 *
	 * Also, if a child firmware node of this bound device is not added as
	 * a device by now, assume it is never going to be added and make sure
	 * other devices don't defer probe indefinitely by waiting for such a
	 * child device.  Only do this for nodes this device directly manages
	 * (fwnode->dev == dev).
	 */
	if (dev->fwnode && dev->fwnode->dev == dev) {
		struct fwnode_handle *child;
		fwnode_links_purge_suppliers(dev->fwnode);
		fwnode_for_each_available_child_node(dev->fwnode, child)
			fw_devlink_purge_absent_suppliers(child);
	}
	device_remove_file(dev, &dev_attr_waiting_for_supplier);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * Links created during consumer probe may be in the "consumer
		 * probe" state to start with if the supplier is still probing
		 * when they are created and they may become "active" if the
		 * consumer probe returns first.  Skip them here.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		/* Give waiting consumers another chance to probe. */
		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	/* This device's own sync_state() call. */
	if (defer_sync_state_count)
		__device_links_supplier_defer_sync(dev);
	else
		__device_links_queue_sync_state(dev, &sync_list);

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		struct device *supplier;

		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		supplier = link->supplier;
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
			/*
			 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
			 * other DL_MANAGED_LINK_FLAGS have been set.  So, it's
			 * save to drop the managed link completely.
			 */
			device_link_drop_managed(link);
		} else {
			WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
			WRITE_ONCE(link->status, DL_STATE_ACTIVE);
		}

		/*
		 * This needs to be done even for the deleted
		 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
		 * device link that was preventing the supplier from getting a
		 * sync_state() call.
		 */
		if (defer_sync_state_count)
			__device_links_supplier_defer_sync(supplier);
		else
			__device_links_queue_sync_state(supplier, &sync_list);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();

	device_links_flush_sync_list(&sync_list, dev);
}
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
			device_link_drop_managed(link);
			continue;
		}

		if (link->status != DL_STATE_CONSUMER_PROBE &&
		    link->status != DL_STATE_ACTIVE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			/* Only SYNC_STATE_ONLY links may have an unbound supplier here. */
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver has just failed to probe.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * The probe has failed, so if the status of the link is
		 * "consumer probe" or "active", it must have been added by
		 * a probing consumer while this device was still probing.
		 * Change its state to "dormant", as it represents a valid
		 * relationship, but it is not functionally meaningful.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant" and
 * invoke %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

		/*
		 * autoremove the links between this @dev and its consumer
		 * devices that are not active, i.e. where the link state
		 * has moved to DL_STATE_SUPPLIER_UNBIND.
		 */
		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
			device_link_drop_managed(link);

		/*
		 * NOTE(review): this store also reaches a link just passed to
		 * device_link_drop_managed() above (its freeing is deferred,
		 * and drop_managed set it to DL_STATE_NONE).  Consider whether
		 * an "else" was intended here — confirm against upstream.
		 */
		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	list_del_init(&dev->links.defer_sync);
	__device_links_no_driver(dev);

	device_links_write_unlock();
}
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each consumer of the device and return 'true' if its link's status
 * is one of "consumer probe" or "active" (meaning that the given consumer is
 * probing right now or its driver is present).  Otherwise, change the link
 * state to "supplier unbind" to prevent the consumer from being probed
 * successfully going forward.
 *
 * Return 'false' if there are no probing or active consumers.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev and if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to complete
 * and start over.
 *
 * If that's not the case, change the status of the link to "supplier unbind"
 * and check if the link was in the "active" state.  If so, force the consumer
 * driver to unbind and start over (the consumer will not re-probe as we have
 * changed the state of the link already).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (!(link->flags & DL_FLAG_MANAGED) ||
		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			/* Must drop the write lock before waiting for probes. */
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}
1526
1527
1528
1529
1530
/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	/* Devlink class devices don't themselves have device links. */
	if (dev->class == &devlink_class)
		return;

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}
1557
1558#define FW_DEVLINK_FLAGS_PERMISSIVE (DL_FLAG_INFERRED | \
1559 DL_FLAG_SYNC_STATE_ONLY)
1560#define FW_DEVLINK_FLAGS_ON (DL_FLAG_INFERRED | \
1561 DL_FLAG_AUTOPROBE_CONSUMER)
1562#define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \
1563 DL_FLAG_PM_RUNTIME)
1564
1565static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
1566static int __init fw_devlink_setup(char *arg)
1567{
1568 if (!arg)
1569 return -EINVAL;
1570
1571 if (strcmp(arg, "off") == 0) {
1572 fw_devlink_flags = 0;
1573 } else if (strcmp(arg, "permissive") == 0) {
1574 fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
1575 } else if (strcmp(arg, "on") == 0) {
1576 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
1577 } else if (strcmp(arg, "rpm") == 0) {
1578 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
1579 }
1580 return 0;
1581}
1582early_param("fw_devlink", fw_devlink_setup);
1583
1584static bool fw_devlink_strict;
/* Parse the "fw_devlink.strict=" boolean early parameter. */
static int __init fw_devlink_strict_setup(char *arg)
{
	return strtobool(arg, &fw_devlink_strict);
}
1589early_param("fw_devlink.strict", fw_devlink_strict_setup);
1590
/* Return the device-link flags currently selected for fw_devlink. */
u32 fw_devlink_get_flags(void)
{
	return fw_devlink_flags;
}
1595
/* True if fw_devlink is operating in "permissive" mode. */
static bool fw_devlink_is_permissive(void)
{
	return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
}
1600
/* True if fw_devlink.strict was requested and we are not permissive. */
bool fw_devlink_is_strict(void)
{
	return fw_devlink_strict && !fw_devlink_is_permissive();
}
1605
/*
 * Ask the fwnode to add its supplier links, once per node.  The
 * FWNODE_FLAG_LINKS_ADDED flag guards against repeated parsing.
 */
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
{
	if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
		return;

	fwnode_call_int_op(fwnode, add_links);
	fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
}
1614
1615static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
1616{
1617 struct fwnode_handle *child = NULL;
1618
1619 fw_devlink_parse_fwnode(fwnode);
1620
1621 while ((child = fwnode_get_next_available_child_node(fwnode, child)))
1622 fw_devlink_parse_fwtree(child);
1623}
1624
/*
 * Downgrade an inferred device link to the permissive (SYNC_STATE_ONLY)
 * flags, dropping any runtime-PM contribution it made.  Links that were
 * not inferred by fw_devlink, or are already permissive, are untouched.
 */
static void fw_devlink_relax_link(struct device_link *link)
{
	if (!(link->flags & DL_FLAG_INFERRED))
		return;

	if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
		return;

	pm_runtime_drop_link(link);
	link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
	dev_dbg(link->consumer, "Relaxing link with %s\n",
		dev_name(link->supplier));
}
1638
/*
 * class_for_each_device() callback: relax a device link whose supplier
 * can never match a driver (no driver was registered for it).
 */
static int fw_devlink_no_driver(struct device *dev, void *data)
{
	struct device_link *link = to_devlink(dev);

	if (!link->supplier->can_match)
		fw_devlink_relax_link(link);

	return 0;
}
1648
/*
 * Called once all built-in drivers have been registered; relaxes every
 * remaining device link whose supplier has no chance of binding a driver,
 * so consumers are no longer blocked on them.
 */
void fw_devlink_drivers_done(void)
{
	fw_devlink_drv_reg_done = true;
	device_links_write_lock();
	class_for_each_device(&devlink_class, NULL, NULL,
			      fw_devlink_no_driver);
	device_links_write_unlock();
}
1657
/*
 * Relax all device links to consumers of @dev so that they are no longer
 * blocked waiting on it.  A no-op when fw_devlink is off or permissive,
 * since in those modes consumers are never blocked.
 */
static void fw_devlink_unblock_consumers(struct device *dev)
{
	struct device_link *link;

	if (!fw_devlink_flags || fw_devlink_is_permissive())
		return;

	device_links_write_lock();
	list_for_each_entry(link, &dev->links.consumers, s_node)
		fw_devlink_relax_link(link);
	device_links_write_unlock();
}
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
/**
 * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
 * @con: Potential consumer device.
 * @sup: Potential supplier device.
 *
 * Searches the dependency graph (child devices and consumer links) for a
 * path from @con back to @sup and relaxes every inferred device link on
 * such a path, breaking the cycle.  Returns nonzero if a cycle through
 * @sup was found.  Recursive; must be called with device_links_write_lock()
 * held (it walks and modifies link lists).
 */
static int fw_devlink_relax_cycle(struct device *con, void *sup)
{
	struct device_link *link;
	int ret;

	if (con == sup)
		return 1;

	ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
	if (ret)
		return ret;

	list_for_each_entry(link, &con->links.consumers, s_node) {
		/* Already-permissive links cannot be part of a blocking cycle. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (!fw_devlink_relax_cycle(link->consumer, sup))
			continue;

		ret = 1;

		fw_devlink_relax_link(link);
	}
	return ret;
}
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
/**
 * fw_devlink_create_devlink - Create a device link from a consumer to a
 *			       fwnode of a supplier.
 * @con: Consumer device.
 * @sup_handle: fwnode of the supplier.
 * @flags: Device-link flags to use.
 *
 * Returns 0 on success, -EAGAIN when the supplier device has not been
 * added yet (the fwnode link should be retried later), and -EINVAL when
 * a device link cannot or should not be created (e.g. a dependency cycle
 * was detected and relaxed instead).
 */
static int fw_devlink_create_devlink(struct device *con,
				     struct fwnode_handle *sup_handle, u32 flags)
{
	struct device *sup_dev;
	int ret = 0;

	/*
	 * A supplier that waits for one of its own descendants to bind
	 * (FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD) must not gain a link from
	 * a consumer it is an ancestor of, as that would deadlock probing.
	 */
	if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
	    fwnode_is_ancestor_of(sup_handle, con->fwnode))
		return -EINVAL;

	sup_dev = get_dev_from_fwnode(sup_handle);
	if (sup_dev) {
		/*
		 * An initialized supplier fwnode whose device will never
		 * bind a driver is not a real dependency.
		 */
		if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
		    sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
			ret = -EINVAL;
			goto out;
		}

		/*
		 * If device_link_add() fails for a non-SYNC_STATE_ONLY link,
		 * assume a dependency cycle: relax the links forming it and
		 * retry with permissive flags.
		 */
		if (!device_link_add(con, sup_dev, flags) &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			dev_info(con, "Fixing up cyclic dependency with %s\n",
				 dev_name(sup_dev));
			device_links_write_lock();
			fw_devlink_relax_cycle(con, sup_dev);
			device_links_write_unlock();
			device_link_add(con, sup_dev,
					FW_DEVLINK_FLAGS_PERMISSIVE);
			ret = -EINVAL;
		}

		goto out;
	}

	/* Supplier fwnode is initialized but has no struct device: no link. */
	if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
		return -EINVAL;

	/*
	 * SYNC_STATE_ONLY links only make sense between existing devices;
	 * ask the caller to retry once the supplier device shows up.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY)
		return -EAGAIN;

	/*
	 * The supplier device does not exist yet, but one of its ancestor
	 * devices might.  If the consumer already depends (directly or
	 * transitively) on that ancestor, creating the link later would
	 * form a cycle — relax it now instead.
	 */
	sup_dev = fwnode_get_next_parent_dev(sup_handle);
	if (sup_dev && device_is_dependent(con, sup_dev)) {
		dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
			 sup_handle, dev_name(sup_dev));
		device_links_write_lock();
		fw_devlink_relax_cycle(con, sup_dev);
		device_links_write_unlock();
		ret = -EINVAL;
	} else {
		/* No cycle: wait for the supplier device to be added. */
		ret = -EAGAIN;
	}

out:
	put_device(sup_dev);
	return ret;
}
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
/**
 * __fw_devlink_link_to_consumers - Create device links to consumers of a device
 * @dev: Device that needs to be linked to its consumers
 *
 * Walks the fwnode-level consumer links of @dev's fwnode and tries to turn
 * each into a real device link.  If the consumer fwnode has no device yet,
 * the closest ancestor device is used with permissive flags instead (the
 * fwnode link is then kept for the real consumer).  Successfully converted
 * own links are deleted.  Must be called with fwnode_link_lock held (list
 * is modified via __fwnode_link_del()).
 */
static void __fw_devlink_link_to_consumers(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;
	struct fwnode_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
		u32 dl_flags = fw_devlink_get_flags();
		struct device *con_dev;
		bool own_link = true;
		int ret;

		con_dev = get_dev_from_fwnode(link->consumer);
		/*
		 * If the consumer fwnode has no device, try the closest
		 * ancestor that does — unless that ancestor is also an
		 * ancestor of @dev's fwnode, in which case linking to it
		 * would be self-referential and is skipped.
		 */
		if (!con_dev) {
			con_dev = fwnode_get_next_parent_dev(link->consumer);
			if (con_dev &&
			    fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
				put_device(con_dev);
				con_dev = NULL;
			} else {
				/* Proxy link on behalf of a descendant. */
				own_link = false;
				dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
			}
		}

		if (!con_dev)
			continue;

		ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
		put_device(con_dev);
		/* Keep the fwnode link if it is not ours or must be retried. */
		if (!own_link || ret == -EAGAIN)
			continue;

		__fwnode_link_del(link);
	}
}
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
/**
 * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
 * @dev: The consumer device that needs to be linked to its suppliers
 * @fwnode: Root of the fwnode tree of the device
 *
 * Converts fwnode-level supplier links of @fwnode (and, recursively, of all
 * its descendant fwnodes that do not have their own device) into device
 * links from @dev.  Links created on behalf of descendant fwnodes use
 * permissive flags.  When a conversion succeeds, the supplier's own
 * supplier links are processed too, propagating links upward.  Must be
 * called with fwnode_link_lock held.
 */
static void __fw_devlink_link_to_suppliers(struct device *dev,
					   struct fwnode_handle *fwnode)
{
	bool own_link = (dev->fwnode == fwnode);
	struct fwnode_link *link, *tmp;
	struct fwnode_handle *child = NULL;
	u32 dl_flags;

	if (own_link)
		dl_flags = fw_devlink_get_flags();
	else
		dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;

	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
		int ret;
		struct device *sup_dev;
		struct fwnode_handle *sup = link->supplier;

		ret = fw_devlink_create_devlink(dev, sup, dl_flags);
		/* Keep the fwnode link if it is not ours or must be retried. */
		if (!own_link || ret == -EAGAIN)
			continue;

		__fwnode_link_del(link);

		/* No device link was created, so no propagation either. */
		if (ret)
			continue;

		/*
		 * A device link to this supplier now exists; the supplier's
		 * own fwnode supplier links can be processed on its behalf.
		 * NOTE(review): ret == 0 implies the supplier device existed
		 * when the link was created — assumed still valid here.
		 */
		sup_dev = get_dev_from_fwnode(sup);
		__fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
		put_device(sup_dev);
	}

	/*
	 * Recurse into available child fwnodes; their supplier links are
	 * treated as (permissive) supplier links of @dev.
	 */
	while ((child = fwnode_get_next_available_child_node(fwnode, child)))
		__fw_devlink_link_to_suppliers(dev, child);
}
1985
/*
 * Entry point called when a device with a fwnode is added: parse the
 * fwnode tree for supplier information and convert the resulting fwnode
 * links (in both directions) into device links.
 */
static void fw_devlink_link_device(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;

	if (!fw_devlink_flags)
		return;

	fw_devlink_parse_fwtree(fwnode);

	mutex_lock(&fwnode_link_lock);
	__fw_devlink_link_to_consumers(dev);
	__fw_devlink_link_to_suppliers(dev, fwnode);
	mutex_unlock(&fwnode_link_lock);
}
2000
2001
2002
2003int (*platform_notify)(struct device *dev) = NULL;
2004int (*platform_notify_remove)(struct device *dev) = NULL;
2005static struct kobject *dev_kobj;
2006struct kobject *sysfs_dev_char_kobj;
2007struct kobject *sysfs_dev_block_kobj;
2008
2009static DEFINE_MUTEX(device_hotplug_lock);
2010
/* Serialize device hotplug operations. */
void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}
2015
/* Release the device hotplug lock. */
void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}
2020
/*
 * Try to take the hotplug lock from a sysfs handler.  If it is contended,
 * sleep briefly and restart the syscall instead of blocking, so that a
 * concurrent device removal cannot deadlock with the attribute write.
 */
int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}
2030
#ifdef CONFIG_BLOCK
/* True unless @dev is a block-device partition. */
static inline int device_is_not_partition(struct device *dev)
{
	return !(dev->type == &part_type);
}
#else
/* Without CONFIG_BLOCK there are no partition devices. */
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif
2042
/* Notify platform code (ACPI, software nodes, legacy hook) of a new device. */
static void device_platform_notify(struct device *dev)
{
	acpi_device_notify(dev);

	software_node_notify(dev);

	if (platform_notify)
		platform_notify(dev);
}
2052
/* Mirror of device_platform_notify() for device removal. */
static void device_platform_notify_remove(struct device *dev)
{
	acpi_device_notify_remove(dev);

	software_node_notify_remove(dev);

	if (platform_notify_remove)
		platform_notify_remove(dev);
}
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
/**
 * dev_driver_string - Return a device's driver name, if at all possible
 * @dev: struct device to get the name of
 *
 * Will return the device's driver's name if it is bound to a device.  If
 * the device is not bound to a driver, it will return the name of the bus
 * it is attached to (which may be the empty string).
 */
const char *dev_driver_string(const struct device *dev)
{
	struct device_driver *drv;

	/*
	 * dev->driver can change to NULL underneath us because of unbinding,
	 * so read it once and use the resulting value.
	 */
	drv = READ_ONCE(dev->driver);
	return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
2084
2085#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
2086
2087static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
2088 char *buf)
2089{
2090 struct device_attribute *dev_attr = to_dev_attr(attr);
2091 struct device *dev = kobj_to_dev(kobj);
2092 ssize_t ret = -EIO;
2093
2094 if (dev_attr->show)
2095 ret = dev_attr->show(dev, dev_attr, buf);
2096 if (ret >= (ssize_t)PAGE_SIZE) {
2097 printk("dev_attr_show: %pS returned bad count\n",
2098 dev_attr->show);
2099 }
2100 return ret;
2101}
2102
2103static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
2104 const char *buf, size_t count)
2105{
2106 struct device_attribute *dev_attr = to_dev_attr(attr);
2107 struct device *dev = kobj_to_dev(kobj);
2108 ssize_t ret = -EIO;
2109
2110 if (dev_attr->store)
2111 ret = dev_attr->store(dev, dev_attr, buf, count);
2112 return ret;
2113}
2114
/* sysfs operations for all device attributes. */
static const struct sysfs_ops dev_sysfs_ops = {
	.show	= dev_attr_show,
	.store	= dev_attr_store,
};
2119
2120#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
2121
2122ssize_t device_store_ulong(struct device *dev,
2123 struct device_attribute *attr,
2124 const char *buf, size_t size)
2125{
2126 struct dev_ext_attribute *ea = to_ext_attr(attr);
2127 int ret;
2128 unsigned long new;
2129
2130 ret = kstrtoul(buf, 0, &new);
2131 if (ret)
2132 return ret;
2133 *(unsigned long *)(ea->var) = new;
2134
2135 return size;
2136}
2137EXPORT_SYMBOL_GPL(device_store_ulong);
2138
/* Generic show() for an extended attribute backed by an unsigned long (hex). */
ssize_t device_show_ulong(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);
2147
2148ssize_t device_store_int(struct device *dev,
2149 struct device_attribute *attr,
2150 const char *buf, size_t size)
2151{
2152 struct dev_ext_attribute *ea = to_ext_attr(attr);
2153 int ret;
2154 long new;
2155
2156 ret = kstrtol(buf, 0, &new);
2157 if (ret)
2158 return ret;
2159
2160 if (new > INT_MAX || new < INT_MIN)
2161 return -EINVAL;
2162 *(int *)(ea->var) = new;
2163
2164 return size;
2165}
2166EXPORT_SYMBOL_GPL(device_store_int);
2167
/* Generic show() for an extended attribute backed by an int. */
ssize_t device_show_int(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);
2177
/* Generic store() for an extended attribute backed by a bool. */
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	if (strtobool(buf, ea->var) < 0)
		return -EINVAL;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);
2189
/* Generic show() for an extended attribute backed by a bool. */
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);
2198
2199
2200
2201
2202
2203
2204
2205
2206
/**
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * Called once the reference count for the object reaches 0.  Releases all
 * devres resources first, then dispatches to the most specific release
 * callback available (device, type, then class), and finally frees the
 * private data.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without a driver attached and
	 * managed resources may have been acquired for them; release those
	 * here, before any release callback frees the device itself.
	 */
	devres_release_all(dev);

	kfree(dev->dma_range_map);

	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
			dev_name(dev));
	kfree(p);
}
2236
2237static const void *device_namespace(struct kobject *kobj)
2238{
2239 struct device *dev = kobj_to_dev(kobj);
2240 const void *ns = NULL;
2241
2242 if (dev->class && dev->class->ns_type)
2243 ns = dev->class->namespace(dev);
2244
2245 return ns;
2246}
2247
/* Let the device's class override the uid/gid of its sysfs entries. */
static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->class && dev->class->get_ownership)
		dev->class->get_ownership(dev, uid, gid);
}
2255
/* kobject type shared by all struct device kobjects. */
static struct kobj_type device_ktype = {
	.release	= device_release,
	.sysfs_ops	= &dev_sysfs_ops,
	.namespace	= device_namespace,
	.get_ownership	= device_get_ownership,
};
2262
2263
2264static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
2265{
2266 struct kobj_type *ktype = get_ktype(kobj);
2267
2268 if (ktype == &device_ktype) {
2269 struct device *dev = kobj_to_dev(kobj);
2270 if (dev->bus)
2271 return 1;
2272 if (dev->class)
2273 return 1;
2274 }
2275 return 0;
2276}
2277
2278static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
2279{
2280 struct device *dev = kobj_to_dev(kobj);
2281
2282 if (dev->bus)
2283 return dev->bus->name;
2284 if (dev->class)
2285 return dev->class->name;
2286 return NULL;
2287}
2288
/*
 * Fill in the uevent environment for a device: char/block device node
 * information, device type, bound driver, firmware data, then bus-,
 * class- and type-specific variables (in that order).
 */
static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* Add device node properties if the device has a devt. */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			/* tmp holds any buffer device_get_devnode() allocated. */
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common DT/OF information about the device. */
	of_device_uevent(dev, env);

	/* Have the bus-specific function add its stuff. */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* Have the class-specific function add its stuff. */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* Have the device-type-specific function add its stuff. */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}
2355
/* uevent operations for the devices kset. */
static const struct kset_uevent_ops device_uevent_ops = {
	.filter =	dev_uevent_filter,
	.name =		dev_uevent_name,
	.uevent =	dev_uevent,
};
2361
/*
 * "uevent" attribute show: synthesize the environment that a uevent for
 * this device would carry and print it, one variable per line.
 */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* Search the kset that the device belongs to (may be a parent's). */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* Respect the filter function, if any. */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* Let the kset-specific function add its keys. */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* Copy the collected environment into the output buffer. */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	kfree(env);
	return len;
}
2404
/*
 * "uevent" attribute store: trigger a synthetic uevent for the device
 * ("add", "change", ... as written by userspace).
 */
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&dev->kobj, buf, count);

	if (rc) {
		dev_err(dev, "uevent: failed to send synthetic uevent\n");
		return rc;
	}

	return count;
}
static DEVICE_ATTR_RW(uevent);
2420
2421static ssize_t online_show(struct device *dev, struct device_attribute *attr,
2422 char *buf)
2423{
2424 bool val;
2425
2426 device_lock(dev);
2427 val = !dev->offline;
2428 device_unlock(dev);
2429 return sysfs_emit(buf, "%u\n", val);
2430}
2431
/*
 * "online" attribute store: bring the device online or take it offline,
 * serialized against hotplug via lock_device_hotplug_sysfs().
 */
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);
2451
2452static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
2453 char *buf)
2454{
2455 const char *loc;
2456
2457 switch (dev->removable) {
2458 case DEVICE_REMOVABLE:
2459 loc = "removable";
2460 break;
2461 case DEVICE_FIXED:
2462 loc = "fixed";
2463 break;
2464 default:
2465 loc = "unknown";
2466 }
2467 return sysfs_emit(buf, "%s\n", loc);
2468}
2469static DEVICE_ATTR_RO(removable);
2470
/* Create all sysfs attribute groups in @groups under the device kobject. */
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);
2476
/* Remove the sysfs attribute groups created by device_add_groups(). */
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);
2483
/* devres payload: either a single attribute group or a NULL-terminated list. */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};
2488
/* devres match callback: compare the stored group pointer against @data. */
static int devm_attr_group_match(struct device *dev, void *res, void *data)
{
	return ((union device_attr_group_devres *)res)->group == data;
}
2493
/* devres release callback: remove a single managed attribute group. */
static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}
2502
/* devres release callback: remove a managed list of attribute groups. */
static void devm_attr_groups_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group **groups = devres->groups;

	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
	sysfs_remove_groups(&dev->kobj, groups);
}
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
/**
 * devm_device_add_group - given a device, create a managed attribute group
 * @dev:	The device to create the group for
 * @grp:	The attribute group to create
 *
 * This function creates a group for the first time.  It will explicitly
 * warn and error if any of the attribute files being created already exist.
 * The group is removed automatically on driver detach via devres.
 *
 * Returns 0 on success or error code on failure.
 */
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_group_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_group(&dev->kobj, grp);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->group = grp;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
2543
2544
2545
2546
2547
2548
2549
2550
2551
/**
 * devm_device_remove_group: remove a managed group from a device
 * @dev:	device to remove the group from
 * @grp:	group to remove
 *
 * This function removes a group of attributes from a device, and releases
 * the associated devres entry (warning if none is found).
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
/**
 * devm_device_add_groups - create a bunch of managed attribute groups
 * @dev:	The device to create the group for
 * @groups:	The attribute groups to create, NULL terminated
 *
 * This function creates a bunch of managed attribute groups.  If an error
 * occurs when creating a group, all previously created groups will be
 * removed, unwinding everything back to the original state.  The groups
 * are removed automatically on driver detach via devres.
 *
 * Returns 0 on success or error code from sysfs_create_groups on failure.
 */
int devm_device_add_groups(struct device *dev,
			   const struct attribute_group **groups)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_groups_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_groups(&dev->kobj, groups);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->groups = groups;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_groups);
2596
2597
2598
2599
2600
2601
2602
2603
2604
/**
 * devm_device_remove_groups - remove a list of managed groups
 * @dev:	The device for the groups to be removed from
 * @groups:	NULL terminated list of groups to be removed
 *
 * This function removes a list of attribute groups from the device, and
 * releases the associated devres entry (warning if none is found).
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);
2613
/*
 * Create all standard sysfs attributes for a device being added: class
 * groups, type groups, device-specific groups, and the conditional
 * "online", "waiting_for_supplier" and "removable" files.  On any failure,
 * everything created so far is torn down in reverse order.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	/* Only meaningful when fw_devlink may actually block consumers. */
	if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
		if (error)
			goto err_remove_dev_online;
	}

	if (dev_removable_is_valid(dev)) {
		error = device_create_file(dev, &dev_attr_removable);
		if (error)
			goto err_remove_dev_waiting_for_supplier;
	}

	return 0;

 err_remove_dev_waiting_for_supplier:
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
 err_remove_dev_online:
	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}
2671
/* Reverse of device_add_attrs(); removal of absent files is harmless. */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_removable);
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}
2688
/* "dev" attribute show: print the device's major:minor numbers. */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);
2695
2696
2697struct kset *devices_kset;
2698
2699
2700
2701
2702
2703
/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
2714
2715
2716
2717
2718
2719
/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
2730
2731
2732
2733
2734
/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}
2744
2745
2746
2747
2748
2749
/**
 * device_create_file - create sysfs attribute file for device.
 * @dev: device.
 * @attr: device attribute descriptor.
 *
 * Warns about attributes whose mode grants an access direction for which
 * no corresponding callback exists.  A NULL @dev is tolerated (no-op).
 */
int device_create_file(struct device *dev,
		       const struct device_attribute *attr)
{
	int error = 0;

	if (dev) {
		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
			"Attribute %s: write permission without 'store'\n",
			attr->attr.name);
		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
			"Attribute %s: read permission without 'show'\n",
			attr->attr.name);
		error = sysfs_create_file(&dev->kobj, &attr->attr);
	}

	return error;
}
EXPORT_SYMBOL_GPL(device_create_file);
2768
2769
2770
2771
2772
2773
/**
 * device_remove_file - remove sysfs attribute file.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
void device_remove_file(struct device *dev,
			const struct device_attribute *attr)
{
	if (dev)
		sysfs_remove_file(&dev->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(device_remove_file);
2781
2782
2783
2784
2785
2786
2787
2788
2789bool device_remove_file_self(struct device *dev,
2790 const struct device_attribute *attr)
2791{
2792 if (dev)
2793 return sysfs_remove_file_self(&dev->kobj, &attr->attr);
2794 else
2795 return false;
2796}
2797EXPORT_SYMBOL_GPL(device_remove_file_self);
2798
2799
2800
2801
2802
2803
2804int device_create_bin_file(struct device *dev,
2805 const struct bin_attribute *attr)
2806{
2807 int error = -EINVAL;
2808 if (dev)
2809 error = sysfs_create_bin_file(&dev->kobj, attr);
2810 return error;
2811}
2812EXPORT_SYMBOL_GPL(device_create_bin_file);
2813
2814
2815
2816
2817
2818
/**
 * device_remove_bin_file - remove sysfs binary attribute file
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr)
{
	if (dev)
		sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);
2826
/* klist node get callback: pin the child device while it is on the list. */
static void klist_children_get(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	get_device(dev);
}
2834
/* klist node put callback: drop the reference taken by klist_children_get(). */
static void klist_children_put(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	put_device(dev);
}
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing its
 * fields (kobject, locks, lists, PM state, device-link lists, DMA
 * defaults).  It is the first half of device_add(); callers that use it
 * directly must later call device_add() (to register) or put_device()
 * (to give up the initial reference).
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
#ifdef CONFIG_PROVE_LOCKING
	mutex_init(&dev->lockdep_mutex);
#endif
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, NUMA_NO_NODE);
#ifdef CONFIG_GENERIC_MSI_IRQ
	raw_spin_lock_init(&dev->msi_lock);
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	INIT_LIST_HEAD(&dev->links.defer_sync);
	dev->links.status = DL_DEV_NO_DRIVER;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	dev->dma_coherent = dma_default_coherent;
#endif
#ifdef CONFIG_SWIOTLB
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
2895
/*
 * Return (creating on first use) the /sys/devices/virtual directory that
 * parents class devices without a real parent device.
 */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}
2906
/* Glue directory: a per-class kobject inserted between parent and device. */
struct class_dir {
	struct kobject kobj;
	struct class *class;
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
2913
/* kobject release for a glue directory. */
static void class_dir_release(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	kfree(dir);
}
2919
/* Children of a glue directory inherit the class's namespace type. */
static const
struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	return dir->class->ns_type;
}
2926
/* kobject type for glue directories. */
static struct kobj_type class_dir_ktype = {
	.release	= class_dir_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.child_ns_type	= class_dir_child_ns_type
};
2932
/*
 * Allocate and register a glue directory named after @class under
 * @parent_kobj.  Returns the new kobject or an ERR_PTR on failure.
 */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}
2955
2956static DEFINE_MUTEX(gdp_mutex);
2957
/*
 * Determine the sysfs parent kobject for @dev.  For class devices this may
 * be an existing or newly created per-class "glue" directory; otherwise it
 * is the parent device's kobject, the bus's dev_root, or NULL (top level).
 */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block with sysfs_deprecated */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".  Class-devices
		 * with a non-class parent hang directly off the parent,
		 * unless the class uses namespaces (then a glue dir is
		 * needed to carry the namespace tag).
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}
3017
3018static inline bool live_in_glue_dir(struct kobject *kobj,
3019 struct device *dev)
3020{
3021 if (!kobj || !dev->class ||
3022 kobj->kset != &dev->class->p->glue_dirs)
3023 return false;
3024 return true;
3025}
3026
/* Return @dev's sysfs parent, which may (or may not) be a glue dir. */
static inline struct kobject *get_glue_dir(struct device *dev)
{
 return dev->kobj.parent;
}
3031
3032
3033
3034
3035
3036
/*
 * cleanup_glue_dir - drop our reference on @glue_dir, deleting it when empty.
 * @dev: device whose addition/removal is releasing the glue dir
 * @glue_dir: kobject returned earlier by get_device_parent()
 *
 * Runs under gdp_mutex so it cannot race with get_device_parent() handing
 * the same glue dir to a concurrent device_add().
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
 unsigned int ref;

 /* see if we live in a "glue" directory */
 if (!live_in_glue_dir(glue_dir, dev))
  return;

 mutex_lock(&gdp_mutex);
 /*
  * The refcount check below guards against a race where one CPU is
  * failing device_add() (and hence about to drop its glue-dir ref)
  * while another CPU has just looked the same glue dir up in
  * get_device_parent() and taken a new reference.  Deleting the
  * directory on the bare "no children" test alone would yank it out
  * from under the second CPU, whose later kobject_add() would then
  * fail (or worse, re-create the directory and collide).
  *
  * Therefore the glue dir is only kobject_del()'d when it has no
  * sysfs children AND our reference is the last one (kref would drop
  * to zero after our put).  Both the lookup (get_device_parent) and
  * this teardown run under gdp_mutex, which makes the read of the
  * kref below stable.
  */
 ref = kref_read(&glue_dir->kref);
 if (!kobject_has_children(glue_dir) && !--ref)
  kobject_del(glue_dir);
 kobject_put(glue_dir);
 mutex_unlock(&gdp_mutex);
}
3100
/*
 * device_add_class_symlinks - create the standard sysfs links for @dev:
 * "of_node" (if backed by a device-tree node), "subsystem", "device"
 * (to the parent), and the class-directory entry named after the device.
 * On error, links created so far are unwound in reverse order.
 */
static int device_add_class_symlinks(struct device *dev)
{
 struct device_node *of_node = dev_of_node(dev);
 int error;

 if (of_node) {
  error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
  if (error)
   dev_warn(dev, "Error %d creating of_node link\n",error);
  /* An error here doesn't warrant bringing down the device */
 }

 if (!dev->class)
  return 0;

 error = sysfs_create_link(&dev->kobj,
      &dev->class->p->subsys.kobj,
      "subsystem");
 if (error)
  goto out_devnode;

 if (dev->parent && device_is_not_partition(dev)) {
  error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
       "device");
  if (error)
   goto out_subsys;
 }

#ifdef CONFIG_BLOCK
 /* /sys/block has directories for block disks, not mere symlinks */
 if (sysfs_deprecated && dev->class == &block_class)
  return 0;
#endif

 /* link in the class directory pointing to the device */
 error = sysfs_create_link(&dev->class->p->subsys.kobj,
      &dev->kobj, dev_name(dev));
 if (error)
  goto out_device;

 return 0;

out_device:
 sysfs_remove_link(&dev->kobj, "device");

out_subsys:
 sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
 sysfs_remove_link(&dev->kobj, "of_node");
 return error;
}
3152
/* Undo device_add_class_symlinks(); tolerant of links that were skipped. */
static void device_remove_class_symlinks(struct device *dev)
{
 if (dev_of_node(dev))
  sysfs_remove_link(&dev->kobj, "of_node");

 if (!dev->class)
  return;

 if (dev->parent && device_is_not_partition(dev))
  sysfs_remove_link(&dev->kobj, "device");
 sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
 /* deprecated layout never created a class-directory link for disks */
 if (sysfs_deprecated && dev->class == &block_class)
  return;
#endif
 sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}
3170
3171
3172
3173
3174
3175
/**
 * dev_set_name - set a device's sysfs name from a printf-style format.
 * @dev: device
 * @fmt: format string for the device's name
 *
 * Returns 0 on success or a negative errno from the underlying
 * kobject name allocation.
 */
int dev_set_name(struct device *dev, const char *fmt, ...)
{
 va_list vargs;
 int err;

 va_start(vargs, fmt);
 err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
 va_end(vargs);
 return err;
}
EXPORT_SYMBOL_GPL(dev_set_name);
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199static struct kobject *device_to_dev_kobj(struct device *dev)
3200{
3201 struct kobject *kobj;
3202
3203 if (dev->class)
3204 kobj = dev->class->dev_kobj;
3205 else
3206 kobj = sysfs_dev_char_kobj;
3207
3208 return kobj;
3209}
3210
3211static int device_create_sys_dev_entry(struct device *dev)
3212{
3213 struct kobject *kobj = device_to_dev_kobj(dev);
3214 int error = 0;
3215 char devt_str[15];
3216
3217 if (kobj) {
3218 format_dev_t(devt_str, dev->devt);
3219 error = sysfs_create_link(kobj, &dev->kobj, devt_str);
3220 }
3221
3222 return error;
3223}
3224
3225static void device_remove_sys_dev_entry(struct device *dev)
3226{
3227 struct kobject *kobj = device_to_dev_kobj(dev);
3228 char devt_str[15];
3229
3230 if (kobj) {
3231 format_dev_t(devt_str, dev->devt);
3232 sysfs_remove_link(kobj, devt_str);
3233 }
3234}
3235
/*
 * Allocate and initialize @dev->p (driver-core private state): back
 * pointer, children klist with get/put hooks, deferred-probe list head.
 * Returns 0 or -ENOMEM.
 */
static int device_private_init(struct device *dev)
{
 dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
 if (!dev->p)
  return -ENOMEM;
 dev->p->device = dev;
 klist_init(&dev->p->klist_children, klist_children_get,
     klist_children_put);
 INIT_LIST_HEAD(&dev->p->deferred_probe);
 return 0;
}
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
/**
 * device_add - add a device to the device hierarchy.
 * @dev: device (must have been device_initialize()d)
 *
 * Registers @dev with sysfs, its bus, PM, devtmpfs etc., emits KOBJ_ADD
 * and triggers probing.  On failure everything set up so far is unwound
 * in reverse order via the labelled error path.  Returns 0 or -errno.
 */
int device_add(struct device *dev)
{
 struct device *parent;
 struct kobject *kobj;
 struct class_interface *class_intf;
 int error = -EINVAL;
 struct kobject *glue_dir = NULL;

 dev = get_device(dev);
 if (!dev)
  goto done;

 if (!dev->p) {
  error = device_private_init(dev);
  if (error)
   goto done;
 }

 /*
  * for statically allocated devices, which should all be converted
  * some day, we need to initialize the name. We prevent reading back
  * the name, to avoid the need to stringify init_name.
  */
 if (dev->init_name) {
  dev_set_name(dev, "%s", dev->init_name);
  dev->init_name = NULL;
 }

 /* subsystems can specify simple device enumeration */
 if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
  dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

 if (!dev_name(dev)) {
  error = -EINVAL;
  goto name_error;
 }

 pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

 parent = get_device(dev->parent);
 kobj = get_device_parent(dev, parent);
 if (IS_ERR(kobj)) {
  error = PTR_ERR(kobj);
  goto parent_error;
 }
 if (kobj)
  dev->kobj.parent = kobj;

 /* use parent numa_node */
 if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
  set_dev_node(dev, dev_to_node(parent));

 /* first, register with generic layer. */
 /* we require the name to be set before, and pass NULL */
 error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
 if (error) {
  glue_dir = get_glue_dir(dev);
  goto Error;
 }

 /* notify platform of device entry */
 device_platform_notify(dev);

 error = device_create_file(dev, &dev_attr_uevent);
 if (error)
  goto attrError;

 error = device_add_class_symlinks(dev);
 if (error)
  goto SymlinkError;
 error = device_add_attrs(dev);
 if (error)
  goto AttrsError;
 error = bus_add_device(dev);
 if (error)
  goto BusError;
 error = dpm_sysfs_add(dev);
 if (error)
  goto DPMError;
 device_pm_add(dev);

 if (MAJOR(dev->devt)) {
  error = device_create_file(dev, &dev_attr_dev);
  if (error)
   goto DevAttrError;

  error = device_create_sys_dev_entry(dev);
  if (error)
   goto SysEntryError;

  devtmpfs_create_node(dev);
 }

 /* Notify clients of device addition.  This call must come
  * after dpm_sysfs_add() and before kobject_uevent().
  */
 if (dev->bus)
  blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
          BUS_NOTIFY_ADD_DEVICE, dev);

 kobject_uevent(&dev->kobj, KOBJ_ADD);

 /*
  * Check if any of the other devices (consumers) have been waiting for
  * this device (supplier) to be added so that they can create a device
  * link to it.
  *
  * This needs to happen after device_pm_add() because device_link_add()
  * requires the supplier be registered before it's called.
  *
  * But this also needs to happen before bus_probe_device() to make sure
  * waiting consumers can link to it before the driver is bound to the
  * device and the driver sync_state callback is called for this device.
  */
 if (dev->fwnode && !dev->fwnode->dev) {
  dev->fwnode->dev = dev;
  fw_devlink_link_device(dev);
 }

 bus_probe_device(dev);

 /*
  * If all driver registration is done and a newly added device doesn't
  * match with any driver, don't block its consumers from probing in
  * case the consumer device is able to operate without this supplier.
  */
 if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
  fw_devlink_unblock_consumers(dev);

 if (parent)
  klist_add_tail(&dev->p->knode_parent,
          &parent->p->klist_children);

 if (dev->class) {
  mutex_lock(&dev->class->p->mutex);
  /* tie the class to the device */
  klist_add_tail(&dev->p->knode_class,
          &dev->class->p->klist_devices);

  /* notify any interfaces that the device is here */
  list_for_each_entry(class_intf,
        &dev->class->p->interfaces, node)
   if (class_intf->add_dev)
    class_intf->add_dev(dev, class_intf);
  mutex_unlock(&dev->class->p->mutex);
 }
done:
 put_device(dev);
 return error;
 SysEntryError:
 if (MAJOR(dev->devt))
  device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
 device_pm_remove(dev);
 dpm_sysfs_remove(dev);
 DPMError:
 bus_remove_device(dev);
 BusError:
 device_remove_attrs(dev);
 AttrsError:
 device_remove_class_symlinks(dev);
 SymlinkError:
 device_remove_file(dev, &dev_attr_uevent);
 attrError:
 device_platform_notify_remove(dev);
 kobject_uevent(&dev->kobj, KOBJ_REMOVE);
 glue_dir = get_glue_dir(dev);
 kobject_del(&dev->kobj);
 Error:
 cleanup_glue_dir(dev, glue_dir);
parent_error:
 put_device(parent);
name_error:
 kfree(dev->p);
 dev->p = NULL;
 goto done;
}
EXPORT_SYMBOL_GPL(device_add);
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
/**
 * device_register - device_initialize() + device_add() in one call.
 * @dev: device
 *
 * NOTE: on failure the caller must put_device(), not free @dev directly,
 * since device_initialize() has already taken a reference.
 */
int device_register(struct device *dev)
{
 device_initialize(dev);
 return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487struct device *get_device(struct device *dev)
3488{
3489 return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
3490}
3491EXPORT_SYMBOL_GPL(get_device);
3492
3493
3494
3495
3496
3497void put_device(struct device *dev)
3498{
3499
3500 if (dev)
3501 kobject_put(&dev->kobj);
3502}
3503EXPORT_SYMBOL_GPL(put_device);
3504
/**
 * kill_device - mark @dev as dead so late binding attempts are refused.
 * @dev: device; its device_lock must be held (asserted below).
 *
 * Returns true if this call did the killing, false if the device was
 * already dead — lets callers race for teardown with a single winner.
 */
bool kill_device(struct device *dev)
{
 /*
  * Require the device lock and disallow changing the dead flag back
  * to alive: once a device starts dying it stays dead.
  */
 device_lock_assert(dev);

 if (dev->p->dead)
  return false;
 dev->p->dead = true;
 return true;
}
EXPORT_SYMBOL_GPL(kill_device);
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
/**
 * device_del - remove @dev from the system, reversing device_add().
 * @dev: device
 *
 * Does not release the caller's reference; pair with put_device() or use
 * device_unregister().  Teardown order mirrors device_add() in reverse.
 */
void device_del(struct device *dev)
{
 struct device *parent = dev->parent;
 struct kobject *glue_dir = NULL;
 struct class_interface *class_intf;
 unsigned int noio_flag;

 device_lock(dev);
 kill_device(dev);
 device_unlock(dev);

 if (dev->fwnode && dev->fwnode->dev == dev)
  dev->fwnode->dev = NULL;

 /*
  * Notify clients of device removal.  This call must come before
  * dpm_sysfs_remove(); memalloc_noio avoids fs reclaim re-entering
  * block-device teardown.
  */
 noio_flag = memalloc_noio_save();
 if (dev->bus)
  blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
          BUS_NOTIFY_DEL_DEVICE, dev);

 dpm_sysfs_remove(dev);
 if (parent)
  klist_del(&dev->p->knode_parent);
 if (MAJOR(dev->devt)) {
  devtmpfs_delete_node(dev);
  device_remove_sys_dev_entry(dev);
  device_remove_file(dev, &dev_attr_dev);
 }
 if (dev->class) {
  device_remove_class_symlinks(dev);

  mutex_lock(&dev->class->p->mutex);
  /* notify any interfaces that the device is now gone */
  list_for_each_entry(class_intf,
        &dev->class->p->interfaces, node)
   if (class_intf->remove_dev)
    class_intf->remove_dev(dev, class_intf);
  /* remove the device from the class list */
  klist_del(&dev->p->knode_class);
  mutex_unlock(&dev->class->p->mutex);
 }
 device_remove_file(dev, &dev_attr_uevent);
 device_remove_attrs(dev);
 bus_remove_device(dev);
 device_pm_remove(dev);
 driver_deferred_probe_del(dev);
 device_platform_notify_remove(dev);
 device_remove_properties(dev);
 device_links_purge(dev);

 if (dev->bus)
  blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
          BUS_NOTIFY_REMOVED_DEVICE, dev);
 kobject_uevent(&dev->kobj, KOBJ_REMOVE);
 glue_dir = get_glue_dir(dev);
 kobject_del(&dev->kobj);
 cleanup_glue_dir(dev, glue_dir);
 memalloc_noio_restore(noio_flag);
 put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
/**
 * device_unregister - device_del() + put_device().
 * @dev: device
 *
 * The device may linger after this returns if other code still holds
 * references; the release method runs when the last one is dropped.
 */
void device_unregister(struct device *dev)
{
 pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
 device_del(dev);
 put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);
3618
3619static struct device *prev_device(struct klist_iter *i)
3620{
3621 struct klist_node *n = klist_prev(i);
3622 struct device *dev = NULL;
3623 struct device_private *p;
3624
3625 if (n) {
3626 p = to_device_private_parent(n);
3627 dev = p->device;
3628 }
3629 return dev;
3630}
3631
3632static struct device *next_device(struct klist_iter *i)
3633{
3634 struct klist_node *n = klist_next(i);
3635 struct device *dev = NULL;
3636 struct device_private *p;
3637
3638 if (n) {
3639 p = to_device_private_parent(n);
3640 dev = p->device;
3641 }
3642 return dev;
3643}
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
/**
 * device_get_devnode - obtain the device node name for @dev.
 * @dev: device
 * @mode: out: requested file mode, if the callback sets it
 * @uid: out: requested owner, if set
 * @gid: out: requested group, if set
 * @tmp: out: set to an allocated string the CALLER must kfree(), or NULL
 *
 * Tries, in order: type->devnode(), class->devnode(), then the plain
 * device name; '!' in the name is rewritten to '/' in a fresh copy.
 * Returns NULL only on allocation failure.
 */
const char *device_get_devnode(struct device *dev,
          umode_t *mode, kuid_t *uid, kgid_t *gid,
          const char **tmp)
{
 char *s;

 *tmp = NULL;

 /* the device type may provide a specific name */
 if (dev->type && dev->type->devnode)
  *tmp = dev->type->devnode(dev, mode, uid, gid);
 if (*tmp)
  return *tmp;

 /* the class may provide a specific name */
 if (dev->class && dev->class->devnode)
  *tmp = dev->class->devnode(dev, mode);
 if (*tmp)
  return *tmp;

 /* return name without allocation, tmp == NULL */
 if (strchr(dev_name(dev), '!') == NULL)
  return dev_name(dev);

 /* replace '!' in the name with '/' */
 s = kstrdup(dev_name(dev), GFP_KERNEL);
 if (!s)
  return NULL;
 strreplace(s, '!', '/');
 return *tmp = s;
}
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
/**
 * device_for_each_child - apply @fn to each child of @parent.
 * @parent: parent device
 * @data: opaque cookie passed through to @fn
 * @fn: callback; a non-zero return stops the walk and is returned
 *
 * A parent whose private state was never set up simply has no children.
 */
int device_for_each_child(struct device *parent, void *data,
     int (*fn)(struct device *dev, void *data))
{
 struct klist_iter i;
 struct device *child;
 int error = 0;

 if (!parent->p)
  return 0;

 klist_iter_init(&parent->p->klist_children, &i);
 while (!error && (child = next_device(&i)))
  error = fn(child, data);
 klist_iter_exit(&i);
 return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child);
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
/**
 * device_for_each_child_reverse - like device_for_each_child(), but walks
 * the children list from the tail towards the head.
 * @parent: parent device
 * @data: opaque cookie passed through to @fn
 * @fn: callback; a non-zero return stops the walk and is returned
 */
int device_for_each_child_reverse(struct device *parent, void *data,
     int (*fn)(struct device *dev, void *data))
{
 struct klist_iter i;
 struct device *child;
 int error = 0;

 if (!parent->p)
  return 0;

 klist_iter_init(&parent->p->klist_children, &i);
 while ((child = prev_device(&i)) && !error)
  error = fn(child, data);
 klist_iter_exit(&i);
 return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767struct device *device_find_child(struct device *parent, void *data,
3768 int (*match)(struct device *dev, void *data))
3769{
3770 struct klist_iter i;
3771 struct device *child;
3772
3773 if (!parent)
3774 return NULL;
3775
3776 klist_iter_init(&parent->p->klist_children, &i);
3777 while ((child = next_device(&i)))
3778 if (match(child, data) && get_device(child))
3779 break;
3780 klist_iter_exit(&i);
3781 return child;
3782}
3783EXPORT_SYMBOL_GPL(device_find_child);
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795struct device *device_find_child_by_name(struct device *parent,
3796 const char *name)
3797{
3798 struct klist_iter i;
3799 struct device *child;
3800
3801 if (!parent)
3802 return NULL;
3803
3804 klist_iter_init(&parent->p->klist_children, &i);
3805 while ((child = next_device(&i)))
3806 if (sysfs_streq(dev_name(child), name) && get_device(child))
3807 break;
3808 klist_iter_exit(&i);
3809 return child;
3810}
3811EXPORT_SYMBOL_GPL(device_find_child_by_name);
3812
/*
 * devices_init - create the core sysfs skeleton: /sys/devices,
 * /sys/dev, /sys/dev/block and /sys/dev/char.  Failures unwind in
 * reverse creation order.
 */
int __init devices_init(void)
{
 devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
 if (!devices_kset)
  return -ENOMEM;
 dev_kobj = kobject_create_and_add("dev", NULL);
 if (!dev_kobj)
  goto dev_kobj_err;
 sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
 if (!sysfs_dev_block_kobj)
  goto block_kobj_err;
 sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
 if (!sysfs_dev_char_kobj)
  goto char_kobj_err;

 return 0;

 char_kobj_err:
 kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
 kobject_put(dev_kobj);
 dev_kobj_err:
 kset_unregister(devices_kset);
 return -ENOMEM;
}
3838
3839static int device_check_offline(struct device *dev, void *not_used)
3840{
3841 int ret;
3842
3843 ret = device_for_each_child(dev, NULL, device_check_offline);
3844 if (ret)
3845 return ret;
3846
3847 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
3848}
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
/**
 * device_offline - take @dev offline via its bus's ->offline() callback.
 * @dev: device
 *
 * Refused (-EPERM) when offlining is administratively disabled, and
 * (-EBUSY) when any descendant is still online.  Returns 1 if the
 * device was already offline, 0 on success, or the bus callback's
 * error.  Emits KOBJ_OFFLINE on success.
 */
int device_offline(struct device *dev)
{
 int ret;

 if (dev->offline_disabled)
  return -EPERM;

 ret = device_for_each_child(dev, NULL, device_check_offline);
 if (ret)
  return ret;

 device_lock(dev);
 if (device_supports_offline(dev)) {
  if (dev->offline) {
   ret = 1;
  } else {
   ret = dev->bus->offline(dev);
   if (!ret) {
    kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
    dev->offline = true;
   }
  }
 }
 device_unlock(dev);

 return ret;
}
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
/**
 * device_online - bring @dev back online via its bus's ->online().
 * @dev: device
 *
 * Returns 1 if the device was already online, 0 on success, or the bus
 * callback's error.  Emits KOBJ_ONLINE on success.
 */
int device_online(struct device *dev)
{
 int ret = 0;

 device_lock(dev);
 if (device_supports_offline(dev)) {
  if (dev->offline) {
   ret = dev->bus->online(dev);
   if (!ret) {
    kobject_uevent(&dev->kobj, KOBJ_ONLINE);
    dev->offline = false;
   }
  } else {
   ret = 1;
  }
 }
 device_unlock(dev);

 return ret;
}
3919
/*
 * A "root" device: a named anchor at the top of /sys/devices for
 * subsystems that have no real parent device; @owner is the module
 * the "module" symlink points back to.
 */
struct root_device {
 struct device dev;
 struct module *owner;
};

static inline struct root_device *to_root_device(struct device *d)
{
 return container_of(d, struct root_device, dev);
}
3929
/* Release callback: free the containing root_device. */
static void root_device_release(struct device *dev)
{
 kfree(to_root_device(dev));
}
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
/**
 * __root_device_register - register a named root device.
 * @name: device name
 * @owner: module owning the device (a "module" symlink is created for it)
 *
 * Returns the new device or an ERR_PTR.  Use the root_device_register()
 * macro rather than calling this directly.
 */
struct device *__root_device_register(const char *name, struct module *owner)
{
 struct root_device *root;
 int err = -ENOMEM;

 root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
 if (!root)
  return ERR_PTR(err);

 err = dev_set_name(&root->dev, "%s", name);
 if (err) {
  kfree(root);
  return ERR_PTR(err);
 }

 root->dev.release = root_device_release;

 err = device_register(&root->dev);
 if (err) {
  /* device_register() took a ref; release frees root */
  put_device(&root->dev);
  return ERR_PTR(err);
 }

#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
 if (owner) {
  struct module_kobject *mk = &owner->mkobj;

  err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
  if (err) {
   device_unregister(&root->dev);
   return ERR_PTR(err);
  }
  root->owner = owner;
 }
#endif

 return &root->dev;
}
EXPORT_SYMBOL_GPL(__root_device_register);
3996
3997
3998
3999
4000
4001
4002
4003
/**
 * root_device_unregister - unregister and free a root device.
 * @dev: device returned by root_device_register()
 */
void root_device_unregister(struct device *dev)
{
 struct root_device *root = to_root_device(dev);

 if (root->owner)
  sysfs_remove_link(&root->dev.kobj, "module");

 device_unregister(dev);
}
EXPORT_SYMBOL_GPL(root_device_unregister);
4014
4015
/* Release callback for devices allocated by device_create*(). */
static void device_create_release(struct device *dev)
{
 pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
 kfree(dev);
}
4021
/*
 * Common worker for device_create() and device_create_with_groups():
 * allocates a struct device, fills in class/parent/devt/groups/drvdata,
 * formats the name from @fmt/@args and registers it.  Returns the new
 * device or an ERR_PTR; on any failure the (possibly NULL) device is
 * released via put_device(), which ends up in device_create_release().
 */
static __printf(6, 0) struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
      dev_t devt, void *drvdata,
      const struct attribute_group **groups,
      const char *fmt, va_list args)
{
 struct device *dev = NULL;
 int retval = -ENODEV;

 if (class == NULL || IS_ERR(class))
  goto error;

 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 if (!dev) {
  retval = -ENOMEM;
  goto error;
 }

 device_initialize(dev);
 dev->devt = devt;
 dev->class = class;
 dev->parent = parent;
 dev->groups = groups;
 dev->release = device_create_release;
 dev_set_drvdata(dev, drvdata);

 retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
 if (retval)
  goto error;

 retval = device_add(dev);
 if (retval)
  goto error;

 return dev;

error:
 put_device(dev);
 return ERR_PTR(retval);
}
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
/**
 * device_create - create and register a class device.
 * @class: class the device belongs to (must not be NULL/ERR_PTR)
 * @parent: parent device, may be NULL
 * @devt: dev_t to register (0 for none)
 * @drvdata: driver data stored via dev_set_drvdata()
 * @fmt: printf-style device name
 *
 * Returns the new device or an ERR_PTR.
 */
struct device *device_create(struct class *class, struct device *parent,
        dev_t devt, void *drvdata, const char *fmt, ...)
{
 va_list vargs;
 struct device *dev;

 va_start(vargs, fmt);
 dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
      fmt, vargs);
 va_end(vargs);
 return dev;
}
EXPORT_SYMBOL_GPL(device_create);
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
/**
 * device_create_with_groups - device_create() plus default sysfs groups.
 * @class: class the device belongs to
 * @parent: parent device, may be NULL
 * @devt: dev_t to register (0 for none)
 * @drvdata: driver data stored via dev_set_drvdata()
 * @groups: NULL-terminated attribute groups created atomically with the
 *          device, before its KOBJ_ADD uevent fires
 * @fmt: printf-style device name
 *
 * Returns the new device or an ERR_PTR.
 */
struct device *device_create_with_groups(struct class *class,
      struct device *parent, dev_t devt,
      void *drvdata,
      const struct attribute_group **groups,
      const char *fmt, ...)
{
 va_list vargs;
 struct device *dev;

 va_start(vargs, fmt);
 dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
      fmt, vargs);
 va_end(vargs);
 return dev;
}
EXPORT_SYMBOL_GPL(device_create_with_groups);
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153void device_destroy(struct class *class, dev_t devt)
4154{
4155 struct device *dev;
4156
4157 dev = class_find_device_by_devt(class, devt);
4158 if (dev) {
4159 put_device(dev);
4160 device_unregister(dev);
4161 }
4162}
4163EXPORT_SYMBOL_GPL(device_destroy);
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
/**
 * device_rename - rename a device's sysfs entry.
 * @dev: device
 * @new_name: new name
 *
 * Renames the class-directory symlink first, then the kobject itself.
 * Note this is inherently racy against concurrent lookups by the old
 * name; it exists mainly for network-interface renaming.  Returns 0 or
 * a negative errno.
 */
int device_rename(struct device *dev, const char *new_name)
{
 struct kobject *kobj = &dev->kobj;
 char *old_device_name = NULL;
 int error;

 dev = get_device(dev);
 if (!dev)
  return -EINVAL;

 dev_dbg(dev, "renaming to %s\n", new_name);

 /* keep a copy for the symlink rename below */
 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
 if (!old_device_name) {
  error = -ENOMEM;
  goto out;
 }

 if (dev->class) {
  error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
          kobj, old_device_name,
          new_name, kobject_namespace(kobj));
  if (error)
   goto out;
 }

 error = kobject_rename(kobj, new_name);
 if (error)
  goto out;

out:
 put_device(dev);

 kfree(old_device_name);

 return error;
}
EXPORT_SYMBOL_GPL(device_rename);
4242
4243static int device_move_class_links(struct device *dev,
4244 struct device *old_parent,
4245 struct device *new_parent)
4246{
4247 int error = 0;
4248
4249 if (old_parent)
4250 sysfs_remove_link(&dev->kobj, "device");
4251 if (new_parent)
4252 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
4253 "device");
4254 return error;
4255}
4256
4257
4258
4259
4260
4261
4262
/**
 * device_move - reparent @dev under @new_parent.
 * @dev: device to move
 * @new_parent: new parent, may be NULL
 * @dpm_order: how to reorder the PM lists relative to the new parent
 *
 * Moves the kobject, fixes up the children klists and class symlinks,
 * and reorders the PM/devices lists.  On symlink failure the move is
 * rolled back best-effort.  Returns 0 or a negative errno.
 */
int device_move(struct device *dev, struct device *new_parent,
  enum dpm_order dpm_order)
{
 int error;
 struct device *old_parent;
 struct kobject *new_parent_kobj;

 dev = get_device(dev);
 if (!dev)
  return -EINVAL;

 device_pm_lock();
 new_parent = get_device(new_parent);
 new_parent_kobj = get_device_parent(dev, new_parent);
 if (IS_ERR(new_parent_kobj)) {
  error = PTR_ERR(new_parent_kobj);
  put_device(new_parent);
  goto out;
 }

 pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
   __func__, new_parent ? dev_name(new_parent) : "<NULL>");
 error = kobject_move(&dev->kobj, new_parent_kobj);
 if (error) {
  cleanup_glue_dir(dev, new_parent_kobj);
  put_device(new_parent);
  goto out;
 }
 old_parent = dev->parent;
 dev->parent = new_parent;
 if (old_parent)
  klist_remove(&dev->p->knode_parent);
 if (new_parent) {
  klist_add_tail(&dev->p->knode_parent,
          &new_parent->p->klist_children);
  set_dev_node(dev, dev_to_node(new_parent));
 }

 if (dev->class) {
  error = device_move_class_links(dev, old_parent, new_parent);
  if (error) {
   /* We ignore errors on cleanup since we're hosed anyway... */
   device_move_class_links(dev, new_parent, old_parent);
   if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
    if (new_parent)
     klist_remove(&dev->p->knode_parent);
    dev->parent = old_parent;
    if (old_parent) {
     klist_add_tail(&dev->p->knode_parent,
             &old_parent->p->klist_children);
     set_dev_node(dev, dev_to_node(old_parent));
    }
   }
   cleanup_glue_dir(dev, new_parent_kobj);
   put_device(new_parent);
   goto out;
  }
 }
 switch (dpm_order) {
 case DPM_ORDER_NONE:
  break;
 case DPM_ORDER_DEV_AFTER_PARENT:
  device_pm_move_after(dev, new_parent);
  devices_kset_move_after(dev, new_parent);
  break;
 case DPM_ORDER_PARENT_BEFORE_DEV:
  device_pm_move_before(new_parent, dev);
  devices_kset_move_before(new_parent, dev);
  break;
 case DPM_ORDER_DEV_LAST:
  device_pm_move_last(dev);
  devices_kset_move_last(dev);
  break;
 }

 put_device(old_parent);
out:
 device_pm_unlock();
 put_device(dev);
 return error;
}
EXPORT_SYMBOL_GPL(device_move);
4345
/*
 * Change ownership of all sysfs attribute groups attached to @dev:
 * class dev_groups, device-type groups, the device's own groups, and
 * the "online" attribute when the device supports offlining.  Returns
 * 0 or the first error encountered.
 */
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
         kgid_t kgid)
{
 struct kobject *kobj = &dev->kobj;
 struct class *class = dev->class;
 const struct device_type *type = dev->type;
 int error;

 if (class) {
  /*
   * Change the owner of the device groups the class provided
   * for all devices it covers.
   */
  error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
        kgid);
  if (error)
   return error;
 }

 if (type) {
  /*
   * Change the owner of the device groups the device type
   * provided.
   */
  error = sysfs_groups_change_owner(kobj, type->groups, kuid,
        kgid);
  if (error)
   return error;
 }

 /* Change the owner of the device's own groups. */
 error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
 if (error)
  return error;

 if (device_supports_offline(dev) && !dev->offline_disabled) {
  /* Change online device attributes */
  error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
      kuid, kgid);
  if (error)
   return error;
 }

 return 0;
}
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
4405{
4406 int error;
4407 struct kobject *kobj = &dev->kobj;
4408
4409 dev = get_device(dev);
4410 if (!dev)
4411 return -EINVAL;
4412
4413
4414
4415
4416
4417 error = sysfs_change_owner(kobj, kuid, kgid);
4418 if (error)
4419 goto out;
4420
4421
4422
4423
4424
4425
4426 error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
4427 kgid);
4428 if (error)
4429 goto out;
4430
4431
4432
4433
4434
4435
4436 error = device_attrs_change_owner(dev, kuid, kgid);
4437 if (error)
4438 goto out;
4439
4440 error = dpm_sysfs_change_owner(dev, kuid, kgid);
4441 if (error)
4442 goto out;
4443
4444#ifdef CONFIG_BLOCK
4445 if (sysfs_deprecated && dev->class == &block_class)
4446 goto out;
4447#endif
4448
4449
4450
4451
4452
4453
4454
4455 error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
4456 dev_name(dev), kuid, kgid);
4457 if (error)
4458 goto out;
4459
4460out:
4461 put_device(dev);
4462 return error;
4463}
4464EXPORT_SYMBOL_GPL(device_change_owner);
4465
4466
4467
4468
/**
 * device_shutdown - call ->shutdown() on each device, children first.
 *
 * Walks devices_kset from the tail (devices are appended at add time,
 * so the tail holds the newest/childmost entries), detaching each entry
 * before dropping the list lock so a concurrent device_del() cannot
 * corrupt the walk.
 */
void device_shutdown(void)
{
 struct device *dev, *parent;

 wait_for_device_probe();
 device_block_probing();

 cpufreq_suspend();

 spin_lock(&devices_kset->list_lock);
 /*
  * Walk the devices list backward, shutting down each in turn.
  * Beware that device unplug events may also start pulling
  * devices offline, even as the system is shutting down.
  */
 while (!list_empty(&devices_kset->list)) {
  dev = list_entry(devices_kset->list.prev, struct device,
     kobj.entry);

  /*
   * hold reference count of device's parent to
   * prevent it from being freed because parent's
   * lock is to be held
   */
  parent = get_device(dev->parent);
  get_device(dev);
  /*
   * Make sure the device is off the kset list, in the
   * event that dev->*->shutdown() doesn't remove it.
   */
  list_del_init(&dev->kobj.entry);
  spin_unlock(&devices_kset->list_lock);

  /* hold lock to avoid race with probe/release */
  if (parent)
   device_lock(parent);
  device_lock(dev);

  /* Don't allow any more runtime suspends */
  pm_runtime_get_noresume(dev);
  pm_runtime_barrier(dev);

  if (dev->class && dev->class->shutdown_pre) {
   if (initcall_debug)
    dev_info(dev, "shutdown_pre\n");
   dev->class->shutdown_pre(dev);
  }
  if (dev->bus && dev->bus->shutdown) {
   if (initcall_debug)
    dev_info(dev, "shutdown\n");
   dev->bus->shutdown(dev);
  } else if (dev->driver && dev->driver->shutdown) {
   if (initcall_debug)
    dev_info(dev, "shutdown\n");
   dev->driver->shutdown(dev);
  }

  device_unlock(dev);
  if (parent)
   device_unlock(parent);

  put_device(dev);
  put_device(parent);

  spin_lock(&devices_kset->list_lock);
 }
 spin_unlock(&devices_kset->list_lock);
}
4537
4538
4539
4540
4541
4542#ifdef CONFIG_PRINTK
/*
 * Fill @dev_info (subsystem + device identifier) for structured printk
 * records.  The device field encodes, in order of preference:
 *   b/c MAJ:MIN  for devices with a dev_t,
 *   nIFINDEX     for net devices,
 *   +subsys:name for everything else.
 */
static void
set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
{
 const char *subsys;

 memset(dev_info, 0, sizeof(*dev_info));

 if (dev->class)
  subsys = dev->class->name;
 else if (dev->bus)
  subsys = dev->bus->name;
 else
  return;

 strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));

 /*
  * Add device identifier DEVICE=:
  *   b12:8         block dev_t
  *   c127:3        char dev_t
  *   n8            netdev ifindex
  *   +sound:card0  subsystem:devname
  */
 if (MAJOR(dev->devt)) {
  char c;

  if (strcmp(subsys, "block") == 0)
   c = 'b';
  else
   c = 'c';

  snprintf(dev_info->device, sizeof(dev_info->device),
    "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
 } else if (strcmp(subsys, "net") == 0) {
  struct net_device *net = to_net_dev(dev);

  snprintf(dev_info->device, sizeof(dev_info->device),
    "n%u", net->ifindex);
 } else {
  snprintf(dev_info->device, sizeof(dev_info->device),
    "+%s:%s", subsys, dev_name(dev));
 }
}
4586
4587int dev_vprintk_emit(int level, const struct device *dev,
4588 const char *fmt, va_list args)
4589{
4590 struct dev_printk_info dev_info;
4591
4592 set_dev_info(dev, &dev_info);
4593
4594 return vprintk_emit(0, level, &dev_info, fmt, args);
4595}
4596EXPORT_SYMBOL(dev_vprintk_emit);
4597
4598int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
4599{
4600 va_list args;
4601 int r;
4602
4603 va_start(args, fmt);
4604
4605 r = dev_vprintk_emit(level, dev, fmt, args);
4606
4607 va_end(args);
4608
4609 return r;
4610}
4611EXPORT_SYMBOL(dev_printk_emit);
4612
/*
 * Print @vaf at @level, prefixed with driver and device names when a
 * device is available; plain printk with a NULL marker otherwise.
 */
static void __dev_printk(const char *level, const struct device *dev,
    struct va_format *vaf)
{
 if (!dev) {
  printk("%s(NULL device *): %pV", level, vaf);
  return;
 }

 /* level is "KERN_x"; level[1] is the numeric loglevel digit */
 dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
   dev_driver_string(dev), dev_name(dev), vaf);
}
4622
4623void _dev_printk(const char *level, const struct device *dev,
4624 const char *fmt, ...)
4625{
4626 struct va_format vaf;
4627 va_list args;
4628
4629 va_start(args, fmt);
4630
4631 vaf.fmt = fmt;
4632 vaf.va = &args;
4633
4634 __dev_printk(level, dev, &vaf);
4635
4636 va_end(args);
4637}
4638EXPORT_SYMBOL(_dev_printk);
4639
/*
 * Generate the per-loglevel helpers (_dev_emerg() ... _dev_info()):
 * each is a varargs wrapper that forwards to __dev_printk() at a fixed
 * kernel loglevel.
 */
#define define_dev_printk_level(func, kern_level)  \
void func(const struct device *dev, const char *fmt, ...) \
{       \
 struct va_format vaf;    \
 va_list args;     \
       \
 va_start(args, fmt);    \
       \
 vaf.fmt = fmt;     \
 vaf.va = &args;     \
       \
 __dev_printk(kern_level, dev, &vaf);  \
       \
 va_end(args);     \
}       \
EXPORT_SYMBOL(func);

define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
4664
4665#endif
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
/**
 * dev_err_probe - probe error check and log helper
 * @dev: the pointer to the struct device
 * @err: error value to test
 * @fmt: printf-style format string
 * @...: arguments as specified in the format string
 *
 * Common pattern for probe functions: log an error message, except when the
 * error is -EPROBE_DEFER, in which case the message is only emitted at debug
 * level and is additionally recorded as the deferred-probe reason (visible
 * via the devices_deferred debugfs file), and propagate @err upwards.
 *
 * Note: the same vaf is consumed twice in the deferral path; this relies on
 * device_set_deferred_probe_reason() not exhausting the va_list (it formats
 * through %pV, which copies the list).
 *
 * Returns @err.
 */
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (err != -EPROBE_DEFER) {
		dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	} else {
		device_set_deferred_probe_reason(dev, &vaf);
		dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	}

	va_end(args);

	return err;
}
EXPORT_SYMBOL_GPL(dev_err_probe);
4720
4721static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
4722{
4723 return fwnode && !IS_ERR(fwnode->secondary);
4724}
4725
4726
4727
4728
4729
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
/**
 * set_primary_fwnode - Change the primary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New primary firmware node of the device.
 *
 * Set the device's firmware node pointer to @fwnode, but if a secondary
 * firmware node of the device is present, preserve it by chaining it as
 * @fwnode->secondary.  Passing NULL removes the primary node, promoting
 * the secondary (if any) back to dev->fwnode.
 */
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	struct device *parent = dev->parent;
	struct fwnode_handle *fn = dev->fwnode;

	if (fwnode) {
		/* If the current node is a primary, keep only its secondary. */
		if (fwnode_is_primary(fn))
			fn = fn->secondary;

		if (fn) {
			/* The new primary must not already carry a secondary. */
			WARN_ON(fwnode->secondary);
			fwnode->secondary = fn;
		}
		dev->fwnode = fwnode;
	} else {
		if (fwnode_is_primary(fn)) {
			dev->fwnode = fn->secondary;

			/*
			 * Don't clear fn->secondary when the primary is shared
			 * with the parent device: the parent still uses it.
			 */
			if (!(parent && fn == parent->fwnode))
				fn->secondary = NULL;
		} else {
			dev->fwnode = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
4765EXPORT_SYMBOL_GPL(set_primary_fwnode);
4766
4767
4768
4769
4770
4771
4772
4773
4774
4775
4776void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
4777{
4778 if (fwnode)
4779 fwnode->secondary = ERR_PTR(-ENODEV);
4780
4781 if (fwnode_is_primary(dev->fwnode))
4782 dev->fwnode->secondary = fwnode;
4783 else
4784 dev->fwnode = fwnode;
4785}
4786EXPORT_SYMBOL_GPL(set_secondary_fwnode);
4787
4788
4789
4790
4791
4792
4793
4794
4795
/**
 * device_set_of_node_from_dev - reuse device-tree node of another device
 * @dev: device whose device-tree node is being set
 * @dev2: device whose device-tree node is being reused
 *
 * Takes a new reference on @dev2's of_node and drops the reference @dev
 * held on its old of_node, then marks the node as reused so that later
 * teardown knows @dev does not own the node exclusively.
 */
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
	of_node_put(dev->of_node);
	dev->of_node = of_node_get(dev2->of_node);
	dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
4803
/*
 * Set @fwnode as the firmware node of @dev and, when the fwnode wraps an
 * OF node, mirror it into dev->of_node as well.
 *
 * NOTE(review): unlike device_set_of_node_from_dev(), no of_node_get()/put()
 * is done here — the caller apparently retains ownership of the of_node
 * reference; confirm that callers rely on that.
 */
void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
{
	dev->fwnode = fwnode;
	dev->of_node = to_of_node(fwnode);
}
EXPORT_SYMBOL_GPL(device_set_node);
4810
/* Match helper: true when the device's sysfs name equals @name. */
int device_match_name(struct device *dev, const void *name)
{
	const char *devname = dev_name(dev);

	return sysfs_streq(devname, name);
}
EXPORT_SYMBOL_GPL(device_match_name);
4816
/* Match helper: true when the device's OF node is exactly @np (pointer equality). */
int device_match_of_node(struct device *dev, const void *np)
{
	return dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
4822
/* Match helper: true when the device's firmware node is exactly @fwnode. */
int device_match_fwnode(struct device *dev, const void *fwnode)
{
	return dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
4828
4829int device_match_devt(struct device *dev, const void *pdevt)
4830{
4831 return dev->devt == *(dev_t *)pdevt;
4832}
4833EXPORT_SYMBOL_GPL(device_match_devt);
4834
/* Match helper: true when the device's ACPI companion is exactly @adev. */
int device_match_acpi_dev(struct device *dev, const void *adev)
{
	return ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
4840
/* Match helper: true when the device's ACPI handle is exactly @handle. */
int device_match_acpi_handle(struct device *dev, const void *handle)
{
	return ACPI_HANDLE(dev) == handle;
}
EXPORT_SYMBOL(device_match_acpi_handle);
4846
/* Match helper that accepts every device; @unused is ignored. */
int device_match_any(struct device *dev, const void *unused)
{
	return 1;
}
EXPORT_SYMBOL_GPL(device_match_any);