1
2
3
4
5
6
7
8
9
10
11#include <linux/acpi.h>
12#include <linux/cpufreq.h>
13#include <linux/device.h>
14#include <linux/err.h>
15#include <linux/fwnode.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/kdev_t.h>
21#include <linux/notifier.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/genhd.h>
25#include <linux/mutex.h>
26#include <linux/pm_runtime.h>
27#include <linux/netdevice.h>
28#include <linux/sched/signal.h>
29#include <linux/sched/mm.h>
30#include <linux/swiotlb.h>
31#include <linux/sysfs.h>
32#include <linux/dma-map-ops.h>
33
34#include "base.h"
35#include "power/power.h"
36
#ifdef CONFIG_SYSFS_DEPRECATED
#ifdef CONFIG_SYSFS_DEPRECATED_V2
/* Default to the old sysfs layout when the V2 option is selected. */
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/*
 * Parse the "sysfs.deprecated" early parameter.
 *
 * Fix: guard against a NULL @arg (an early parameter may be supplied
 * with no value on the command line) before handing it to kstrtol(),
 * matching the NULL check already done by fw_devlink_setup() below.
 */
static int __init sysfs_deprecated_setup(char *arg)
{
	if (!arg)
		return -EINVAL;
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
49
50
/* Devices whose sync_state() callback has been deferred (see below). */
static LIST_HEAD(deferred_sync);
/* Starts at 1; released by sync_state_resume_initcall() at late init. */
static unsigned int defer_sync_state_count = 1;
/* Protects the fwnode_link lists hanging off every fwnode_handle. */
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
/* NOTE(review): set elsewhere in this file; not referenced in this chunk. */
static bool fw_devlink_drv_reg_done;
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
75{
76 struct fwnode_link *link;
77 int ret = 0;
78
79 mutex_lock(&fwnode_link_lock);
80
81 list_for_each_entry(link, &sup->consumers, s_hook)
82 if (link->consumer == con)
83 goto out;
84
85 link = kzalloc(sizeof(*link), GFP_KERNEL);
86 if (!link) {
87 ret = -ENOMEM;
88 goto out;
89 }
90
91 link->supplier = sup;
92 INIT_LIST_HEAD(&link->s_hook);
93 link->consumer = con;
94 INIT_LIST_HEAD(&link->c_hook);
95
96 list_add(&link->s_hook, &sup->consumers);
97 list_add(&link->c_hook, &con->suppliers);
98 pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
99 con, sup);
100out:
101 mutex_unlock(&fwnode_link_lock);
102
103 return ret;
104}
105
106
107
108
109
110
111
/*
 * Delete a link between two fwnode_handles and free it.
 * Caller must hold fwnode_link_lock.
 */
static void __fwnode_link_del(struct fwnode_link *link)
{
	pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
		 link->consumer, link->supplier);
	/* Unhook from both the supplier's and the consumer's list. */
	list_del(&link->s_hook);
	list_del(&link->c_hook);
	kfree(link);
}
120
121
122
123
124
125
126
127static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
128{
129 struct fwnode_link *link, *tmp;
130
131 mutex_lock(&fwnode_link_lock);
132 list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
133 __fwnode_link_del(link);
134 mutex_unlock(&fwnode_link_lock);
135}
136
137
138
139
140
141
142
143static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
144{
145 struct fwnode_link *link, *tmp;
146
147 mutex_lock(&fwnode_link_lock);
148 list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
149 __fwnode_link_del(link);
150 mutex_unlock(&fwnode_link_lock);
151}
152
153
154
155
156
157
158
/**
 * fwnode_links_purge - Delete all supplier and consumer links of a fwnode.
 * @fwnode: fwnode whose links need to be deleted.
 *
 * Deletes all supplier and consumer links connected directly to @fwnode.
 */
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
	fwnode_links_purge_suppliers(fwnode);
	fwnode_links_purge_consumers(fwnode);
}
164
165void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
166{
167 struct fwnode_handle *child;
168
169
170 if (fwnode->dev)
171 return;
172
173 fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
174 fwnode_links_purge_consumers(fwnode);
175
176 fwnode_for_each_available_child_node(fwnode, child)
177 fw_devlink_purge_absent_suppliers(child);
178}
179EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
180
#ifdef CONFIG_SRCU
/*
 * SRCU-based protection of the device link lists: writers serialize on
 * a mutex while readers use sleepable RCU, so the write side never
 * blocks readers.
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}

static void device_link_synchronize_removal(void)
{
	/* Wait for all SRCU readers to finish before a link is freed. */
	synchronize_srcu(&device_links_srcu);
}

static void device_link_remove_from_lists(struct device_link *link)
{
	/* RCU list primitives keep concurrent readers safe while unlinking. */
	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
}
#else /* !CONFIG_SRCU */
/*
 * Fallback when SRCU is unavailable: a plain rwsem.  The "read lock"
 * index returned/accepted is unused in this variant.
 */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int device_links_read_lock_held(void)
{
	return lockdep_is_held(&device_links_lock);
}
#endif

static inline void device_link_synchronize_removal(void)
{
	/* Nothing to wait for: readers hold the rwsem while iterating. */
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del(&link->s_node);
	list_del(&link->c_node);
}
#endif /* !CONFIG_SRCU */
261
262static bool device_is_ancestor(struct device *dev, struct device *target)
263{
264 while (target->parent) {
265 target = target->parent;
266 if (dev == target)
267 return true;
268 }
269 return false;
270}
271
272
273
274
275
276
277
278
279
/**
 * device_is_dependent - Check if one device depends on another one.
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child
 * or its consumer etc).  Return 1 if that is the case or 0 otherwise.
 */
int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	/*
	 * The ancestor check catches a @target that has not been completely
	 * initialized yet and is still missing from its parent's child list.
	 */
	if (dev == target || device_is_ancestor(dev, target))
		return 1;

	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links do not create dependencies. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (link->consumer == target)
			return 1;

		/* Recurse into the consumer's own dependents. */
		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}
311
/*
 * Pick the initial state for a newly managed link from the probe status
 * of its supplier and consumer.
 */
static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probing yet as long as it
			 * knows that the supplier is already functional (for
			 * example, it has just acquired some resources from
			 * the supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}
355
/*
 * Move @dev and, recursively, all of its children and consumers to the
 * ends of the device kset and dpm list so that suppliers always precede
 * their consumers in those lists.
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet are put at the ends of
	 * the lists during registration, so they can be skipped here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links impose no ordering. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}
380
381
382
383
384
385
386
387
388
389
/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists.
 * @dev: Device to move.
 *
 * device_reorder_to_tail() wrapper taking the requisite locks (device
 * links read lock and the PM list lock, in that order).
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}
400
/* Map the embedded class device back to its struct device_link. */
#define to_devlink(dev) container_of((dev), struct device_link, link_dev)
402
403static ssize_t status_show(struct device *dev,
404 struct device_attribute *attr, char *buf)
405{
406 const char *output;
407
408 switch (to_devlink(dev)->status) {
409 case DL_STATE_NONE:
410 output = "not tracked";
411 break;
412 case DL_STATE_DORMANT:
413 output = "dormant";
414 break;
415 case DL_STATE_AVAILABLE:
416 output = "available";
417 break;
418 case DL_STATE_CONSUMER_PROBE:
419 output = "consumer probing";
420 break;
421 case DL_STATE_ACTIVE:
422 output = "active";
423 break;
424 case DL_STATE_SUPPLIER_UNBIND:
425 output = "supplier unbinding";
426 break;
427 default:
428 output = "unknown";
429 break;
430 }
431
432 return sysfs_emit(buf, "%s\n", output);
433}
434static DEVICE_ATTR_RO(status);
435
436static ssize_t auto_remove_on_show(struct device *dev,
437 struct device_attribute *attr, char *buf)
438{
439 struct device_link *link = to_devlink(dev);
440 const char *output;
441
442 if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
443 output = "supplier unbind";
444 else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
445 output = "consumer unbind";
446 else
447 output = "never";
448
449 return sysfs_emit(buf, "%s\n", output);
450}
451static DEVICE_ATTR_RO(auto_remove_on);
452
453static ssize_t runtime_pm_show(struct device *dev,
454 struct device_attribute *attr, char *buf)
455{
456 struct device_link *link = to_devlink(dev);
457
458 return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
459}
460static DEVICE_ATTR_RO(runtime_pm);
461
462static ssize_t sync_state_only_show(struct device *dev,
463 struct device_attribute *attr, char *buf)
464{
465 struct device_link *link = to_devlink(dev);
466
467 return sysfs_emit(buf, "%d\n",
468 !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
469}
470static DEVICE_ATTR_RO(sync_state_only);
471
/* sysfs attributes exposed for every device link class device. */
static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);
480
/*
 * Final teardown of a device link, run from a workqueue so that it may
 * sleep (device_link_synchronize_removal() and pm_runtime_put() can).
 */
static void device_link_release_fn(struct work_struct *work)
{
	struct device_link *link = container_of(work, struct device_link, rm_work);

	/* Ensure all concurrent list readers are done before freeing. */
	device_link_synchronize_removal();

	/* Drop any runtime-PM references still held through this link. */
	while (refcount_dec_not_one(&link->rpm_active))
		pm_runtime_put(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}

/* Class release callback: defer the real work to process context. */
static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	INIT_WORK(&link->rm_work, device_link_release_fn);
	/*
	 * Queue on system_long_wq: the release function may block for an
	 * SRCU grace period, which would stall short-lived work items.
	 */
	queue_work(system_long_wq, &link->rm_work);
}

/* Device class backing the /sys/class/devlink entries. */
static struct class devlink_class = {
	.name = "devlink",
	.owner = THIS_MODULE,
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};
516
/*
 * Create the sysfs cross-links for a newly registered device link:
 * "supplier"/"consumer" under the link device, "consumer:<bus>:<name>"
 * under the supplier and "supplier:<bus>:<name>" under the consumer.
 */
static int devlink_add_symlinks(struct device *dev,
				struct class_interface *class_intf)
{
	int ret;
	size_t len;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	/* Buffer sized for "supplier:<bus>:<name>\0" of either endpoint. */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_con_dev;

	snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_sup_dev;

	goto out;

	/* Error unwinding, in reverse order of creation. */
err_sup_dev:
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	kfree(buf);
	return ret;
}
566
/* Remove the sysfs cross-links created by devlink_add_symlinks(). */
static void devlink_remove_symlinks(struct device *dev,
				    struct class_interface *class_intf)
{
	struct device_link *link = to_devlink(dev);
	size_t len;
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	/* Same buffer sizing as in devlink_add_symlinks(). */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		WARN(1, "Unable to properly free device link symlinks!\n");
		return;
	}

	/*
	 * The consumer may already have been unregistered; only remove
	 * the symlink under it if it is still present in sysfs.
	 */
	if (device_is_registered(con)) {
		snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
		sysfs_remove_link(&con->kobj, buf);
	}
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
	kfree(buf);
}

/* Hooks symlink management into devlink_class device add/remove. */
static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};
603
604static int __init devlink_class_init(void)
605{
606 int ret;
607
608 ret = class_register(&devlink_class);
609 if (ret)
610 return ret;
611
612 ret = class_interface_register(&devlink_class_intf);
613 if (ret)
614 class_unregister(&devlink_class);
615
616 return ret;
617}
618postcore_initcall(devlink_class_init);
619
/* Flags that mark a device link as managed by the driver core. */
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER | \
			       DL_FLAG_SYNC_STATE_ONLY | \
			       DL_FLAG_INFERRED)

/* All flags device_link_add() accepts from its callers. */
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags (DL_FLAG_*).
 *
 * Return the link on success or NULL on invalid arguments or failure.
 * If a link between the two devices exists already, it is reused and its
 * flags are upgraded to the strongest semantics requested.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/* Reject invalid endpoints and mutually exclusive flag sets. */
	if (!consumer || !supplier || consumer == supplier ||
	    flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
	     (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	/* RPM_ACTIVE requires the supplier to be resumed up front. */
	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * A link can only be added if the supplier's PM state is
	 * initialized and adding the link would not create a dependency
	 * cycle.  SYNC_STATE_ONLY links do not affect probe ordering, so
	 * they are exempt from the cycle check.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * SYNC_STATE_ONLY links are useless once the consumer device has
	 * probed, so only accept them while it is unbound or probing.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
	    consumer->links.status != DL_DEV_NO_DRIVER &&
	    consumer->links.status != DL_DEV_PROBING) {
		link = NULL;
		goto out;
	}

	/* AUTOREMOVE_SUPPLIER supersedes AUTOREMOVE_CONSUMER. */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	/* Reuse an existing link between the same two devices, if any. */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		/* An explicit request clears the "inferred" marker. */
		if (link->flags & DL_FLAG_INFERRED &&
		    !(flags & DL_FLAG_INFERRED))
			link->flags &= ~DL_FLAG_INFERRED;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			/* Upgrading from SYNC_STATE_ONLY requires reordering. */
			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
			    !(link->flags & DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the lifetime implied by the new flags is longer than
		 * that of the existing link, extend the existing link.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	/* Expose the link as a class device under /sys/class/devlink. */
	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s:%s--%s:%s",
		     dev_bus_name(supplier), dev_name(supplier),
		     dev_bus_name(consumer), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		put_device(consumer);
		put_device(supplier);
		kfree(link);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Link creation during consumer probe resumes the supplier even
	 * without DL_FLAG_RPM_ACTIVE, as some callers expect.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of its children and consumers behind
	 * the supplier in the dpm/devices lists, so the ordering stays
	 * consistent with the new dependency.
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	/* Balance the pm_runtime_get_sync() done above on failure. */
	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
884
/*
 * kref release callback: unhook the link from both devices' lists and
 * unregister its class device.  The memory itself is freed later via
 * devlink_dev_release() -> device_link_release_fn().
 */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_dbg(link->consumer, "Dropping the link to %s\n",
		dev_name(link->supplier));

	pm_runtime_drop_link(link);

	device_link_remove_from_lists(link);
	device_unregister(&link->link_dev);
}

/*
 * Drop one reference to @link.  Stateless links go through the regular
 * kref; a managed link may only be dropped this way once its consumer
 * has been unregistered — otherwise warn and refuse.
 */
static void device_link_put_kref(struct device_link *link)
{
	if (link->flags & DL_FLAG_STATELESS)
		kref_put(&link->kref, __device_link_del);
	else if (!device_is_registered(link->consumer))
		__device_link_del(&link->kref);
	else
		WARN(1, "Unable to drop a managed device link reference\n");
}
907
908
909
910
911
912
913
914
915
916
/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * Takes the device links write lock around device_link_put_kref();
 * dropping a managed link this way triggers a warning (see above).
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
924
925
926
927
928
929
930
931
932
933void device_link_remove(void *consumer, struct device *supplier)
934{
935 struct device_link *link;
936
937 if (WARN_ON(consumer == supplier))
938 return;
939
940 device_links_write_lock();
941
942 list_for_each_entry(link, &supplier->links.consumers, s_node) {
943 if (link->consumer == consumer) {
944 device_link_put_kref(link);
945 break;
946 }
947 }
948
949 device_links_write_unlock();
950}
951EXPORT_SYMBOL_GPL(device_link_remove);
952
/*
 * Roll back supplier links that were moved to CONSUMER_PROBE after the
 * consumer's probe attempt had to be deferred.
 */
static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->status != DL_STATE_CONSUMER_PROBE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			/* Only SYNC_STATE_ONLY links may reach this branch. */
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}
}
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Walk the list of the device's links to suppliers and see if all of
 * them are available.  If not, return -EPROBE_DEFER.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;
	struct fwnode_handle *sup_fw;

	/*
	 * A device still waiting for one of its fwnode suppliers to show
	 * up is not allowed to probe (unless fw_devlink is permissive).
	 */
	mutex_lock(&fwnode_link_lock);
	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
	    !fw_devlink_is_permissive()) {
		sup_fw = list_first_entry(&dev->fwnode->suppliers,
					  struct fwnode_link,
					  c_hook)->supplier;
		dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n",
			      sup_fw);
		mutex_unlock(&fwnode_link_lock);
		return -EPROBE_DEFER;
	}
	mutex_unlock(&fwnode_link_lock);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE &&
		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
			/* Undo the CONSUMER_PROBE transitions made so far. */
			device_links_missing_supplier(dev);
			dev_err_probe(dev, -EPROBE_DEFER,
				      "supplier %s not ready\n",
				      dev_name(link->supplier));
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
/**
 * __device_links_queue_sync_state - Queue a device for a sync_state() call.
 * @dev: Device to call sync_state() on.
 * @list: List head to queue @dev on.
 *
 * Queues a device for a sync_state() callback so it can be invoked later
 * without the device links write lock held; the caller must pass the
 * generated list to device_links_flush_sync_list() to run the callbacks.
 * Takes a reference on @dev so it cannot be freed while on the list.
 */
static void __device_links_queue_sync_state(struct device *dev,
					    struct list_head *list)
{
	struct device_link *link;

	if (!dev_has_sync_state(dev))
		return;
	if (dev->state_synced)
		return;

	/* Don't sync until every managed consumer link is active. */
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;
		if (link->status != DL_STATE_ACTIVE)
			return;
	}

	/*
	 * Set the flag here to avoid adding the same device to a list
	 * more than once, which can happen if new consumers get added
	 * and probed before the list is flushed.
	 */
	dev->state_synced = true;

	if (WARN_ON(!list_empty(&dev->links.defer_sync)))
		return;

	get_device(dev);
	list_add_tail(&dev->links.defer_sync, list);
}
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091static void device_links_flush_sync_list(struct list_head *list,
1092 struct device *dont_lock_dev)
1093{
1094 struct device *dev, *tmp;
1095
1096 list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
1097 list_del_init(&dev->links.defer_sync);
1098
1099 if (dev != dont_lock_dev)
1100 device_lock(dev);
1101
1102 if (dev->bus->sync_state)
1103 dev->bus->sync_state(dev);
1104 else if (dev->driver && dev->driver->sync_state)
1105 dev->driver->sync_state(dev);
1106
1107 if (dev != dont_lock_dev)
1108 device_unlock(dev);
1109
1110 put_device(dev);
1111 }
1112}
1113
/* Pause sync_state() callbacks; nestable, paired with the resume below. */
void device_links_supplier_sync_state_pause(void)
{
	device_links_write_lock();
	defer_sync_state_count++;
	device_links_write_unlock();
}

/*
 * Drop one pause level; once the count reaches zero, queue and run all
 * sync_state() callbacks deferred while paused.
 */
void device_links_supplier_sync_state_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(sync_list);

	device_links_write_lock();
	if (!defer_sync_state_count) {
		WARN(true, "Unmatched sync_state pause/resume!");
		goto out;
	}
	defer_sync_state_count--;
	if (defer_sync_state_count)
		goto out;

	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
		/*
		 * Delete from deferred_sync list before queuing it to
		 * sync_list because defer_sync is used for both lists.
		 */
		list_del_init(&dev->links.defer_sync);
		__device_links_queue_sync_state(dev, &sync_list);
	}
out:
	device_links_write_unlock();

	/* Run the callbacks outside of the write lock. */
	device_links_flush_sync_list(&sync_list, NULL);
}

/* defer_sync_state_count starts at 1; release that hold once boot is done. */
static int sync_state_resume_initcall(void)
{
	device_links_supplier_sync_state_resume();
	return 0;
}
late_initcall(sync_state_resume_initcall);

/* Park @sup on the deferred_sync list unless it is already queued. */
static void __device_links_supplier_defer_sync(struct device *sup)
{
	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
		list_add_tail(&sup->links.defer_sync, &deferred_sync);
}
1161
/*
 * Demote @link to unmanaged and drop the reference held for the
 * "managed" state, which may trigger the link's removal.
 */
static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}
1168
1169static ssize_t waiting_for_supplier_show(struct device *dev,
1170 struct device_attribute *attr,
1171 char *buf)
1172{
1173 bool val;
1174
1175 device_lock(dev);
1176 val = !list_empty(&dev->fwnode->suppliers);
1177 device_unlock(dev);
1178 return sysfs_emit(buf, "%u\n", val);
1179}
1180static DEVICE_ATTR_RO(waiting_for_supplier);
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
/**
 * device_links_force_bind - Prepare device to be force bound.
 * @dev: Consumer device.
 *
 * device_bind_driver() binds a device to a driver without calling any
 * probe functions, so the consumer will not wait for its suppliers.
 * Keep the link states sensible anyway: suppliers that are bound get
 * their link moved to CONSUMER_PROBE, all other managed links are
 * dropped.  Links without DL_FLAG_MANAGED are ignored.
 */
void device_links_force_bind(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE) {
			device_link_drop_managed(link);
			continue;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
}
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available", and change the
 * status of @dev's links to suppliers to "active".
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link, *ln;
	LIST_HEAD(sync_list);

	/*
	 * If a device binds successfully, it's expected to have created
	 * all the device links it needs.  So fw_devlink no longer needs
	 * to create links to any of the device's suppliers.
	 *
	 * Also, if a child firmware node of this bound device is not
	 * added as a device by now, assume it never will be and stop
	 * other devices from deferring probe on it indefinitely.
	 */
	if (dev->fwnode && dev->fwnode->dev == dev) {
		struct fwnode_handle *child;
		fwnode_links_purge_suppliers(dev->fwnode);
		fwnode_for_each_available_child_node(dev->fwnode, child)
			fw_devlink_purge_absent_suppliers(child);
	}
	device_remove_file(dev, &dev_attr_waiting_for_supplier);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * Links created during consumer probe may be in the
		 * "consumer probe" state to start with if the supplier is
		 * still probing when they are created, and they may
		 * become "active" if the consumer probe returns first.
		 * Skip them here.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		/* Retry consumers that were waiting for this supplier. */
		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	if (defer_sync_state_count)
		__device_links_supplier_defer_sync(dev);
	else
		__device_links_queue_sync_state(dev, &sync_list);

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		struct device *supplier;

		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		supplier = link->supplier;
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
			/*
			 * When DL_FLAG_SYNC_STATE_ONLY is set, no other
			 * DL_MANAGED_LINK_FLAGS are set, so it is safe to
			 * drop the managed link completely.
			 */
			device_link_drop_managed(link);
		} else {
			WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
			WRITE_ONCE(link->status, DL_STATE_ACTIVE);
		}

		/*
		 * This needs to be done even for the deleted
		 * DL_FLAG_SYNC_STATE_ONLY link in case it was the last
		 * link preventing the supplier from getting a
		 * sync_state() call.
		 */
		if (defer_sync_state_count)
			__device_links_supplier_defer_sync(supplier);
		else
			__device_links_queue_sync_state(supplier, &sync_list);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();

	device_links_flush_sync_list(&sync_list, dev);
}
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
/*
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 * Persistent links stay around, but their status is changed to
 * "available" (or "dormant" for SYNC_STATE_ONLY links whose supplier is
 * not bound).  Links without DL_FLAG_MANAGED are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
			device_link_drop_managed(link);
			continue;
		}

		if (link->status != DL_STATE_CONSUMER_PROBE &&
		    link->status != DL_STATE_ACTIVE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			/* Only SYNC_STATE_ONLY links may reach this branch. */
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}

/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver has just failed to probe.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * __device_links_no_driver() to update links to suppliers for it as
 * appropriate.  Links without DL_FLAG_MANAGED are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * The probe has failed, so if the status of the link is
		 * "consumer probe" or "active", it must have been added
		 * by a probing consumer while this device was still
		 * probing.  Change its state to "dormant": it represents
		 * a valid relationship but is not functionally active.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to
 * "dormant" and invoke __device_links_no_driver() to update links to
 * suppliers for it as appropriate.  Links without DL_FLAG_MANAGED are
 * ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

		/*
		 * Autoremove the links between this @dev and its consumer
		 * devices that are not active, i.e. where the link state
		 * has moved to DL_STATE_SUPPLIER_UNBIND.
		 */
		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
			device_link_drop_managed(link);

		/*
		 * NOTE(review): this store also runs for a link just
		 * passed to device_link_drop_managed(), overwriting the
		 * DL_STATE_NONE it set.  The final kfree() is deferred
		 * through a workqueue, so this looks benign, but the
		 * lifetime assumption should be confirmed.
		 */
		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	list_del_init(&dev->links.defer_sync);
	__device_links_no_driver(dev);

	device_links_write_unlock();
}
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each consumer of the device and return 'true' if its link's
 * status is one of "consumer probe" or "active" (meaning that the given
 * consumer is probing right now or its driver is present).  Otherwise,
 * change the link state to "supplier unbind" to prevent the consumer
 * from probing successfully going forward.
 *
 * Return 'false' if there are no probing or active consumers.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
/**
 * device_links_unbind_consumers - Force unbind consumers of a device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev; if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to
 * complete and start over.  Otherwise, change the link's status to
 * "supplier unbind"; if it was "active", force the consumer driver to
 * unbind and start over (the consumer will not re-probe, as the link
 * state has already been changed).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (!(link->flags & DL_FLAG_MANAGED) ||
		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			/* Drop the lock before sleeping, then restart. */
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			/* Keep the consumer alive across the unlock. */
			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}
1528
1529
1530
1531
1532
/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	/* Device-link class devices have no links of their own. */
	if (dev->class == &devlink_class)
		return;

	/*
	 * Delete all of the remaining links from this device to any
	 * other devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}
1559
/* Device-link flag sets used for the fw_devlink= boot parameter modes. */
#define FW_DEVLINK_FLAGS_PERMISSIVE	(DL_FLAG_INFERRED | \
					 DL_FLAG_SYNC_STATE_ONLY)
#define FW_DEVLINK_FLAGS_ON	(DL_FLAG_INFERRED | \
				 DL_FLAG_AUTOPROBE_CONSUMER)
#define FW_DEVLINK_FLAGS_RPM	(FW_DEVLINK_FLAGS_ON | \
				 DL_FLAG_PM_RUNTIME)

/* Flags used for device links created by fw_devlink; default is "on". */
static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
1568static int __init fw_devlink_setup(char *arg)
1569{
1570 if (!arg)
1571 return -EINVAL;
1572
1573 if (strcmp(arg, "off") == 0) {
1574 fw_devlink_flags = 0;
1575 } else if (strcmp(arg, "permissive") == 0) {
1576 fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
1577 } else if (strcmp(arg, "on") == 0) {
1578 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
1579 } else if (strcmp(arg, "rpm") == 0) {
1580 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
1581 }
1582 return 0;
1583}
1584early_param("fw_devlink", fw_devlink_setup);
1585
/* Parse the fw_devlink.strict= boot parameter (boolean). */
static bool fw_devlink_strict;
static int __init fw_devlink_strict_setup(char *arg)
{
	return strtobool(arg, &fw_devlink_strict);
}
early_param("fw_devlink.strict", fw_devlink_strict_setup);
1592
/* Device-link flags fw_devlink is currently configured to use. */
u32 fw_devlink_get_flags(void)
{
	return fw_devlink_flags;
}

/* True when fw_devlink only creates SYNC_STATE_ONLY (permissive) links. */
static bool fw_devlink_is_permissive(void)
{
	return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
}

/* True when fw_devlink.strict=1 was requested and fw_devlink isn't permissive. */
bool fw_devlink_is_strict(void)
{
	return fw_devlink_strict && !fw_devlink_is_permissive();
}
1607
/*
 * Ask the fwnode's backend (via the add_links op) to create fwnode links for
 * this node.  FWNODE_FLAG_LINKS_ADDED makes this idempotent so each node is
 * only parsed once.
 */
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
{
	if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
		return;

	fwnode_call_int_op(fwnode, add_links);
	fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
}
1616
1617static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
1618{
1619 struct fwnode_handle *child = NULL;
1620
1621 fw_devlink_parse_fwnode(fwnode);
1622
1623 while ((child = fwnode_get_next_available_child_node(fwnode, child)))
1624 fw_devlink_parse_fwtree(child);
1625}
1626
/*
 * Relax an fw_devlink-inferred device link to SYNC_STATE_ONLY semantics so
 * it no longer enforces probe ordering.  Links that were not inferred by
 * fw_devlink, or that are already fully relaxed, are left alone.  Dropping
 * the runtime PM link count first undoes any DL_FLAG_PM_RUNTIME accounting.
 */
static void fw_devlink_relax_link(struct device_link *link)
{
	if (!(link->flags & DL_FLAG_INFERRED))
		return;

	if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
		return;

	pm_runtime_drop_link(link);
	link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
	dev_dbg(link->consumer, "Relaxing link with %s\n",
		dev_name(link->supplier));
}
1640
1641static int fw_devlink_no_driver(struct device *dev, void *data)
1642{
1643 struct device_link *link = to_devlink(dev);
1644
1645 if (!link->supplier->can_match)
1646 fw_devlink_relax_link(link);
1647
1648 return 0;
1649}
1650
/*
 * Called once all built-in drivers have registered.  Relaxes every devlink
 * whose supplier can never match a driver, so those suppliers stop blocking
 * their consumers' probes.
 */
void fw_devlink_drivers_done(void)
{
	fw_devlink_drv_reg_done = true;
	device_links_write_lock();
	class_for_each_device(&devlink_class, NULL, NULL,
			      fw_devlink_no_driver);
	device_links_write_unlock();
}
1659
/*
 * Relax all device links from consumers of @dev so they no longer block on
 * @dev probing.  A no-op when fw_devlink is off or permissive, since in
 * those modes no probe-blocking links are created in the first place.
 */
static void fw_devlink_unblock_consumers(struct device *dev)
{
	struct device_link *link;

	if (!fw_devlink_flags || fw_devlink_is_permissive())
		return;

	device_links_write_lock();
	list_for_each_entry(link, &dev->links.consumers, s_node)
		fw_devlink_relax_link(link);
	device_links_write_unlock();
}
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
/**
 * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
 * @con: Device to check dependencies for.
 * @sup: Device to check against.
 *
 * Check if @sup depends on @con or any device dependent on it (a child or a
 * consumer, recursively).  When such a cyclic dependency is found, convert
 * the device links created solely by fw_devlink into SYNC_STATE_ONLY links.
 * This is the equivalent of applying fw_devlink=permissive just to the
 * devices in the cycle, because fw_devlink cannot tell which of the
 * dependencies in the cycle is not a real one.
 *
 * Return 1 if a cycle is found, 0 otherwise.
 */
static int fw_devlink_relax_cycle(struct device *con, void *sup)
{
	struct device_link *link;
	int ret;

	if (con == sup)
		return 1;

	ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
	if (ret)
		return ret;

	list_for_each_entry(link, &con->links.consumers, s_node) {
		/* Already-relaxed links can't be part of a blocking cycle. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (!fw_devlink_relax_cycle(link->consumer, sup))
			continue;

		ret = 1;

		fw_devlink_relax_link(link);
	}
	return ret;
}
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
/**
 * fw_devlink_create_devlink - Create a device link from a consumer to a fwnode
 * @con: Consumer device for the device link.
 * @sup_handle: fwnode handle of the supplier.
 * @flags: devlink flags to use for the link.
 *
 * Try to create a device link between @con and the device represented by
 * @sup_handle.
 *
 * Return:
 *  0      - link created (or already existed / cycle broken).
 * -EINVAL - the link should never be created (e.g. the supplier will never
 *	     have a matching struct device, or a dependency cycle was found
 *	     and relaxed instead).
 * -EAGAIN - the supplier's device hasn't been created yet; retry later.
 */
static int fw_devlink_create_devlink(struct device *con,
				     struct fwnode_handle *sup_handle, u32 flags)
{
	struct device *sup_dev;
	int ret = 0;

	/*
	 * If the supplier's fwnode needs one of its own descendants (which
	 * includes @con here) to be bound before it can probe, creating a
	 * probe-ordering link from @con to it would deadlock both devices,
	 * so refuse to create one.
	 */
	if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
	    fwnode_is_ancestor_of(sup_handle, con->fwnode))
		return -EINVAL;

	sup_dev = get_dev_from_fwnode(sup_handle);
	if (sup_dev) {
		/*
		 * The supplier device exists but is never going to be probed
		 * (initialized fwnode, no driver bound): don't link to it.
		 */
		if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
		    sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
			ret = -EINVAL;
			goto out;
		}

		/*
		 * device_link_add() failing for a non-SYNC_STATE_ONLY link
		 * indicates a dependency cycle; relax the links in the cycle
		 * and retry with permissive flags.
		 */
		if (!device_link_add(con, sup_dev, flags) &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			dev_info(con, "Fixing up cyclic dependency with %s\n",
				 dev_name(sup_dev));
			device_links_write_lock();
			fw_devlink_relax_cycle(con, sup_dev);
			device_links_write_unlock();
			device_link_add(con, sup_dev,
					FW_DEVLINK_FLAGS_PERMISSIVE);
			ret = -EINVAL;
		}

		goto out;
	}

	/* Supplier is initialized but has no struct device: nothing to link. */
	if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
		return -EINVAL;

	/*
	 * SYNC_STATE_ONLY links are only useful between devices that already
	 * exist; ask the caller to try again once the supplier shows up.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY)
		return -EAGAIN;

	/*
	 * The supplier device doesn't exist yet.  If one of its ancestor
	 * devices already depends on @con, creating this link later would
	 * form a cycle, so relax the cycle now instead of waiting.
	 */
	sup_dev = fwnode_get_next_parent_dev(sup_handle);
	if (sup_dev && device_is_dependent(con, sup_dev)) {
		dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
			 sup_handle, dev_name(sup_dev));
		device_links_write_lock();
		fw_devlink_relax_cycle(con, sup_dev);
		device_links_write_unlock();
		ret = -EINVAL;
	} else {
		/*
		 * Can't check for cycles or no cycles.  So let's try again
		 * later when the supplier device is added.
		 */
		ret = -EAGAIN;
	}

out:
	put_device(sup_dev);
	return ret;
}
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
/**
 * __fw_devlink_link_to_consumers - Create device links to consumers of a device
 * @dev: Device that needs to be linked to its consumers.
 *
 * Look at all the consumer fwnode links of @dev's fwnode and create device
 * links from the consumer devices to @dev.  If a consumer fwnode has no
 * device yet, link the closest ancestor device of that fwnode instead,
 * using permissive (SYNC_STATE_ONLY) flags since the real dependency
 * belongs to the descendant.  fwnode links that were successfully converted
 * to device links (or rejected permanently) are deleted.
 *
 * Caller holds fwnode_link_lock.
 */
static void __fw_devlink_link_to_consumers(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;
	struct fwnode_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
		u32 dl_flags = fw_devlink_get_flags();
		struct device *con_dev;
		bool own_link = true;
		int ret;

		con_dev = get_dev_from_fwnode(link->consumer);
		/*
		 * If consumer device is not available yet, make a "proxy"
		 * SYNC_STATE_ONLY link from the consumer's parent device to
		 * the supplier device.  Use the ancestor only if it isn't
		 * itself an ancestor of the supplier (that would be a
		 * parent-depends-on-child link, which makes no sense here).
		 */
		if (!con_dev) {
			con_dev = fwnode_get_next_parent_dev(link->consumer);
			if (con_dev &&
			    fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
				put_device(con_dev);
				con_dev = NULL;
			} else {
				own_link = false;
				dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
			}
		}

		if (!con_dev)
			continue;

		ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
		put_device(con_dev);
		if (!own_link || ret == -EAGAIN)
			continue;

		__fwnode_link_del(link);
	}
}
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
/**
 * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
 * @dev: The consumer device that needs to be linked to its suppliers.
 * @fwnode: Root of the fwnode tree that is used to create device links.
 *
 * Create device links from @dev to every supplier fwnode link of @fwnode
 * and, recursively, of all of @fwnode's descendants (a device effectively
 * depends on the suppliers of its child fwnodes too).  Links created on
 * behalf of descendant fwnodes use permissive flags; @dev's own links use
 * the configured fw_devlink flags.
 *
 * Caller holds fwnode_link_lock.
 */
static void __fw_devlink_link_to_suppliers(struct device *dev,
					   struct fwnode_handle *fwnode)
{
	bool own_link = (dev->fwnode == fwnode);
	struct fwnode_link *link, *tmp;
	struct fwnode_handle *child = NULL;
	u32 dl_flags;

	if (own_link)
		dl_flags = fw_devlink_get_flags();
	else
		dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;

	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
		int ret;
		struct device *sup_dev;
		struct fwnode_handle *sup = link->supplier;

		ret = fw_devlink_create_devlink(dev, sup, dl_flags);
		if (!own_link || ret == -EAGAIN)
			continue;

		__fwnode_link_del(link);

		/* If no device link was created, nothing more to do. */
		if (ret)
			continue;

		/*
		 * A device link was created between @dev and the supplier.
		 * Propagation: the supplier inherits its own suppliers'
		 * fwnode links from any proxy links that were made through
		 * an ancestor before the supplier device existed, so re-run
		 * the supplier's own linking now that it is available.
		 */
		sup_dev = get_dev_from_fwnode(sup);
		__fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
		put_device(sup_dev);
	}

	/*
	 * Make "proxy" SYNC_STATE_ONLY device links from @dev to the
	 * suppliers of all of @fwnode's available descendants.
	 */
	while ((child = fwnode_get_next_available_child_node(fwnode, child)))
		__fw_devlink_link_to_suppliers(dev, child);
}
1987
/*
 * Create all fw_devlink device links for a newly added device: parse the
 * device's fwnode tree, then convert its fwnode links (in both directions)
 * into device links.  No-op when fw_devlink is off.
 */
static void fw_devlink_link_device(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;

	if (!fw_devlink_flags)
		return;

	fw_devlink_parse_fwtree(fwnode);

	mutex_lock(&fwnode_link_lock);
	__fw_devlink_link_to_consumers(dev);
	__fw_devlink_link_to_suppliers(dev, fwnode);
	mutex_unlock(&fwnode_link_lock);
}
2002
2003
2004
/* Optional hooks called on every device add/remove (legacy mechanism). */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
/* /sys/dev and its "char" and "block" subdirectories. */
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;

/* Serializes device hotplug operations (online/offline, memory probe, ...). */
static DEFINE_MUTEX(device_hotplug_lock);
2012
void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}

void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}

/*
 * Try to take device_hotplug_lock from a sysfs handler.  If it is held,
 * restart the syscall instead of blocking, so that sysfs removal of the
 * attribute (which may be waiting on us) cannot deadlock with us waiting
 * on the lock.  The short sleep avoids a tight restart loop.
 */
int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}
2032
#ifdef CONFIG_BLOCK
/* Block-device partitions are not shown as class devices in sysfs. */
static inline int device_is_not_partition(struct device *dev)
{
	return !(dev->type == &part_type);
}
#else
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif
2044
/*
 * Notify the firmware interfaces (ACPI, software nodes) and the legacy
 * platform_notify hook that @dev has been added.
 */
static void device_platform_notify(struct device *dev)
{
	acpi_device_notify(dev);

	software_node_notify(dev);

	if (platform_notify)
		platform_notify(dev);
}

/* Counterpart of device_platform_notify(), called on device removal. */
static void device_platform_notify_remove(struct device *dev)
{
	acpi_device_notify_remove(dev);

	software_node_notify_remove(dev);

	if (platform_notify_remove)
		platform_notify_remove(dev);
}
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
/**
 * dev_driver_string - Return a device's driver name, if at all possible
 * @dev: struct device to get the name of
 *
 * Will return the device's driver's name if it is bound to a device.  If
 * the device is not bound to a driver, it will return the name of the bus
 * it is attached to (which may be "" if there is none).
 */
const char *dev_driver_string(const struct device *dev)
{
	struct device_driver *drv;

	/*
	 * dev->driver can change to NULL underneath us because of unbinding,
	 * so be careful about accessing it.  dev->bus and dev->class should
	 * never change once they are set, so they don't need special care.
	 */
	drv = READ_ONCE(dev->driver);
	return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
2086
#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

/*
 * sysfs ->show dispatcher: forward to the device_attribute's show() method.
 * Complains loudly if a show() callback claims to have filled more than a
 * page, which would indicate buffer overrun.
 */
static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->show)
		ret = dev_attr->show(dev, dev_attr, buf);
	if (ret >= (ssize_t)PAGE_SIZE) {
		printk("dev_attr_show: %pS returned bad count\n",
		       dev_attr->show);
	}
	return ret;
}
2104
/* sysfs ->store dispatcher: forward to the device_attribute's store() method. */
static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->store)
		ret = dev_attr->store(dev, dev_attr, buf, count);
	return ret;
}

static const struct sysfs_ops dev_sysfs_ops = {
	.show	= dev_attr_show,
	.store	= dev_attr_store,
};

#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
2123
2124ssize_t device_store_ulong(struct device *dev,
2125 struct device_attribute *attr,
2126 const char *buf, size_t size)
2127{
2128 struct dev_ext_attribute *ea = to_ext_attr(attr);
2129 int ret;
2130 unsigned long new;
2131
2132 ret = kstrtoul(buf, 0, &new);
2133 if (ret)
2134 return ret;
2135 *(unsigned long *)(ea->var) = new;
2136
2137 return size;
2138}
2139EXPORT_SYMBOL_GPL(device_store_ulong);
2140
/* Generic show() helper printing an unsigned long ->var in hex. */
ssize_t device_show_ulong(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);

/* Generic store() helper for an int ->var, with explicit range checking. */
ssize_t device_store_int(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	long new;

	ret = kstrtol(buf, 0, &new);
	if (ret)
		return ret;

	if (new > INT_MAX || new < INT_MIN)
		return -EINVAL;
	*(int *)(ea->var) = new;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_int);

/* Generic show() helper printing an int ->var in decimal. */
ssize_t device_show_int(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);

/* Generic store() helper for a bool ->var (accepts strtobool() syntax). */
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	if (strtobool(buf, ea->var) < 0)
		return -EINVAL;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);

/* Generic show() helper printing a bool ->var as 0/1. */
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);
2200
2201
2202
2203
2204
2205
2206
2207
2208
/**
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * This is called once the reference count for the object reaches 0.  We
 * forward the call to the device's release() method, which should handle
 * actually freeing the structure.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without driver attached and
	 * managed resources may have been acquired.  As released resources
	 * cannot be reacquired from here, release them from the last-chance
	 * point — drivers are expected to have released their devres by the
	 * time they unbind, so this should normally find nothing.
	 */
	devres_release_all(dev);

	kfree(dev->dma_range_map);

	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
			dev_name(dev));
	kfree(p);
}
2238
/* Return the sysfs namespace tag for @kobj's device, if its class has one. */
static const void *device_namespace(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

/* Let the device's class override the uid/gid of its sysfs entries. */
static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->class && dev->class->get_ownership)
		dev->class->get_ownership(dev, uid, gid);
}

static struct kobj_type device_ktype = {
	.release	= device_release,
	.sysfs_ops	= &dev_sysfs_ops,
	.namespace	= device_namespace,
	.get_ownership	= device_get_ownership,
};
2264
2265
/* Only emit uevents for real devices that belong to a bus or a class. */
static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &device_ktype) {
		struct device *dev = kobj_to_dev(kobj);
		if (dev->bus)
			return 1;
		if (dev->class)
			return 1;
	}
	return 0;
}

/* Subsystem name reported in the uevent: bus name, else class name. */
static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->bus)
		return dev->bus->name;
	if (dev->class)
		return dev->class->name;
	return NULL;
}
2290
/*
 * Fill in the uevent environment for a device: device-node info (MAJOR,
 * MINOR, DEVNAME, DEVMODE, DEVUID, DEVGID), DEVTYPE, DRIVER, OF data, then
 * let the bus, class and device type add their own variables.
 */
static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* Add device node properties if present. */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common DT/OF entries. */
	of_device_uevent(dev, env);

	/* Have the bus-specific function add its stuff. */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* Have the class-specific function add its stuff. */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* Have the device-type-specific function add its stuff. */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}

static const struct kset_uevent_ops device_uevent_ops = {
	.filter =	dev_uevent_filter,
	.name =		dev_uevent_name,
	.uevent =	dev_uevent,
};
2363
/*
 * "uevent" attribute show(): re-run the uevent callbacks for this device
 * and print the environment variables they would produce, one per line.
 * Returns 0 (empty) when the device's kset would filter the event out or
 * when a uevent callback fails.
 */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* Search the kset; the device belongs to a parent's kset if it has none. */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* Respect the kset's filter, like a real uevent would. */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* Let the kset-specific function add its keys. */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* Copy the keys to the buffer, one per line. */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	kfree(env);
	return len;
}
2406
/*
 * "uevent" attribute store(): emit a synthetic uevent for this device
 * (e.g. "echo add > uevent").
 */
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&dev->kobj, buf, count);

	if (rc) {
		dev_err(dev, "uevent: failed to send synthetic uevent\n");
		return rc;
	}

	return count;
}
static DEVICE_ATTR_RW(uevent);
2422
/* "online" attribute: read the (inverted) offline flag under the device lock. */
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	bool val;

	device_lock(dev);
	val = !dev->offline;
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}

/* "online" attribute store(): online/offline the device under hotplug lock. */
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);

/* "removable" attribute: report the hardware-reported removability. */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	const char *loc;

	switch (dev->removable) {
	case DEVICE_REMOVABLE:
		loc = "removable";
		break;
	case DEVICE_FIXED:
		loc = "fixed";
		break;
	default:
		loc = "unknown";
	}
	return sysfs_emit(buf, "%s\n", loc);
}
static DEVICE_ATTR_RO(removable);
2472
/* Create a NULL-terminated list of attribute groups under the device. */
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

/* Remove attribute groups previously created with device_add_groups(). */
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);

/* devres payload for devm-managed attribute group(s). */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};

/* devres match callback: compare the stored group pointer with @data. */
static int devm_attr_group_match(struct device *dev, void *res, void *data)
{
	return ((union device_attr_group_devres *)res)->group == data;
}

/* devres release callback for a single devm-managed attribute group. */
static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}

/* devres release callback for a devm-managed list of attribute groups. */
static void devm_attr_groups_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group **groups = devres->groups;

	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
	sysfs_remove_groups(&dev->kobj, groups);
}
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
/**
 * devm_device_add_group - given a device, create a managed attribute group
 * @dev:	The device to create the group for
 * @grp:	The attribute group to create
 *
 * This function creates a group for the first time.  It will explicitly
 * warn and error if any of the attribute files being created already exist.
 * The group is removed automatically when @dev's devres are released.
 *
 * Returns 0 on success or error code on failure.
 */
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_group_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_group(&dev->kobj, grp);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->group = grp;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
2545
2546
2547
2548
2549
2550
2551
2552
2553
/**
 * devm_device_remove_group: remove a managed group from a device
 * @dev:	device to remove the group from
 * @grp:	group to remove
 *
 * This function removes a group of attributes from a device.  The attributes
 * previously have to have been created for this group, otherwise it will
 * fail (and WARN via the devres_release() return check).
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);

/**
 * devm_device_add_groups - create a bunch of managed attribute groups
 * @dev:	The device to create the group for
 * @groups:	The attribute groups to create, NULL terminated
 *
 * This function creates a bunch of managed attribute groups.  If an error
 * occurs when creating a group, all previously created groups will be
 * removed, unwinding everything back to the original state when this
 * function was called.  All groups are removed automatically when @dev's
 * devres are released.
 *
 * Returns 0 on success or error code from sysfs_create_group on failure.
 */
int devm_device_add_groups(struct device *dev,
			   const struct attribute_group **groups)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_groups_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_groups(&dev->kobj, groups);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->groups = groups;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_groups);

/**
 * devm_device_remove_groups - remove a list of managed groups
 * @dev:	The device for the groups to be removed from
 * @groups:	NULL terminated list of groups to be removed
 *
 * If groups is not NULL, remove the specified groups from the device.
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);
2615
/*
 * Create all sysfs attributes for a device being added: class groups, type
 * groups, device-specific groups, then the optional "online",
 * "waiting_for_supplier" and "removable" files.  On any failure, unwind
 * everything created so far in reverse order.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
		if (error)
			goto err_remove_dev_online;
	}

	if (dev_removable_is_valid(dev)) {
		error = device_create_file(dev, &dev_attr_removable);
		if (error)
			goto err_remove_dev_waiting_for_supplier;
	}

	return 0;

 err_remove_dev_waiting_for_supplier:
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
 err_remove_dev_online:
	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}
2673
/* Remove everything device_add_attrs() created, in reverse order. */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_removable);
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}
2690
/* "dev" attribute: print the device number as "major:minor". */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);
2697
2698
/* /sys/devices/ */
struct kset *devices_kset;

/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}
2746
2747
2748
2749
2750
2751
2752int device_create_file(struct device *dev,
2753 const struct device_attribute *attr)
2754{
2755 int error = 0;
2756
2757 if (dev) {
2758 WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
2759 "Attribute %s: write permission without 'store'\n",
2760 attr->attr.name);
2761 WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
2762 "Attribute %s: read permission without 'show'\n",
2763 attr->attr.name);
2764 error = sysfs_create_file(&dev->kobj, &attr->attr);
2765 }
2766
2767 return error;
2768}
2769EXPORT_SYMBOL_GPL(device_create_file);
2770
2771
2772
2773
2774
2775
/**
 * device_remove_file - remove sysfs attribute file.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
void device_remove_file(struct device *dev,
			const struct device_attribute *attr)
{
	if (dev)
		sysfs_remove_file(&dev->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(device_remove_file);
2783
2784
2785
2786
2787
2788
2789
2790
2791bool device_remove_file_self(struct device *dev,
2792 const struct device_attribute *attr)
2793{
2794 if (dev)
2795 return sysfs_remove_file_self(&dev->kobj, &attr->attr);
2796 else
2797 return false;
2798}
2799EXPORT_SYMBOL_GPL(device_remove_file_self);
2800
2801
2802
2803
2804
2805
2806int device_create_bin_file(struct device *dev,
2807 const struct bin_attribute *attr)
2808{
2809 int error = -EINVAL;
2810 if (dev)
2811 error = sysfs_create_bin_file(&dev->kobj, attr);
2812 return error;
2813}
2814EXPORT_SYMBOL_GPL(device_create_bin_file);
2815
2816
2817
2818
2819
2820
/**
 * device_remove_bin_file - remove sysfs binary attribute file
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr)
{
	if (dev)
		sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);

/* klist callbacks pinning a device while it sits on a children klist. */
static void klist_children_get(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	get_device(dev);
}

static void klist_children_put(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	put_device(dev);
}
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing
 * its fields.
 * It is the first half of device_register(), if called by
 * that function, though it can also be called separately, so one
 * may use @dev's fields. In particular, get_device()/put_device()
 * may be used for reference counting of @dev after calling this
 * function.
 *
 * All fields in @dev must be initialized by the caller to 0, except
 * for those explicitly set to some other value.  The simplest
 * approach is to use kzalloc() to allocate the structure containing
 * @dev.
 *
 * NOTE: Use put_device() to give up your reference instead of freeing
 * @dev directly once you have called this function.
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
#ifdef CONFIG_PROVE_LOCKING
	mutex_init(&dev->lockdep_mutex);
#endif
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	raw_spin_lock_init(&dev->msi_lock);
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	INIT_LIST_HEAD(&dev->links.defer_sync);
	dev->links.status = DL_DEV_NO_DRIVER;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	dev->dma_coherent = dma_default_coherent;
#endif
#ifdef CONFIG_SWIOTLB
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
2897
/*
 * Return (creating on first use) the /sys/devices/virtual directory used as
 * the parent for class devices that have no real parent device.
 * NOTE(review): creation of the static kobject is not explicitly
 * serialized here — presumably callers are serialized elsewhere; confirm.
 */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}

/* A class "glue" directory (e.g. /sys/devices/.../<class-name>/). */
struct class_dir {
	struct kobject kobj;
	struct class *class;
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)

static void class_dir_release(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	kfree(dir);
}

/* Child namespace type of a glue dir comes from the class it represents. */
static const
struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	return dir->class->ns_type;
}

static struct kobj_type class_dir_ktype = {
	.release	= class_dir_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.child_ns_type	= class_dir_child_ns_type
};
2934
/*
 * Allocate and register a glue directory named after @class under
 * @parent_kobj, adding it to the class's glue_dirs kset.  Returns the new
 * kobject or an ERR_PTR on failure.
 */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}
2957
/* Serializes lookup/creation of class glue directories. */
static DEFINE_MUTEX(gdp_mutex);

/*
 * Determine the sysfs parent kobject for @dev.  Class devices are grouped
 * under a per-class "glue" directory (shared and refcounted), except for
 * deprecated block devices and parents that are class devices themselves.
 * Non-class devices hang off their parent, their bus's dev_root, or the
 * devices kset root.
 */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block (sysfs_deprecated mode). */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".  Class-devices
		 * with a non-class parent hang off that parent directly
		 * (unless the class uses namespaces, in which case a glue
		 * dir is still needed).
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* Find our class-directory at the parent and reference it. */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* Or create a new class-directory at the parent device. */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* Do not emit a uevent for this simple "glue" directory. */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* Subsystems can specify a default root directory for their devices. */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}
3019
3020static inline bool live_in_glue_dir(struct kobject *kobj,
3021 struct device *dev)
3022{
3023 if (!kobj || !dev->class ||
3024 kobj->kset != &dev->class->p->glue_dirs)
3025 return false;
3026 return true;
3027}
3028
/* Return the kobject that would be @dev's glue dir (its sysfs parent). */
static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}
3033
3034
3035
3036
3037
3038
/*
 * cleanup_glue_dir - drop a reference on @dev's glue dir, deleting it when
 * it is about to become unused.
 * @dev: device whose sysfs entry used (or would have used) the glue dir.
 * @glue_dir: the glue directory kobject, may be NULL or unrelated.
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	unsigned int ref;

	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/*
	 * There is a race between removing a glue dir here and another CPU
	 * adding a new device under it:
	 *
	 * CPU1 (device_del of last child)   CPU2 (device_add of new child)
	 *   cleanup_glue_dir()                get_device_parent()
	 *     kobject_del(glue_dir)             kobject_get(glue_dir)
	 *     ...sysfs_remove_dir() frees       kobject_add() then creates
	 *     glue_dir->sd                      a dir inside the stale sd
	 *
	 * If CPU1 deletes the glue dir's kernfs node while CPU2 still holds
	 * a reference and is about to create a child in it, CPU2 operates on
	 * freed kernfs state.  To avoid that, only kobject_del() the glue
	 * dir when it has no children AND our reference is the last one
	 * (kref would drop to zero after the kobject_put() below).  The
	 * whole check-and-delete runs under gdp_mutex, the same lock that
	 * serializes glue-dir lookup/creation in get_device_parent().
	 */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}
3102
/*
 * device_add_class_symlinks - create @dev's sysfs convenience links.
 *
 * Creates, in order: an "of_node" link (best effort), a "subsystem" link
 * to the class, a "device" link to the parent, and the class-side link
 * named after the device.  On failure the links created so far are
 * removed in reverse order via the fall-through labels.
 */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n",error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories for the disks themselves */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}
3154
/* Undo device_add_class_symlinks(), mirroring its conditional creation. */
static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	/* no class-side link was created for deprecated block devices */
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}
3172
3173
3174
3175
3176
3177
/**
 * dev_set_name - set a device name
 * @dev: device
 * @fmt: format string for the device's name
 *
 * Returns 0 on success or a negative errno from the underlying
 * kobject_set_name_vargs() call.
 */
int dev_set_name(struct device *dev, const char *fmt, ...)
{
	va_list vargs;
	int err;

	va_start(vargs, fmt);
	err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
	va_end(vargs);
	return err;
}
3188EXPORT_SYMBOL_GPL(dev_set_name);
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201static struct kobject *device_to_dev_kobj(struct device *dev)
3202{
3203 struct kobject *kobj;
3204
3205 if (dev->class)
3206 kobj = dev->class->dev_kobj;
3207 else
3208 kobj = sysfs_dev_char_kobj;
3209
3210 return kobj;
3211}
3212
3213static int device_create_sys_dev_entry(struct device *dev)
3214{
3215 struct kobject *kobj = device_to_dev_kobj(dev);
3216 int error = 0;
3217 char devt_str[15];
3218
3219 if (kobj) {
3220 format_dev_t(devt_str, dev->devt);
3221 error = sysfs_create_link(kobj, &dev->kobj, devt_str);
3222 }
3223
3224 return error;
3225}
3226
3227static void device_remove_sys_dev_entry(struct device *dev)
3228{
3229 struct kobject *kobj = device_to_dev_kobj(dev);
3230 char devt_str[15];
3231
3232 if (kobj) {
3233 format_dev_t(devt_str, dev->devt);
3234 sysfs_remove_link(kobj, devt_str);
3235 }
3236}
3237
/*
 * device_private_init - allocate and set up @dev's driver-core private data.
 *
 * Returns 0 on success, -ENOMEM if the device_private allocation fails.
 */
static int device_private_init(struct device *dev)
{
	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
	if (!dev->p)
		return -ENOMEM;
	dev->p->device = dev;
	klist_init(&dev->p->klist_children, klist_children_get,
		   klist_children_put);
	INIT_LIST_HEAD(&dev->p->deferred_probe);
	return 0;
}
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
/**
 * device_add - add device to device hierarchy.
 * @dev: device.
 *
 * This is part 2 of device_register(), though may be called
 * separately _iff_ device_initialize() has been called separately.
 *
 * This adds @dev to the kobject hierarchy via kobject_add(), adds it
 * to the global and sibling lists for the device, then
 * adds it to the other relevant subsystems of the driver model.
 *
 * Do not call this routine or device_register() more than once for
 * any device structure.
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up your
 * reference instead.
 *
 * Rule of thumb is: if device_add() succeeds, you should call
 * device_del() when you want to get rid of it. If device_add() has
 * *not* succeeded, use *only* put_device() to drop the reference
 * count.
 */
int device_add(struct device *dev)
{
	struct device *parent;
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
	struct kobject *glue_dir = NULL;

	dev = get_device(dev);
	if (!dev)
		goto done;

	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * for statically allocated devices, which should all be converted
	 * some day, we need to initialize the name. We prevent reading back
	 * the name, and force the use of dev_name()
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	/* subsystems can specify simple device enumeration */
	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	parent = get_device(dev->parent);
	kobj = get_device_parent(dev, parent);
	if (IS_ERR(kobj)) {
		error = PTR_ERR(kobj);
		goto parent_error;
	}
	if (kobj)
		dev->kobj.parent = kobj;

	/* use parent numa_node */
	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
		set_dev_node(dev, dev_to_node(parent));

	/* first, register with generic layer. */
	/* we require the name to be set before, and pass NULL */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error) {
		glue_dir = get_glue_dir(dev);
		goto Error;
	}

	/* notify platform of device entry */
	device_platform_notify(dev);

	error = device_create_file(dev, &dev_attr_uevent);
	if (error)
		goto attrError;

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &dev_attr_dev);
		if (error)
			goto DevAttrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto SysEntryError;

		devtmpfs_create_node(dev);
	}

	/* Notify clients of device addition.  This call must come
	 * after dpm_sysfs_add() and before kobject_uevent().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);

	/*
	 * Check if any of the other devices (consumers) have been waiting for
	 * this device (supplier) to be added so that they can create a device
	 * link to it.
	 *
	 * This needs to happen after kobject_uevent()/device_pm_add() so the
	 * supplier is fully registered, but before bus_probe_device() so
	 * waiting consumers can link to it before the driver is bound.
	 */
	if (dev->fwnode && !dev->fwnode->dev) {
		dev->fwnode->dev = dev;
		fw_devlink_link_device(dev);
	}

	bus_probe_device(dev);

	/*
	 * If all driver registration is done and a newly added device doesn't
	 * match with any driver, don't block its consumers from probing in
	 * case the consumer device is able to operate without this supplier.
	 */
	if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
		fw_devlink_unblock_consumers(dev);

	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->p->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
	/* error unwinding: labels tear down, in reverse, what succeeded */
 SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	device_platform_notify_remove(dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
	cleanup_glue_dir(dev, glue_dir);
parent_error:
	put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
3454EXPORT_SYMBOL_GPL(device_add);
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * This happens in two clean steps - initialize the device
 * and add it to the system.  The two steps can be called
 * separately, but this is the simplest and most common.
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up the
 * reference initialized in this function instead.
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
3479EXPORT_SYMBOL_GPL(device_register);
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489struct device *get_device(struct device *dev)
3490{
3491 return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
3492}
3493EXPORT_SYMBOL_GPL(get_device);
3494
3495
3496
3497
3498
3499void put_device(struct device *dev)
3500{
3501
3502 if (dev)
3503 kobject_put(&dev->kobj);
3504}
3505EXPORT_SYMBOL_GPL(put_device);
3506
/*
 * kill_device - mark @dev as dead so no new probes are attempted.
 *
 * Must be called with the device lock held (asserted below); the lock
 * makes the read-then-set of dev->p->dead race-free.  Returns true if
 * this call did the killing, false if the device was already dead.
 */
bool kill_device(struct device *dev)
{
	device_lock_assert(dev);

	if (dev->p->dead)
		return false;
	dev->p->dead = true;
	return true;
}
3523EXPORT_SYMBOL_GPL(kill_device);
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * This is the first part of the device unregistration
 * sequence.  This removes the device from the lists we control
 * from here, has it removed from the other driver model
 * subsystems it was added to in device_add(), and removes it
 * from the kobject hierarchy.
 *
 * NOTE: this should be called manually _iff_ device_add() was
 * also called manually.
 */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;
	unsigned int noio_flag;

	/* mark dead under the device lock so racing probes see it */
	device_lock(dev);
	kill_device(dev);
	device_unlock(dev);

	if (dev->fwnode && dev->fwnode->dev == dev)
		dev->fwnode->dev = NULL;

	/*
	 * NOTE(review): allocations during teardown run in noio scope —
	 * presumably to keep reclaim from recursing into the device being
	 * removed (e.g. block devices); confirm against the commit that
	 * added memalloc_noio_save() here.
	 */
	noio_flag = memalloc_noio_save();
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);

	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &dev_attr_dev);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->mutex);
		/* notify any interfaces that the device is now gone */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* remove the device from the class list */
		klist_del(&dev->p->knode_class);
		mutex_unlock(&dev->class->p->mutex);
	}
	device_remove_file(dev, &dev_attr_uevent);
	device_remove_attrs(dev);
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_platform_notify_remove(dev);
	device_remove_properties(dev);
	device_links_purge(dev);

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
	cleanup_glue_dir(dev, glue_dir);
	memalloc_noio_restore(noio_flag);
	put_device(parent);
}
3600EXPORT_SYMBOL_GPL(device_del);
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * We do this in two parts, like we do device_register(). First,
 * we remove it from all the subsystems with device_del(), then
 * we decrement the reference count via put_device(). If that
 * is the final reference count, the device will be cleaned up
 * via device_release() above. Otherwise, the structure will
 * stick around until the final reference to the device is dropped.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	put_device(dev);
}
3619EXPORT_SYMBOL_GPL(device_unregister);
3620
3621static struct device *prev_device(struct klist_iter *i)
3622{
3623 struct klist_node *n = klist_prev(i);
3624 struct device *dev = NULL;
3625 struct device_private *p;
3626
3627 if (n) {
3628 p = to_device_private_parent(n);
3629 dev = p->device;
3630 }
3631 return dev;
3632}
3633
3634static struct device *next_device(struct klist_iter *i)
3635{
3636 struct klist_node *n = klist_next(i);
3637 struct device *dev = NULL;
3638 struct device_private *p;
3639
3640 if (n) {
3641 p = to_device_private_parent(n);
3642 dev = p->device;
3643 }
3644 return dev;
3645}
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
/**
 * device_get_devnode - path of device node file
 * @dev: device
 * @mode: returned file access mode
 * @uid: returned file owner
 * @gid: returned file group
 * @tmp: possibly allocated string
 *
 * Return the relative path of a possible device node.
 * Non-default names may need to allocate a memory to compose
 * a name. This memory is returned in tmp and needs to be
 * freed by the caller.
 */
const char *device_get_devnode(struct device *dev,
			       umode_t *mode, kuid_t *uid, kgid_t *gid,
			       const char **tmp)
{
	char *s;

	*tmp = NULL;

	/* the device type may provide a specific name */
	if (dev->type && dev->type->devnode)
		*tmp = dev->type->devnode(dev, mode, uid, gid);
	if (*tmp)
		return *tmp;

	/* the class may provide a specific name */
	if (dev->class && dev->class->devnode)
		*tmp = dev->class->devnode(dev, mode);
	if (*tmp)
		return *tmp;

	/* return name without allocation, tmp == NULL */
	if (strchr(dev_name(dev), '!') == NULL)
		return dev_name(dev);

	/* replace '!' in the name with '/' */
	s = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!s)
		return NULL;
	strreplace(s, '!', '/');
	return *tmp = s;
}
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704int device_for_each_child(struct device *parent, void *data,
3705 int (*fn)(struct device *dev, void *data))
3706{
3707 struct klist_iter i;
3708 struct device *child;
3709 int error = 0;
3710
3711 if (!parent->p)
3712 return 0;
3713
3714 klist_iter_init(&parent->p->klist_children, &i);
3715 while (!error && (child = next_device(&i)))
3716 error = fn(child, data);
3717 klist_iter_exit(&i);
3718 return error;
3719}
3720EXPORT_SYMBOL_GPL(device_for_each_child);
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734int device_for_each_child_reverse(struct device *parent, void *data,
3735 int (*fn)(struct device *dev, void *data))
3736{
3737 struct klist_iter i;
3738 struct device *child;
3739 int error = 0;
3740
3741 if (!parent->p)
3742 return 0;
3743
3744 klist_iter_init(&parent->p->klist_children, &i);
3745 while ((child = prev_device(&i)) && !error)
3746 error = fn(child, data);
3747 klist_iter_exit(&i);
3748 return error;
3749}
3750EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769struct device *device_find_child(struct device *parent, void *data,
3770 int (*match)(struct device *dev, void *data))
3771{
3772 struct klist_iter i;
3773 struct device *child;
3774
3775 if (!parent)
3776 return NULL;
3777
3778 klist_iter_init(&parent->p->klist_children, &i);
3779 while ((child = next_device(&i)))
3780 if (match(child, data) && get_device(child))
3781 break;
3782 klist_iter_exit(&i);
3783 return child;
3784}
3785EXPORT_SYMBOL_GPL(device_find_child);
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797struct device *device_find_child_by_name(struct device *parent,
3798 const char *name)
3799{
3800 struct klist_iter i;
3801 struct device *child;
3802
3803 if (!parent)
3804 return NULL;
3805
3806 klist_iter_init(&parent->p->klist_children, &i);
3807 while ((child = next_device(&i)))
3808 if (sysfs_streq(dev_name(child), name) && get_device(child))
3809 break;
3810 klist_iter_exit(&i);
3811 return child;
3812}
3813EXPORT_SYMBOL_GPL(device_find_child_by_name);
3814
/*
 * devices_init - create the core sysfs device hierarchy roots:
 * /sys/devices, /sys/dev, /sys/dev/block and /sys/dev/char.
 *
 * On failure, tears down whatever was created (fall-through labels in
 * reverse order) and returns -ENOMEM.
 */
int __init devices_init(void)
{
	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
	if (!devices_kset)
		return -ENOMEM;
	dev_kobj = kobject_create_and_add("dev", NULL);
	if (!dev_kobj)
		goto dev_kobj_err;
	sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
	if (!sysfs_dev_block_kobj)
		goto block_kobj_err;
	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
	if (!sysfs_dev_char_kobj)
		goto char_kobj_err;

	return 0;

 char_kobj_err:
	kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
	kobject_put(dev_kobj);
 dev_kobj_err:
	kset_unregister(devices_kset);
	return -ENOMEM;
}
3840
3841static int device_check_offline(struct device *dev, void *not_used)
3842{
3843 int ret;
3844
3845 ret = device_for_each_child(dev, NULL, device_check_offline);
3846 if (ret)
3847 return ret;
3848
3849 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
3850}
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
/**
 * device_offline - Prepare the device for hot-removal.
 * @dev: Device to be put offline.
 *
 * Execute the device bus type's .offline() callback, if present, to prepare
 * the device for a subsequent hot-removal.  If that succeeds, the device must
 * not be used until either it is removed or its bus type's .online() callback
 * is executed.
 *
 * Call under device_hotplug_lock.
 */
int device_offline(struct device *dev)
{
	int ret;

	if (dev->offline_disabled)
		return -EPERM;

	/* all children must be offline before this device can go offline */
	ret = device_for_each_child(dev, NULL, device_check_offline);
	if (ret)
		return ret;

	device_lock(dev);
	if (device_supports_offline(dev)) {
		if (dev->offline) {
			ret = 1;	/* already offline */
		} else {
			ret = dev->bus->offline(dev);
			if (!ret) {
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
				dev->offline = true;
			}
		}
	}
	device_unlock(dev);

	return ret;
}
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901int device_online(struct device *dev)
3902{
3903 int ret = 0;
3904
3905 device_lock(dev);
3906 if (device_supports_offline(dev)) {
3907 if (dev->offline) {
3908 ret = dev->bus->online(dev);
3909 if (!ret) {
3910 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
3911 dev->offline = false;
3912 }
3913 } else {
3914 ret = 1;
3915 }
3916 }
3917 device_unlock(dev);
3918
3919 return ret;
3920}
3921
/*
 * A "root" device: a bare device used as an unparented anchor for a
 * subsystem's device tree, optionally linked back to its owning module.
 */
struct root_device {
	struct device dev;
	struct module *owner;	/* module that registered this root, may be NULL */
};

/* Map a struct device embedded in a root_device back to the container. */
static inline struct root_device *to_root_device(struct device *d)
{
	return container_of(d, struct root_device, dev);
}
3931
/* Release callback: free the containing root_device allocation. */
static void root_device_release(struct device *dev)
{
	kfree(to_root_device(dev));
}
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
/**
 * __root_device_register - allocate and register a root device
 * @name: root device name
 * @owner: owner module of the root device, usually THIS_MODULE
 *
 * This function allocates a root device and registers it
 * using device_register(). In order to free the returned
 * device, use root_device_unregister().
 *
 * Root devices are dummy devices which allow other devices
 * to be grouped under /sys/devices. Use this function to
 * allocate a root device and then use it as the parent of
 * any device which should appear under /sys/devices/{name}
 *
 * The /sys/devices/{name} directory will also contain a
 * 'module' symlink which points to the @owner directory
 * in sysfs.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: You probably want to use root_device_register().
 */
struct device *__root_device_register(const char *name, struct module *owner)
{
	struct root_device *root;
	int err = -ENOMEM;

	root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
	if (!root)
		return ERR_PTR(err);

	err = dev_set_name(&root->dev, "%s", name);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	root->dev.release = root_device_release;

	err = device_register(&root->dev);
	if (err) {
		/* device_register() took a reference; drop it, don't kfree */
		put_device(&root->dev);
		return ERR_PTR(err);
	}

#ifdef CONFIG_MODULES	/* gotta find a "cleaner" way to do this */
	if (owner) {
		struct module_kobject *mk = &owner->mkobj;

		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
		if (err) {
			device_unregister(&root->dev);
			return ERR_PTR(err);
		}
		root->owner = owner;
	}
#endif

	return &root->dev;
}
3997EXPORT_SYMBOL_GPL(__root_device_register);
3998
3999
4000
4001
4002
4003
4004
4005
/**
 * root_device_unregister - unregister and free a root device
 * @dev: device going away
 *
 * This function unregisters and cleans up a device that was created by
 * root_device_register().  Removes the 'module' symlink first when an
 * owner module was recorded.
 */
void root_device_unregister(struct device *dev)
{
	struct root_device *root = to_root_device(dev);

	if (root->owner)
		sysfs_remove_link(&root->dev.kobj, "module");

	device_unregister(dev);
}
4015EXPORT_SYMBOL_GPL(root_device_unregister);
4016
4017
/* Release callback for devices allocated by device_create_groups_vargs(). */
static void device_create_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}
4023
/*
 * device_create_groups_vargs - common worker for device_create() and
 * device_create_with_groups().
 *
 * Allocates a device, fills in class/parent/devt/groups/drvdata, names it
 * from @fmt/@args and registers it via device_add().  Returns the new
 * device or an ERR_PTR; on any failure the single put_device() in the
 * error path releases whatever was set up (kzalloc'd memory is freed by
 * device_create_release() once the last reference drops).
 */
static __printf(6, 0) struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
			   dev_t devt, void *drvdata,
			   const struct attribute_group **groups,
			   const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	device_initialize(dev);
	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);	/* put_device(NULL) is a no-op */
	return ERR_PTR(retval);
}
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
/**
 * device_create - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @fmt: string for the device's name
 *
 * This function can be used by char device classes.  A struct device
 * will be created in sysfs, registered to the specified class.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: the struct class passed to this function must have previously
 * been created with a call to class_create().
 */
struct device *device_create(struct class *class, struct device *parent,
			     dev_t devt, void *drvdata, const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
					  fmt, vargs);
	va_end(vargs);
	return dev;
}
4101EXPORT_SYMBOL_GPL(device_create);
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
/**
 * device_create_with_groups - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @groups: NULL-terminated list of attribute groups to be created
 * @fmt: string for the device's name
 *
 * Like device_create(), but additionally creates the given attribute
 * groups atomically with the device registration, avoiding the race of
 * userspace seeing the device before its attributes exist.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 */
struct device *device_create_with_groups(struct class *class,
					 struct device *parent, dev_t devt,
					 void *drvdata,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
					 fmt, vargs);
	va_end(vargs);
	return dev;
}
4146
4147
4148
4149
4150
4151
4152
4153
4154
/**
 * device_destroy - removes a device that was created with device_create()
 * @class: pointer to the struct class that this device was registered with
 * @devt: the dev_t of the device that was previously registered
 *
 * This call unregisters and cleans up a device that was created with a
 * call to device_create().  The put_device() drops the extra reference
 * taken by class_find_device_by_devt() before the final unregistration.
 */
void device_destroy(struct class *class, dev_t devt)
{
	struct device *dev;

	dev = class_find_device_by_devt(class, devt);
	if (dev) {
		put_device(dev);
		device_unregister(dev);
	}
}
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
/**
 * device_rename - renames a device
 * @dev: the pointer to the struct device to be renamed
 * @new_name: the new name of the device
 *
 * It is the responsibility of the caller to provide mutual
 * exclusion between two different calls of device_rename
 * on the same device to ensure that new_name is valid and
 * won't conflict with other devices.
 *
 * Note: given that some subsystems (networking and infiniband) use this
 * function, with no immediate plans for this to change, we cannot assume or
 * require that this function not be called at all.  Renaming devices is
 * generally discouraged; symlinks and other references may still carry the
 * old name.
 */
int device_rename(struct device *dev, const char *new_name)
{
	struct kobject *kobj = &dev->kobj;
	char *old_device_name = NULL;
	int error;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	dev_dbg(dev, "renaming to %s\n", new_name);

	/* keep the old name so the class-side symlink can be renamed too */
	old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!old_device_name) {
		error = -ENOMEM;
		goto out;
	}

	if (dev->class) {
		error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
					     kobj, old_device_name,
					     new_name, kobject_namespace(kobj));
		if (error)
			goto out;
	}

	error = kobject_rename(kobj, new_name);
	if (error)
		goto out;

out:
	put_device(dev);

	kfree(old_device_name);

	return error;
}
4244
4245static int device_move_class_links(struct device *dev,
4246 struct device *old_parent,
4247 struct device *new_parent)
4248{
4249 int error = 0;
4250
4251 if (old_parent)
4252 sysfs_remove_link(&dev->kobj, "device");
4253 if (new_parent)
4254 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
4255 "device");
4256 return error;
4257}
4258
4259
4260
4261
4262
4263
4264
/**
 * device_move - moves a device to a new parent
 * @dev: the pointer to the struct device to be moved
 * @new_parent: the new parent of the device (can be NULL)
 * @dpm_order: how to reorder the dpm list
 *
 * Moves @dev under @new_parent in both the kobject hierarchy and the
 * driver-core child lists, then reorders the PM lists per @dpm_order.
 * On symlink failure the move is rolled back (best effort).
 */
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order)
{
	int error;
	struct device *old_parent;
	struct kobject *new_parent_kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	device_pm_lock();
	new_parent = get_device(new_parent);
	new_parent_kobj = get_device_parent(dev, new_parent);
	if (IS_ERR(new_parent_kobj)) {
		error = PTR_ERR(new_parent_kobj);
		put_device(new_parent);
		goto out;
	}

	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
	error = kobject_move(&dev->kobj, new_parent_kobj);
	if (error) {
		cleanup_glue_dir(dev, new_parent_kobj);
		put_device(new_parent);
		goto out;
	}
	old_parent = dev->parent;
	dev->parent = new_parent;
	if (old_parent)
		klist_remove(&dev->p->knode_parent);
	if (new_parent) {
		klist_add_tail(&dev->p->knode_parent,
			       &new_parent->p->klist_children);
		set_dev_node(dev, dev_to_node(new_parent));
	}

	if (dev->class) {
		error = device_move_class_links(dev, old_parent, new_parent);
		if (error) {
			/* We ignore errors on cleanup since we're hosed anyway... */
			device_move_class_links(dev, new_parent, old_parent);
			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
				if (new_parent)
					klist_remove(&dev->p->knode_parent);
				dev->parent = old_parent;
				if (old_parent) {
					klist_add_tail(&dev->p->knode_parent,
						       &old_parent->p->klist_children);
					set_dev_node(dev, dev_to_node(old_parent));
				}
			}
			cleanup_glue_dir(dev, new_parent_kobj);
			put_device(new_parent);
			goto out;
		}
	}
	switch (dpm_order) {
	case DPM_ORDER_NONE:
		break;
	case DPM_ORDER_DEV_AFTER_PARENT:
		device_pm_move_after(dev, new_parent);
		devices_kset_move_after(dev, new_parent);
		break;
	case DPM_ORDER_PARENT_BEFORE_DEV:
		device_pm_move_before(new_parent, dev);
		devices_kset_move_before(new_parent, dev);
		break;
	case DPM_ORDER_DEV_LAST:
		device_pm_move_last(dev);
		devices_kset_move_last(dev);
		break;
	}

	put_device(old_parent);
out:
	device_pm_unlock();
	put_device(dev);
	return error;
}
4346EXPORT_SYMBOL_GPL(device_move);
4347
/*
 * device_attrs_change_owner - change ownership of @dev's sysfs attributes.
 * @dev: device whose attribute files are re-owned.
 * @kuid: new owner's uid.
 * @kgid: new owner's gid.
 *
 * Walks the class, device-type and per-device attribute groups, plus the
 * "online" attribute when the device supports offlining.  Returns 0 or
 * the first error from sysfs.
 */
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
				     kgid_t kgid)
{
	struct kobject *kobj = &dev->kobj;
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		/*
		 * Change the device groups of the device class for @dev to
		 * @kuid/@kgid.
		 */
		error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	if (type) {
		/*
		 * Change the device groups of the device type for @dev to
		 * @kuid/@kgid.
		 */
		error = sysfs_groups_change_owner(kobj, type->groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	/* Change the device groups of @dev to @kuid/@kgid. */
	error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
	if (error)
		return error;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		/* Change online device attributes of @dev to @kuid/@kgid. */
		error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
						kuid, kgid);
		if (error)
			return error;
	}

	return 0;
}
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
4407{
4408 int error;
4409 struct kobject *kobj = &dev->kobj;
4410
4411 dev = get_device(dev);
4412 if (!dev)
4413 return -EINVAL;
4414
4415
4416
4417
4418
4419 error = sysfs_change_owner(kobj, kuid, kgid);
4420 if (error)
4421 goto out;
4422
4423
4424
4425
4426
4427
4428 error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
4429 kgid);
4430 if (error)
4431 goto out;
4432
4433
4434
4435
4436
4437
4438 error = device_attrs_change_owner(dev, kuid, kgid);
4439 if (error)
4440 goto out;
4441
4442 error = dpm_sysfs_change_owner(dev, kuid, kgid);
4443 if (error)
4444 goto out;
4445
4446#ifdef CONFIG_BLOCK
4447 if (sysfs_deprecated && dev->class == &block_class)
4448 goto out;
4449#endif
4450
4451
4452
4453
4454
4455
4456
4457 error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
4458 dev_name(dev), kuid, kgid);
4459 if (error)
4460 goto out;
4461
4462out:
4463 put_device(dev);
4464 return error;
4465}
4466EXPORT_SYMBOL_GPL(device_change_owner);
4467
4468
4469
4470
/**
 * device_shutdown - call ->shutdown() on each device to shutdown.
 *
 * Walks devices_kset from the tail (children were added after their
 * parents, so they are shut down first), invoking the class's
 * shutdown_pre and then the bus's or driver's shutdown callback for
 * each device.
 */
void device_shutdown(void)
{
	struct device *dev, *parent;

	wait_for_device_probe();
	device_block_probing();

	cpufreq_suspend();

	spin_lock(&devices_kset->list_lock);
	/*
	 * Walk the devices list backward, shutting down each in turn.
	 * Beware that device unplug events may also start pulling
	 * devices offline, even as the system is shutting down.
	 */
	while (!list_empty(&devices_kset->list)) {
		dev = list_entry(devices_kset->list.prev, struct device,
				kobj.entry);

		/*
		 * hold reference count of device's parent to
		 * prevent it from being freed because parent's
		 * lock is to be held
		 */
		parent = get_device(dev->parent);
		get_device(dev);
		/*
		 * Make sure the device is off the kset list, in the
		 * event that dev->*->shutdown() doesn't remove it.
		 */
		list_del_init(&dev->kobj.entry);
		spin_unlock(&devices_kset->list_lock);

		/* hold lock to avoid race with probe/release */
		if (parent)
			device_lock(parent);
		device_lock(dev);

		/* Don't allow any more runtime suspends */
		pm_runtime_get_noresume(dev);
		pm_runtime_barrier(dev);

		if (dev->class && dev->class->shutdown_pre) {
			if (initcall_debug)
				dev_info(dev, "shutdown_pre\n");
			dev->class->shutdown_pre(dev);
		}
		if (dev->bus && dev->bus->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->bus->shutdown(dev);
		} else if (dev->driver && dev->driver->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->driver->shutdown(dev);
		}

		device_unlock(dev);
		if (parent)
			device_unlock(parent);

		put_device(dev);
		put_device(parent);

		spin_lock(&devices_kset->list_lock);
	}
	spin_unlock(&devices_kset->list_lock);
}
4539
4540
4541
4542
4543
4544#ifdef CONFIG_PRINTK
/*
 * set_dev_info - fill @dev_info with subsystem and device identifiers
 * used by structured printk records.  Leaves @dev_info zeroed when the
 * device has neither a class nor a bus name.
 */
static void
set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
{
	const char *subsys;

	memset(dev_info, 0, sizeof(*dev_info));

	if (dev->class)
		subsys = dev->class->name;
	else if (dev->bus)
		subsys = dev->bus->name;
	else
		return;

	strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));

	/*
	 * Add device identifier DEVICE=:
	 *   b12:8         block dev_t
	 *   c127:3        char dev_t
	 *   n8            netdev ifindex
	 *   +sound:card0  subsystem:devname
	 */
	if (MAJOR(dev->devt)) {
		char c;

		if (strcmp(subsys, "block") == 0)
			c = 'b';
		else
			c = 'c';

		snprintf(dev_info->device, sizeof(dev_info->device),
			 "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
	} else if (strcmp(subsys, "net") == 0) {
		struct net_device *net = to_net_dev(dev);

		snprintf(dev_info->device, sizeof(dev_info->device),
			 "n%u", net->ifindex);
	} else {
		snprintf(dev_info->device, sizeof(dev_info->device),
			 "+%s:%s", subsys, dev_name(dev));
	}
}
4588
/*
 * dev_vprintk_emit - emit a printk record tagged with @dev's identifiers.
 *
 * Builds the dev_printk_info metadata from @dev, then forwards to
 * vprintk_emit().  Returns the number of characters emitted.
 */
int dev_vprintk_emit(int level, const struct device *dev,
		     const char *fmt, va_list args)
{
	struct dev_printk_info dev_info;

	set_dev_info(dev, &dev_info);

	return vprintk_emit(0, level, &dev_info, fmt, args);
}
4599
/* Varargs front-end for dev_vprintk_emit(). */
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);

	r = dev_vprintk_emit(level, dev, fmt, args);

	va_end(args);

	return r;
}
4614
/*
 * Common sink for dev_printk()-family: prefixes the message with the
 * driver and device name, or notes a NULL device.  level[1] is the digit
 * of the KERN_<n> loglevel string.
 */
static void __dev_printk(const char *level, const struct device *dev,
			 struct va_format *vaf)
{
	if (!dev) {
		printk("%s(NULL device *): %pV", level, vaf);
		return;
	}

	dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
			dev_driver_string(dev), dev_name(dev), vaf);
}
4624
/* Varargs front-end for __dev_printk() used by the dev_printk() macro. */
void _dev_printk(const char *level, const struct device *dev,
		 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	__dev_printk(level, dev, &vaf);

	va_end(args);
}
4641
/*
 * Generate the per-loglevel dev_* helpers (_dev_emerg .. _dev_info):
 * each wraps its varargs in a va_format and hands off to __dev_printk()
 * with the corresponding KERN_<level> prefix.
 */
#define define_dev_printk_level(func, kern_level)		\
void func(const struct device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__dev_printk(kern_level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
4666
4667#endif
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
4697{
4698 struct va_format vaf;
4699 va_list args;
4700
4701 va_start(args, fmt);
4702 vaf.fmt = fmt;
4703 vaf.va = &args;
4704
4705 if (err != -EPROBE_DEFER) {
4706 dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
4707 } else {
4708 device_set_deferred_probe_reason(dev, &vaf);
4709 dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
4710 }
4711
4712 va_end(args);
4713
4714 return err;
4715}
4716EXPORT_SYMBOL_GPL(dev_err_probe);
4717
4718static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
4719{
4720 return fwnode && !IS_ERR(fwnode->secondary);
4721}
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733
4734
4735
4736
/**
 * set_primary_fwnode - Change the primary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New primary firmware node of the device, or NULL to clear it.
 *
 * Installs @fwnode as @dev's primary firmware node, attaching any existing
 * node as @fwnode's secondary.  Passing a NULL @fwnode removes the current
 * primary and promotes its secondary (if any) to be @dev's fwnode.
 */
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	struct device *parent = dev->parent;
	struct fwnode_handle *fn = dev->fwnode;

	if (fwnode) {
		/* If the current node is a primary, keep only its secondary. */
		if (fwnode_is_primary(fn))
			fn = fn->secondary;

		if (fn) {
			/* The new primary must not already have a secondary. */
			WARN_ON(fwnode->secondary);
			fwnode->secondary = fn;
		}
		dev->fwnode = fwnode;
	} else {
		if (fwnode_is_primary(fn)) {
			dev->fwnode = fn->secondary;

			/*
			 * Leave fn->secondary intact when the primary is
			 * shared with @dev's parent - NOTE(review):
			 * presumably the parent still relies on that link;
			 * confirm against callers.
			 */
			if (!(parent && fn == parent->fwnode))
				fn->secondary = NULL;
		} else {
			dev->fwnode = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772
4773void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
4774{
4775 if (fwnode)
4776 fwnode->secondary = ERR_PTR(-ENODEV);
4777
4778 if (fwnode_is_primary(dev->fwnode))
4779 dev->fwnode->secondary = fwnode;
4780 else
4781 dev->fwnode = fwnode;
4782}
4783EXPORT_SYMBOL_GPL(set_secondary_fwnode);
4784
4785
4786
4787
4788
4789
4790
4791
4792
/**
 * device_set_of_node_from_dev - reuse the device-tree node of another device
 * @dev: device whose device-tree node is being set
 * @dev2: device whose device-tree node is being reused
 *
 * Drops the reference @dev held on its previous of_node, takes a new
 * reference on @dev2's of_node, and flags @dev as reusing another
 * device's node.
 */
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
	/* Release the old node's reference before adopting the new one. */
	of_node_put(dev->of_node);
	dev->of_node = of_node_get(dev2->of_node);
	dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
4800
/*
 * device_set_node - set both the fwnode and of_node of a device
 *
 * Points dev->fwnode at @fwnode and derives dev->of_node from it via
 * to_of_node().  NOTE(review): unlike device_set_of_node_from_dev(), no
 * of_node reference counting is performed here - confirm callers manage
 * the node's refcount themselves.
 */
void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
{
	dev->fwnode = fwnode;
	dev->of_node = to_of_node(fwnode);
}
EXPORT_SYMBOL_GPL(device_set_node);
4807
/**
 * device_match_name - match routine comparing a device's name to a string
 * @dev: device to examine
 * @name: name to compare against, via sysfs_streq()
 *
 * Returns nonzero on a match, for use with the bus/class _find_device()
 * style iterators that take a match callback.
 */
int device_match_name(struct device *dev, const void *name)
{
	return sysfs_streq(dev_name(dev), name);
}
EXPORT_SYMBOL_GPL(device_match_name);
4813
/**
 * device_match_of_node - match routine comparing a device's of_node pointer
 * @dev: device to examine
 * @np: device-tree node pointer to compare against
 *
 * Pure pointer comparison; returns nonzero when @dev's of_node is @np.
 */
int device_match_of_node(struct device *dev, const void *np)
{
	return dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
4819
/**
 * device_match_fwnode - match routine comparing a device's firmware node
 * @dev: device to examine
 * @fwnode: firmware node handle to compare against, via dev_fwnode()
 *
 * Pure pointer comparison; returns nonzero on a match.
 */
int device_match_fwnode(struct device *dev, const void *fwnode)
{
	return dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
4825
4826int device_match_devt(struct device *dev, const void *pdevt)
4827{
4828 return dev->devt == *(dev_t *)pdevt;
4829}
4830EXPORT_SYMBOL_GPL(device_match_devt);
4831
/**
 * device_match_acpi_dev - match routine comparing a device's ACPI companion
 * @dev: device to examine
 * @adev: ACPI device pointer to compare against, via ACPI_COMPANION()
 *
 * Pure pointer comparison; returns nonzero on a match.
 */
int device_match_acpi_dev(struct device *dev, const void *adev)
{
	return ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
4837
/**
 * device_match_any - match routine that accepts every device
 * @dev: device to examine (ignored)
 * @unused: match data (ignored)
 *
 * Always returns 1; lets the _find_device() iterators return the first
 * device they visit.
 */
int device_match_any(struct device *dev, const void *unused)
{
	return 1;
}
EXPORT_SYMBOL_GPL(device_match_any);
4843