1
2
3
4
5
6
7
8
9
10
11#include <linux/acpi.h>
12#include <linux/cpufreq.h>
13#include <linux/device.h>
14#include <linux/err.h>
15#include <linux/fwnode.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/kdev_t.h>
21#include <linux/notifier.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/genhd.h>
25#include <linux/mutex.h>
26#include <linux/pm_runtime.h>
27#include <linux/netdevice.h>
28#include <linux/sched/signal.h>
29#include <linux/sched/mm.h>
30#include <linux/sysfs.h>
31
32#include "base.h"
33#include "power/power.h"
34
#ifdef CONFIG_SYSFS_DEPRECATED
/* Default for the deprecated sysfs layout: enabled iff _V2 is selected. */
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/* Parse "sysfs.deprecated=<int>" from the kernel command line. */
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
47
48
/* Devices waiting for their firmware-described suppliers (fw_devlink). */
static LIST_HEAD(wait_for_suppliers);
static DEFINE_MUTEX(wfs_lock);
/*
 * Suppliers whose sync_state() call is deferred while the count is non-zero.
 * Starts at 1; the matching resume runs as a late_initcall below.
 */
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
/* fw_devlink link-creation deferral; see fw_devlink_pause()/resume(). */
static unsigned int defer_fw_devlink_count;
static LIST_HEAD(deferred_fw_devlink);
static DEFINE_MUTEX(defer_fw_devlink_lock);
static bool fw_devlink_is_permissive(void);
57
#ifdef CONFIG_SRCU
/*
 * Device links protection.
 *
 * Writers serialize on a mutex; readers use SRCU so that the read side stays
 * cheap and link objects can be freed after a grace period (see
 * devlink_dev_release() below).
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}
#else /* !CONFIG_SRCU */
/* Fallback: a plain rwsem; the read-lock "index" is unused. */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int device_links_read_lock_held(void)
{
	return lockdep_is_held(&device_links_lock);
}
#endif
#endif /* !CONFIG_SRCU */
117
118
119
120
121
122
123
124
125
/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child
 * or its consumer etc).  Return 1 if that is the case or 0 otherwise.
 */
int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	if (dev == target)
		return 1;

	/* Recurse into children first (same callback signature). */
	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* SYNC_STATE_ONLY links do not represent real dependencies. */
		if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}
151
/*
 * Choose the initial state of a new link from where the supplier and the
 * consumer currently are in their driver-binding life cycles.
 */
static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probing yet as long as it
			 * knows that the supplier is already functional (for
			 * example, it has just acquired some resources from
			 * the supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}
195
/*
 * Move @dev, then its children and consumers, to the tails of the devices
 * kset and the dpm_list so that suppliers always precede their consumers.
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* SYNC_STATE_ONLY links impose no ordering constraint. */
		if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}
219
220
221
222
223
224
225
226
227
228
229void device_pm_move_to_tail(struct device *dev)
230{
231 int idx;
232
233 idx = device_links_read_lock();
234 device_pm_lock();
235 device_reorder_to_tail(dev, NULL);
236 device_pm_unlock();
237 device_links_read_unlock(idx);
238}
239
240#define to_devlink(dev) container_of((dev), struct device_link, link_dev)
241
242static ssize_t status_show(struct device *dev,
243 struct device_attribute *attr, char *buf)
244{
245 const char *output;
246
247 switch (to_devlink(dev)->status) {
248 case DL_STATE_NONE:
249 output = "not tracked";
250 break;
251 case DL_STATE_DORMANT:
252 output = "dormant";
253 break;
254 case DL_STATE_AVAILABLE:
255 output = "available";
256 break;
257 case DL_STATE_CONSUMER_PROBE:
258 output = "consumer probing";
259 break;
260 case DL_STATE_ACTIVE:
261 output = "active";
262 break;
263 case DL_STATE_SUPPLIER_UNBIND:
264 output = "supplier unbinding";
265 break;
266 default:
267 output = "unknown";
268 break;
269 }
270
271 return sysfs_emit(buf, "%s\n", output);
272}
273static DEVICE_ATTR_RO(status);
274
275static ssize_t auto_remove_on_show(struct device *dev,
276 struct device_attribute *attr, char *buf)
277{
278 struct device_link *link = to_devlink(dev);
279 const char *output;
280
281 if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
282 output = "supplier unbind";
283 else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
284 output = "consumer unbind";
285 else
286 output = "never";
287
288 return sysfs_emit(buf, "%s\n", output);
289}
290static DEVICE_ATTR_RO(auto_remove_on);
291
292static ssize_t runtime_pm_show(struct device *dev,
293 struct device_attribute *attr, char *buf)
294{
295 struct device_link *link = to_devlink(dev);
296
297 return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
298}
299static DEVICE_ATTR_RO(runtime_pm);
300
301static ssize_t sync_state_only_show(struct device *dev,
302 struct device_attribute *attr, char *buf)
303{
304 struct device_link *link = to_devlink(dev);
305
306 return sysfs_emit(buf, "%d\n",
307 !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
308}
309static DEVICE_ATTR_RO(sync_state_only);
310
/* Attributes exposed for every device link under its class device. */
static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);
319
/* Final teardown: drop outstanding runtime-PM refs, then the device refs. */
static void device_link_free(struct device_link *link)
{
	/* Release every rpm_active reference beyond the initial one. */
	while (refcount_dec_not_one(&link->rpm_active))
		pm_runtime_put(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}
329
#ifdef CONFIG_SRCU
/* SRCU callback: actually free the link once all readers are done. */
static void __device_link_free_srcu(struct rcu_head *rhead)
{
	device_link_free(container_of(rhead, struct device_link, rcu_head));
}

static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	/* Defer freeing until after an SRCU grace period. */
	call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
}
#else
static void devlink_dev_release(struct device *dev)
{
	device_link_free(to_devlink(dev));
}
#endif
348
/* Device class under which every device link is registered. */
static struct class devlink_class = {
	.name = "devlink",
	.owner = THIS_MODULE,
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};
355
/*
 * Create the four sysfs links exposing a device link: "supplier" and
 * "consumer" under the link device itself, plus "consumer:<name>" under the
 * supplier and "supplier:<name>" under the consumer.  On any failure, the
 * links already created are removed again (see the unwind labels).
 */
static int devlink_add_symlinks(struct device *dev,
				struct class_interface *class_intf)
{
	int ret;
	size_t len;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	/*
	 * One buffer serves both names: "supplier:" and "consumer:" are the
	 * same length, so size for the longer device name + prefix + NUL.
	 */
	len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	snprintf(buf, len, "consumer:%s", dev_name(con));
	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_con_dev;

	snprintf(buf, len, "supplier:%s", dev_name(sup));
	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_sup_dev;

	goto out;

err_sup_dev:
	snprintf(buf, len, "consumer:%s", dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	kfree(buf);
	return ret;
}
403
/* Remove the sysfs links created by devlink_add_symlinks(). */
static void devlink_remove_symlinks(struct device *dev,
				    struct class_interface *class_intf)
{
	struct device_link *link = to_devlink(dev);
	size_t len;
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		/* Can't name the remaining links; leave them dangling. */
		WARN(1, "Unable to properly free device link symlinks!\n");
		return;
	}

	snprintf(buf, len, "supplier:%s", dev_name(sup));
	sysfs_remove_link(&con->kobj, buf);
	snprintf(buf, len, "consumer:%s", dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
	kfree(buf);
}
430
/* Keep the supplier/consumer symlinks in sync as link devices come and go. */
static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};
436
437static int __init devlink_class_init(void)
438{
439 int ret;
440
441 ret = class_register(&devlink_class);
442 if (ret)
443 return ret;
444
445 ret = class_interface_register(&devlink_class_intf);
446 if (ret)
447 class_unregister(&devlink_class);
448
449 return ret;
450}
451postcore_initcall(devlink_class_init);
452
/* Flags that only make sense on managed (stateful) links. */
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER | \
			       DL_FLAG_SYNC_STATE_ONLY)

/* Every flag a caller may pass to device_link_add(). */
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * Returns the link on success or when an equivalent link already exists (in
 * which case that link is updated/reference-counted instead), and NULL on
 * invalid flag combinations, an unregistered supplier, a dependency cycle,
 * or allocation/registration failure.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/*
	 * Reject invalid flag combinations: STATELESS excludes all managed
	 * flags, SYNC_STATE_ONLY must stand alone, and AUTOPROBE_CONSUMER is
	 * incompatible with either autoremove flag.
	 */
	if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
	     flags != DL_FLAG_SYNC_STATE_ONLY) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	/* RPM_ACTIVE requires the supplier to be runtime-resumed up front. */
	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse dependency between the consumer and the supplier already in
	 * the graph, return NULL.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
	 * doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	/* Reuse an existing link between the same two devices if there is one. */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			/*
			 * A previously SYNC_STATE_ONLY link becoming a real
			 * (stateless) one now imposes ordering: reorder below.
			 */
			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
			    !(link->flags & DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the life time of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	/* Expose the link itself as a device of the devlink class. */
	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s--%s",
		     dev_name(supplier), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		put_device(consumer);
		put_device(supplier);
		kfree(link);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Some callers expect the link creation during consumer driver probe
	 * to resume the supplier even without DL_FLAG_RPM_ACTIVE.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	/* Balance the early pm_runtime_get_sync() if no link was created. */
	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
/*
 * Record @consumer as waiting for one or more firmware-described suppliers.
 * @need_for_probe says whether the missing suppliers are mandatory (block
 * probing) or optional.
 */
static void device_link_wait_for_supplier(struct device *consumer,
					  bool need_for_probe)
{
	mutex_lock(&wfs_lock);
	list_add_tail(&consumer->links.needs_suppliers, &wait_for_suppliers);
	consumer->links.need_for_probe = need_for_probe;
	mutex_unlock(&wfs_lock);
}

static void device_link_wait_for_mandatory_supplier(struct device *consumer)
{
	device_link_wait_for_supplier(consumer, true);
}

static void device_link_wait_for_optional_supplier(struct device *consumer)
{
	device_link_wait_for_supplier(consumer, false);
}
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
/*
 * Retry fwnode add_links() for every device still waiting for suppliers.
 * Devices whose links were all created leave the wait list; devices that
 * fail with something other than -ENODEV (or when fw_devlink is permissive)
 * stop blocking their own probe.
 */
static void device_link_add_missing_supplier_links(void)
{
	struct device *dev, *tmp;

	mutex_lock(&wfs_lock);
	list_for_each_entry_safe(dev, tmp, &wait_for_suppliers,
				 links.needs_suppliers) {
		int ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
		if (!ret)
			list_del_init(&dev->links.needs_suppliers);
		else if (ret != -ENODEV || fw_devlink_is_permissive())
			dev->links.need_for_probe = false;
	}
	mutex_unlock(&wfs_lock);
}
767
768#ifdef CONFIG_SRCU
769static void __device_link_del(struct kref *kref)
770{
771 struct device_link *link = container_of(kref, struct device_link, kref);
772
773 dev_dbg(link->consumer, "Dropping the link to %s\n",
774 dev_name(link->supplier));
775
776 pm_runtime_drop_link(link);
777
778 list_del_rcu(&link->s_node);
779 list_del_rcu(&link->c_node);
780 device_unregister(&link->link_dev);
781}
782#else
783static void __device_link_del(struct kref *kref)
784{
785 struct device_link *link = container_of(kref, struct device_link, kref);
786
787 dev_info(link->consumer, "Dropping the link to %s\n",
788 dev_name(link->supplier));
789
790 pm_runtime_drop_link(link);
791
792 list_del(&link->s_node);
793 list_del(&link->c_node);
794 device_unregister(&link->link_dev);
795}
796#endif
797
/* Drop one caller-held reference; only legal on stateless links. */
static void device_link_put_kref(struct device_link *link)
{
	if (link->flags & DL_FLAG_STATELESS)
		kref_put(&link->kref, __device_link_del);
	else
		WARN(1, "Unable to drop a managed device link reference\n");
}
805
806
807
808
809
810
811
812
813
814
/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * Drops one reference on @link under the device links write lock.  If the
 * link was added multiple times, it needs to be deleted as often.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
822
823
824
825
826
827
828
829
830
/**
 * device_link_remove - Delete a stateless link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
 * Looks the link up on @supplier's consumer list and drops one reference
 * on it, if found.
 */
void device_link_remove(void *consumer, struct device *supplier)
{
	struct device_link *link;

	if (WARN_ON(consumer == supplier))
		return;

	device_links_write_lock();

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer == consumer) {
			device_link_put_kref(link);
			break;
		}
	}

	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);
850
/*
 * Roll back "consumer probe" links of @dev after a failed supplier check:
 * links whose supplier is bound go back to "available"; the rest (which can
 * only be SYNC_STATE_ONLY links, hence the WARN) go "dormant".
 */
static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->status != DL_STATE_CONSUMER_PROBE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}
}
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers.  Walk the list of the
 * device's links to suppliers and see if all of them are available.  If not,
 * return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here.  It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers.  This means we need
 * to mark the link as "consumer probe in progress" to make the supplier
 * removal wait for us to complete (or bad things may happen).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;

	/*
	 * Device waiting for mandatory firmware-described supplier(s) to be
	 * added: defer its probe.
	 */
	mutex_lock(&wfs_lock);
	if (!list_empty(&dev->links.needs_suppliers) &&
	    dev->links.need_for_probe) {
		mutex_unlock(&wfs_lock);
		return -EPROBE_DEFER;
	}
	mutex_unlock(&wfs_lock);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE &&
		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
			device_links_missing_supplier(dev);
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
/**
 * __device_links_queue_sync_state - Queue a device for sync_state() callback
 * @dev: Device to call sync_state() on
 * @list: List head to queue the @dev on
 *
 * Queues a device for a sync_state() callback when the device links write
 * lock isn't held.  This allows the sync_state() execution flow to use device
 * links APIs.  The caller must hold device_links_write_lock() when calling
 * this, and must flush @list with device_links_flush_sync_list() as soon as
 * the lock is released so that the get_device() taken here is balanced.
 */
static void __device_links_queue_sync_state(struct device *dev,
					    struct list_head *list)
{
	struct device_link *link;

	if (!dev_has_sync_state(dev))
		return;
	if (dev->state_synced)
		return;

	/* Only sync once every managed consumer link has gone active. */
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;
		if (link->status != DL_STATE_ACTIVE)
			return;
	}

	/*
	 * Set the flag here to avoid adding the same device to a list more
	 * than once.  This can happen if new consumers get added to the
	 * device and probed before the list is flushed.
	 */
	dev->state_synced = true;

	if (WARN_ON(!list_empty(&dev->links.defer_hook)))
		return;

	get_device(dev);
	list_add_tail(&dev->links.defer_hook, list);
}
969
970
971
972
973
974
975
976
977
978
979
/**
 * device_links_flush_sync_list - Call sync_state() on a list of devices
 * @list: List of devices to call sync_state() on
 * @dont_lock_dev: Device for which lock is already held by the caller
 *
 * Calls sync_state() on all the devices that have been queued for it.  This
 * function is used in conjunction with __device_links_queue_sync_state().
 * The @dont_lock_dev parameter is useful when this function is called from a
 * context where that device's lock is already held.
 */
static void device_links_flush_sync_list(struct list_head *list,
					 struct device *dont_lock_dev)
{
	struct device *dev, *tmp;

	list_for_each_entry_safe(dev, tmp, list, links.defer_hook) {
		list_del_init(&dev->links.defer_hook);

		if (dev != dont_lock_dev)
			device_lock(dev);

		/* Bus callback takes precedence over the driver's. */
		if (dev->bus->sync_state)
			dev->bus->sync_state(dev);
		else if (dev->driver && dev->driver->sync_state)
			dev->driver->sync_state(dev);

		if (dev != dont_lock_dev)
			device_unlock(dev);

		/* Balance the get_device() from the queueing side. */
		put_device(dev);
	}
}
1002
/* Defer sync_state() callbacks; nestable, matched by _resume() below. */
void device_links_supplier_sync_state_pause(void)
{
	device_links_write_lock();
	defer_sync_state_count++;
	device_links_write_unlock();
}
1009
/*
 * Undo one device_links_supplier_sync_state_pause(); when the last pause is
 * released, queue and then run the deferred sync_state() callbacks.
 */
void device_links_supplier_sync_state_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(sync_list);

	device_links_write_lock();
	if (!defer_sync_state_count) {
		WARN(true, "Unmatched sync_state pause/resume!");
		goto out;
	}
	defer_sync_state_count--;
	if (defer_sync_state_count)
		goto out;

	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) {
		/*
		 * Delete from deferred_sync list before queuing it to
		 * sync_list because defer_hook is used for both lists.
		 */
		list_del_init(&dev->links.defer_hook);
		__device_links_queue_sync_state(dev, &sync_list);
	}
out:
	device_links_write_unlock();

	/* Must run without the write lock held. */
	device_links_flush_sync_list(&sync_list, NULL);
}
1037
/* Release the boot-time pause (defer_sync_state_count starts at 1). */
static int sync_state_resume_initcall(void)
{
	device_links_supplier_sync_state_resume();
	return 0;
}
late_initcall(sync_state_resume_initcall);
1044
/* Queue @sup for a deferred sync_state() unless it is already queued. */
static void __device_links_supplier_defer_sync(struct device *sup)
{
	if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup))
		list_add_tail(&sup->links.defer_hook, &deferred_sync);
}
1050
/* Stop managing @link and release the management reference on it. */
static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}
1057
1058static ssize_t waiting_for_supplier_show(struct device *dev,
1059 struct device_attribute *attr,
1060 char *buf)
1061{
1062 bool val;
1063
1064 device_lock(dev);
1065 mutex_lock(&wfs_lock);
1066 val = !list_empty(&dev->links.needs_suppliers)
1067 && dev->links.need_for_probe;
1068 mutex_unlock(&wfs_lock);
1069 device_unlock(dev);
1070 return sysfs_emit(buf, "%u\n", val);
1071}
1072static DEVICE_ATTR_RO(waiting_for_supplier);
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".  Also change the status
 * of @dev's links to suppliers to "active".
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link, *ln;
	LIST_HEAD(sync_list);

	/*
	 * If a device probes successfully, it's expected to have created all
	 * the device links it needs to or any future device links would be
	 * superfluous.  So, fw_devlink no longer needs to track this device.
	 */
	mutex_lock(&wfs_lock);
	list_del_init(&dev->links.needs_suppliers);
	mutex_unlock(&wfs_lock);
	device_remove_file(dev, &dev_attr_waiting_for_supplier);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * Links created during consumer probe may be in the "consumer
		 * probe" state to start with if the supplier is still probing
		 * when they are created and they may become "active" if the
		 * consumer probe returns first.  Skip them here.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	if (defer_sync_state_count)
		__device_links_supplier_defer_sync(dev);
	else
		__device_links_queue_sync_state(dev, &sync_list);

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		struct device *supplier;

		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		supplier = link->supplier;
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
			/*
			 * A SYNC_STATE_ONLY link is no longer needed once the
			 * consumer has probed, so drop it here.
			 */
			device_link_drop_managed(link);
		} else {
			WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
			WRITE_ONCE(link->status, DL_STATE_ACTIVE);
		}

		/*
		 * Queue the supplier's sync_state() even for a just-dropped
		 * SYNC_STATE_ONLY link, in case it was the last link that was
		 * preventing the supplier from getting a sync_state() call.
		 */
		if (defer_sync_state_count)
			__device_links_supplier_defer_sync(supplier);
		else
			__device_links_queue_sync_state(supplier, &sync_list);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();

	/* @dev's lock is held by the probe path, so don't re-take it. */
	device_links_flush_sync_list(&sync_list, dev);
}
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all AUTOREMOVE_CONSUMER links from this device to any suppliers.
 *
 * The remaining links stay around, but their status is changed back to
 * "available" when the supplier is still bound, or "dormant" otherwise
 * (which can only happen for SYNC_STATE_ONLY links, hence the WARN).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
			device_link_drop_managed(link);
			continue;
		}

		if (link->status != DL_STATE_CONSUMER_PROBE &&
		    link->status != DL_STATE_ACTIVE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver probe just failed.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * The probe has failed, so if the status of the link is
		 * "consumer probe" or "active", it must have been added by
		 * a probing consumer while this device was still probing.
		 * Change its state to "dormant", as it represents a valid
		 * relationship, but it is not functionally meaningful.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant"
 * and invoke %__device_links_no_driver() to update links to suppliers for it
 * as appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

		/*
		 * autoremove the links between this @dev and its consumer
		 * devices that are not active, i.e. where the link state
		 * has moved to DL_STATE_SUPPLIER_UNBIND.
		 */
		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
			device_link_drop_managed(link);

		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	list_del_init(&dev->links.defer_hook);
	__device_links_no_driver(dev);

	device_links_write_unlock();
}
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each consumer of the device and return 'true' if its link's status
 * is one of "consumer probe" or "active" (meaning that the device or its
 * driver is busy from the consumer's perspective).
 *
 * Otherwise, change the status of the link to "supplier unbind" to prevent
 * the consumer from using the supplier while it is being unbound.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev and if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to complete
 * and start over.
 *
 * If that's not the case, change the status of the link to "supplier unbind"
 * and check if the link was in the "active" state.  If so, force the consumer
 * driver to unbind and start over (the consumer will not re-probe as long as
 * the "supplier unbind" status is set).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (!(link->flags & DL_FLAG_MANAGED) ||
		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			/* Can't wait for probes with the write lock held. */
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			/* Keep the consumer alive across the unlock. */
			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}
1376
1377
1378
1379
1380
/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	/* Device-link class devices have no links of their own. */
	if (dev->class == &devlink_class)
		return;

	mutex_lock(&wfs_lock);
	list_del(&dev->links.needs_suppliers);
	mutex_unlock(&wfs_lock);

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}
1411
1412static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
1413static int __init fw_devlink_setup(char *arg)
1414{
1415 if (!arg)
1416 return -EINVAL;
1417
1418 if (strcmp(arg, "off") == 0) {
1419 fw_devlink_flags = 0;
1420 } else if (strcmp(arg, "permissive") == 0) {
1421 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
1422 } else if (strcmp(arg, "on") == 0) {
1423 fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
1424 } else if (strcmp(arg, "rpm") == 0) {
1425 fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
1426 DL_FLAG_PM_RUNTIME;
1427 }
1428 return 0;
1429}
1430early_param("fw_devlink", fw_devlink_setup);
1431
/* Flags that fw_devlink-created device links should use. */
u32 fw_devlink_get_flags(void)
{
	return fw_devlink_flags;
}

/* "permissive" mode: SYNC_STATE_ONLY links only, never block probing. */
static bool fw_devlink_is_permissive(void)
{
	return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
}
1441
/*
 * Try to create device links from @dev (as consumer) to the suppliers its
 * firmware node describes.  If some suppliers are still missing, put @dev on
 * the wait list so that probing is deferred (mandatory) or retried later
 * (optional).
 */
static void fw_devlink_link_device(struct device *dev)
{
	int fw_ret;

	if (!fw_devlink_flags)
		return;

	mutex_lock(&defer_fw_devlink_lock);
	if (!defer_fw_devlink_count)
		device_link_add_missing_supplier_links();

	/*
	 * The device's fwnode not having add_links() doesn't affect if other
	 * consumers can find this device as a supplier.  So, this check is
	 * intentionally placed after device_link_add_missing_supplier_links().
	 */
	if (!fwnode_has_op(dev->fwnode, add_links))
		goto out;

	/*
	 * If fw_devlink is being deferred, assume all devices have mandatory
	 * suppliers they need to link to later.  Then, when fw_devlink is
	 * resumed, all these devices will get a chance to try and link to any
	 * suppliers they have.
	 */
	if (!defer_fw_devlink_count) {
		fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
		if (fw_ret == -ENODEV && fw_devlink_is_permissive())
			fw_ret = -EAGAIN;
	} else {
		fw_ret = -ENODEV;
		/*
		 * defer_hook is not used to add the device to deferred_sync
		 * until the device is bound; since deferred fw_devlink also
		 * blocks probing, the same hook can be reused here.
		 */
		list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink);
	}

	if (fw_ret == -ENODEV)
		device_link_wait_for_mandatory_supplier(dev);
	else if (fw_ret)
		device_link_wait_for_optional_supplier(dev);

out:
	mutex_unlock(&defer_fw_devlink_lock);
}
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
/**
 * fw_devlink_pause - Pause parsing of fwnodes to create device links
 *
 * Defers fwnode parsing for link creation until fw_devlink_resume() is
 * called.  Both functions are ref counted and the caller needs to match
 * the calls.
 */
void fw_devlink_pause(void)
{
	mutex_lock(&defer_fw_devlink_lock);
	defer_fw_devlink_count++;
	mutex_unlock(&defer_fw_devlink_lock);
}
1539
1540
1541
1542
1543
1544
/**
 * fw_devlink_resume - Resume parsing of fwnodes to create device links
 *
 * Counterpart of fw_devlink_pause(); when the last pause is released,
 * retries link creation for all deferred devices and probes them.
 */
void fw_devlink_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(probe_list);

	mutex_lock(&defer_fw_devlink_lock);
	if (!defer_fw_devlink_count) {
		WARN(true, "Unmatched fw_devlink pause/resume!");
		goto out;
	}

	defer_fw_devlink_count--;
	if (defer_fw_devlink_count)
		goto out;

	device_link_add_missing_supplier_links();
	list_splice_tail_init(&deferred_fw_devlink, &probe_list);
out:
	mutex_unlock(&defer_fw_devlink_lock);

	/*
	 * bus_probe_device() can cause new devices to get added, and they in
	 * turn take defer_fw_devlink_lock, so probing must happen outside it.
	 */
	list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) {
		list_del_init(&dev->links.defer_hook);
		bus_probe_device(dev);
	}
}
1575
1576
/* Legacy hooks invoked on device addition/removal, if a platform sets them. */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
/* /sys/dev and its "char" / "block" subdirectories. */
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;

/* Serializes device hotplug operations (e.g. online/offline transitions). */
static DEFINE_MUTEX(device_hotplug_lock);
1584
void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}

void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}

/*
 * Take device_hotplug_lock from a sysfs handler.  If the lock is
 * contended, sleep briefly and restart the syscall instead of blocking,
 * so a sysfs write can't deadlock against hotplug code that is removing
 * the very sysfs file being written.
 */
int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}
1604
#ifdef CONFIG_BLOCK
/* Disk partitions are special-cased in the class-symlink code below. */
static inline int device_is_not_partition(struct device *dev)
{
	return !(dev->type == &part_type);
}
#else
/* Without CONFIG_BLOCK no device can be a partition. */
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif
1616
/*
 * Notify platform code that a device is being added (KOBJ_ADD) or removed
 * (KOBJ_REMOVE).  ACPI is notified first, then software nodes; an error
 * from either aborts the caller (device_add()).  The legacy function-
 * pointer hooks are called last and cannot fail.
 */
static int
device_platform_notify(struct device *dev, enum kobject_action action)
{
	int ret;

	ret = acpi_platform_notify(dev, action);
	if (ret)
		return ret;

	ret = software_node_notify(dev, action);
	if (ret)
		return ret;

	if (platform_notify && action == KOBJ_ADD)
		platform_notify(dev);
	else if (platform_notify_remove && action == KOBJ_REMOVE)
		platform_notify_remove(dev);
	return 0;
}
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
/**
 * dev_driver_string - Return a device's bound driver name, if possible
 * @dev: struct device to query
 *
 * Returns the driver name if a driver is bound, otherwise the bus name,
 * otherwise the class name, otherwise "".
 */
const char *dev_driver_string(const struct device *dev)
{
	struct device_driver *drv;

	/*
	 * dev->driver can change to NULL underneath us because of unbinding,
	 * so be careful about accessing it.  dev->bus and dev->class should
	 * never change once they are set, so they don't need special care.
	 */
	drv = READ_ONCE(dev->driver);
	return drv ? drv->name :
			(dev->bus ? dev->bus->name :
			(dev->class ? dev->class->name : ""));
}
EXPORT_SYMBOL(dev_driver_string);
1660
#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

/*
 * sysfs ->show trampoline: dispatch to the device_attribute's show().
 * A return value >= PAGE_SIZE indicates a buggy attribute implementation
 * (sysfs buffers are one page), so it is reported.
 */
static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->show)
		ret = dev_attr->show(dev, dev_attr, buf);
	if (ret >= (ssize_t)PAGE_SIZE) {
		printk("dev_attr_show: %pS returned bad count\n",
				dev_attr->show);
	}
	return ret;
}

/* sysfs ->store trampoline: dispatch to the device_attribute's store(). */
static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->store)
		ret = dev_attr->store(dev, dev_attr, buf, count);
	return ret;
}

static const struct sysfs_ops dev_sysfs_ops = {
	.show = dev_attr_show,
	.store = dev_attr_store,
};
1695
#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)

/*
 * Generic store helper for dev_ext_attribute-backed unsigned long
 * attributes: parse the user buffer (base auto-detected by the "0"
 * argument) and write the value through ea->var.
 */
ssize_t device_store_ulong(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	unsigned long new;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		return ret;
	*(unsigned long *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(device_store_ulong);

/* Generic show helper: emits the unsigned long in hex (note: %lx). */
ssize_t device_show_ulong(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);
1723
1724ssize_t device_store_int(struct device *dev,
1725 struct device_attribute *attr,
1726 const char *buf, size_t size)
1727{
1728 struct dev_ext_attribute *ea = to_ext_attr(attr);
1729 int ret;
1730 long new;
1731
1732 ret = kstrtol(buf, 0, &new);
1733 if (ret)
1734 return ret;
1735
1736 if (new > INT_MAX || new < INT_MIN)
1737 return -EINVAL;
1738 *(int *)(ea->var) = new;
1739
1740 return size;
1741}
1742EXPORT_SYMBOL_GPL(device_store_int);
1743
1744ssize_t device_show_int(struct device *dev,
1745 struct device_attribute *attr,
1746 char *buf)
1747{
1748 struct dev_ext_attribute *ea = to_ext_attr(attr);
1749
1750 return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
1751}
1752EXPORT_SYMBOL_GPL(device_show_int);
1753
/*
 * Generic store helper for dev_ext_attribute-backed bool attributes:
 * strtobool() accepts "0"/"1", "y"/"n", "on"/"off" etc. and writes the
 * result directly through ea->var.
 */
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	if (strtobool(buf, ea->var) < 0)
		return -EINVAL;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);

/* Generic show helper for dev_ext_attribute-backed bool attributes. */
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);
1774
1775
1776
1777
1778
1779
1780
1781
1782
/**
 * device_release - free a device once its last reference is dropped
 * @kobj: device's kobject
 *
 * kobject release callback: releases devres resources, then invokes the
 * most specific release hook available (device, then type, then class).
 * A device with no release hook at all is broken, hence the WARN.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without driver attached
	 * and managed resources may have been acquired.  As devres
	 * release isn't tied to driver detach for such devices, whatever
	 * is left in the devres list is released here, on final put.
	 */
	devres_release_all(dev);

	kfree(dev->dma_range_map);

	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
			dev_name(dev));
	kfree(p);
}
1812
/* Return the sysfs namespace tag for the device, if its class uses one. */
static const void *device_namespace(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

/* Let the class override the uid/gid of the device's sysfs entries. */
static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->class && dev->class->get_ownership)
		dev->class->get_ownership(dev, uid, gid);
}

/* kobj_type shared by every struct device's embedded kobject. */
static struct kobj_type device_ktype = {
	.release = device_release,
	.sysfs_ops = &dev_sysfs_ops,
	.namespace = device_namespace,
	.get_ownership = device_get_ownership,
};
1838
1839
/*
 * Only emit uevents for real devices (our ktype) that belong to a bus or
 * a class; bare kobjects in the devices kset are filtered out.
 */
static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &device_ktype) {
		struct device *dev = kobj_to_dev(kobj);
		if (dev->bus)
			return 1;
		if (dev->class)
			return 1;
	}
	return 0;
}

/* Subsystem name reported in the uevent: bus name, else class name. */
static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->bus)
		return dev->bus->name;
	if (dev->class)
		return dev->class->name;
	return NULL;
}
1864
/*
 * Fill in the uevent environment for a device: MAJOR/MINOR/DEVNAME for
 * devices with a device node, then DEVTYPE, DRIVER and OF data, and
 * finally whatever the bus, class and device type want to add.
 */
static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* add device node properties if present */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			/* only report non-default mode/uid/gid */
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

	/* have the bus specific function add its stuff */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}

static const struct kset_uevent_ops device_uevent_ops = {
	.filter = dev_uevent_filter,
	.name = dev_uevent_name,
	.uevent = dev_uevent,
};
1937
/*
 * "uevent" attribute read: synthesize the environment an ADD uevent would
 * carry and print one KEY=value pair per line.  Errors while building the
 * environment result in an empty read (len stays 0), not an error code.
 */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* search the kset the device belongs to (possibly via a parent) */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect the kset's filter, as kobject_uevent() would */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* copy the collected keys to the sysfs buffer */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	kfree(env);
	return len;
}
1980
/*
 * "uevent" attribute write: let userspace trigger a synthetic uevent
 * (e.g. "add", "change") for this device.
 */
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&dev->kobj, buf, count);

	if (rc) {
		dev_err(dev, "uevent: failed to send synthetic uevent\n");
		return rc;
	}

	return count;
}
static DEVICE_ATTR_RW(uevent);
1996
/* "online" attribute read: 1 unless the device has been taken offline. */
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	bool val;

	/* dev->offline is protected by the device lock */
	device_lock(dev);
	val = !dev->offline;
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}

/*
 * "online" attribute write: bring the device online or take it offline.
 * Requires the device-hotplug lock; lock_device_hotplug_sysfs() may
 * restart the syscall instead of blocking (see its comment).
 */
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);
2027
/* Create a NULL-terminated array of attribute groups under the device. */
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

/* Remove attribute groups previously added with device_add_groups(). */
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);
2040
/*
 * devres payload for devm-managed attribute groups: stores either a single
 * group or a NULL-terminated array, depending on which devm_* helper
 * allocated it (the release callback knows which member is valid).
 */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};
2045
2046static int devm_attr_group_match(struct device *dev, void *res, void *data)
2047{
2048 return ((union device_attr_group_devres *)res)->group == data;
2049}
2050
/* devres release callback for a single devm-managed attribute group. */
static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}

/* devres release callback for a devm-managed array of attribute groups. */
static void devm_attr_groups_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group **groups = devres->groups;

	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
	sysfs_remove_groups(&dev->kobj, groups);
}
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
/**
 * devm_device_add_group - given a device, create a managed attribute group
 * @dev: the device to create the group for
 * @grp: the attribute group to create
 *
 * This function creates a group for the first time.  It will explicitly
 * warn and error if any of the attribute files being created already exist.
 * The group is removed automatically on driver detach via devres.
 *
 * Returns 0 on success or error code on failure.
 */
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_group_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_group(&dev->kobj, grp);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->group = grp;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
2100
2101
2102
2103
2104
2105
2106
2107
2108
/**
 * devm_device_remove_group: remove a managed group from a device
 * @dev: device to remove the group from
 * @grp: group to remove
 *
 * Triggers the devres release callback, which removes the sysfs group;
 * WARNs if no matching devres entry is found (i.e. @grp was never added).
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
/**
 * devm_device_add_groups - create a bunch of managed attribute groups
 * @dev: The device to create the group for
 * @groups: The attribute groups to create, NULL terminated
 *
 * Creates all of the groups and ties their lifetime to the device via
 * devres, so they are removed automatically when the driver detaches.
 *
 * Returns 0 on success or error code from sysfs_create_groups on failure.
 */
int devm_device_add_groups(struct device *dev,
			   const struct attribute_group **groups)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_groups_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_groups(&dev->kobj, groups);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->groups = groups;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_groups);
2153
2154
2155
2156
2157
2158
2159
2160
2161
/**
 * devm_device_remove_groups - remove a list of managed groups
 * @dev: device the groups were added to
 * @groups: NULL-terminated list of groups to remove
 *
 * WARNs if the groups were not previously added via devm_device_add_groups().
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);
2170
/*
 * Create all standard sysfs attributes for a device being added: class
 * groups, type groups, the device's own groups, the "online" file (for
 * hotpluggable devices) and "waiting_for_supplier" (when fw_devlink is
 * active and enforcing).  On failure everything created so far is torn
 * down in reverse order via the fall-through error labels.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	if (fw_devlink_flags && !fw_devlink_is_permissive()) {
		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
		if (error)
			goto err_remove_dev_online;
	}

	return 0;

 err_remove_dev_online:
	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}
2220
/*
 * Tear down everything device_add_attrs() created.  Removing files that
 * were never created is harmless, so no conditionals are needed for the
 * "online" / "waiting_for_supplier" attributes.
 */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_waiting_for_supplier);
	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}
2236
/* "dev" attribute: print the device number as "major:minor". */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);
2243
2244
/* /sys/devices/ */
struct kset *devices_kset;

/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 *
 * The kset list order is used for shutdown ordering; see device_shutdown().
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}
2292
2293
2294
2295
2296
2297
/**
 * device_create_file - create sysfs attribute file for device.
 * @dev: device.
 * @attr: device attribute descriptor.
 *
 * WARNs on mismatched permissions (writable without store(), readable
 * without show()) since those produce confusing -EIO errors at runtime.
 */
int device_create_file(struct device *dev,
		       const struct device_attribute *attr)
{
	int error = 0;

	if (dev) {
		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
			"Attribute %s: write permission without 'store'\n",
			attr->attr.name);
		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
			"Attribute %s: read permission without 'show'\n",
			attr->attr.name);
		error = sysfs_create_file(&dev->kobj, &attr->attr);
	}

	return error;
}
EXPORT_SYMBOL_GPL(device_create_file);
2316
2317
2318
2319
2320
2321
/**
 * device_remove_file - remove sysfs attribute file.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
void device_remove_file(struct device *dev,
			const struct device_attribute *attr)
{
	if (dev)
		sysfs_remove_file(&dev->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(device_remove_file);
2329
2330
2331
2332
2333
2334
2335
2336
2337bool device_remove_file_self(struct device *dev,
2338 const struct device_attribute *attr)
2339{
2340 if (dev)
2341 return sysfs_remove_file_self(&dev->kobj, &attr->attr);
2342 else
2343 return false;
2344}
2345EXPORT_SYMBOL_GPL(device_remove_file_self);
2346
2347
2348
2349
2350
2351
2352int device_create_bin_file(struct device *dev,
2353 const struct bin_attribute *attr)
2354{
2355 int error = -EINVAL;
2356 if (dev)
2357 error = sysfs_create_bin_file(&dev->kobj, attr);
2358 return error;
2359}
2360EXPORT_SYMBOL_GPL(device_create_bin_file);
2361
2362
2363
2364
2365
2366
/**
 * device_remove_bin_file - remove sysfs binary attribute file
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr)
{
	if (dev)
		sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);
2374
/* klist get/put callbacks: pin a child device while its node is in use. */
static void klist_children_get(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	get_device(dev);
}

static void klist_children_put(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	put_device(dev);
}
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * Initializes @dev so that device_add() can be called on it: sets up the
 * embedded kobject, the device lock, devres, PM state and the device-link
 * lists, and clears the NUMA node.  After this call the device must be
 * freed with put_device(), never kfree() directly.
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
#ifdef CONFIG_PROVE_LOCKING
	mutex_init(&dev->lockdep_mutex);
#endif
	/* device locks nest by hierarchy; lockdep can't model that */
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	INIT_LIST_HEAD(&dev->links.needs_suppliers);
	INIT_LIST_HEAD(&dev->links.defer_hook);
	dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
2435
/*
 * Lazily create and return /sys/devices/virtual, the parent directory for
 * class devices that have no hardware parent.
 */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}
2446
/*
 * A class "glue" directory: an intermediate kobject named after the class,
 * inserted between a parent device and its class-devices to keep
 * /sys/devices tidy (e.g. .../parent/tty/ttyS0).
 */
struct class_dir {
	struct kobject kobj;
	struct class *class;	/* class this glue dir belongs to */
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)

static void class_dir_release(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	kfree(dir);
}

/* Children of a glue dir inherit the class's sysfs namespace type. */
static const
struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	return dir->class->ns_type;
}

static struct kobj_type class_dir_ktype = {
	.release = class_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.child_ns_type = class_dir_child_ns_type
};
2472
/*
 * Allocate a glue directory for @class under @parent_kobj and register it
 * in the class's glue_dirs kset so get_device_parent() can find and reuse
 * it.  Returns an ERR_PTR on failure; kobject_put() handles cleanup when
 * kobject_add() fails.
 */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}

/* Serializes glue-dir lookup/creation/teardown (see cleanup_glue_dir()). */
static DEFINE_MUTEX(gdp_mutex);
2497
/*
 * Pick the sysfs parent kobject for a device being added.  Class devices
 * may get a shared per-class "glue" directory under their parent (looked
 * up or created under gdp_mutex); parentless class devices live under
 * /sys/devices/virtual.  Non-class devices simply use their parent, their
 * bus's dev_root, or NULL (top level).
 */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".  A class-
		 * parent is shared directly unless namespaces are in play;
		 * otherwise a glue directory is interposed below.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}
2557
/* Does @kobj belong to @dev's class's set of glue directories? */
static inline bool live_in_glue_dir(struct kobject *kobj,
				    struct device *dev)
{
	if (!kobj || !dev->class ||
	    kobj->kset != &dev->class->p->glue_dirs)
		return false;
	return true;
}

/* The device's sysfs parent — its glue dir, if it lives in one. */
static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}
2571
2572
2573
2574
2575
2576
/*
 * cleanup_glue_dir - drop a reference on a class "glue" directory and
 * delete it when it is no longer needed.
 *
 * The kref is inspected manually (rather than relying on kobject_put()
 * alone) so that kobject_del() can run while gdp_mutex is still held:
 * the final put must not race with a concurrent get_device_parent(),
 * which could otherwise find the half-torn-down glue dir in the class's
 * glue_dirs list and try to add a device under it while its sysfs
 * directory is being removed.  NOTE(review): this relies on all new glue
 * dir references being taken under gdp_mutex as well — confirm when
 * modifying get_device_parent().
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	unsigned int ref;

	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/*
	 * Delete the directory only when this is the last reference and
	 * no child devices remain under it; the kobject_put() below then
	 * frees it.
	 */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}
2640
/*
 * Create the standard symlinks for a device being added: "of_node" (if it
 * has a DT node), and for class devices "subsystem", "device" (back to the
 * parent) and the class-directory link named after the device.  Unwinds in
 * reverse order on failure.
 */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n",error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories for the disks themselves */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}
2692
/* Remove the symlinks created by device_add_class_symlinks(). */
static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	/* /sys/block devices never got a class-directory link */
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}
2710
2711
2712
2713
2714
2715
/**
 * dev_set_name - set a device name
 * @dev: device
 * @fmt: format string for the device's name
 */
int dev_set_name(struct device *dev, const char *fmt, ...)
{
	va_list vargs;
	int err;

	va_start(vargs, fmt);
	err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
	va_end(vargs);
	return err;
}
EXPORT_SYMBOL_GPL(dev_set_name);
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
/*
 * device_to_dev_kobj - select a /sys/dev/ kobject for sysfs entry
 * @dev: device
 *
 * Returns the class's dev_kobj if the device has a class, otherwise the
 * char-device kobject.  (Block devices are expected to carry a class —
 * NOTE(review): the block case appears to be handled via dev->class,
 * confirm against sysfs_dev_block_kobj users.)
 */
static struct kobject *device_to_dev_kobj(struct device *dev)
{
	struct kobject *kobj;

	if (dev->class)
		kobj = dev->class->dev_kobj;
	else
		kobj = sysfs_dev_char_kobj;

	return kobj;
}

/* Create the /sys/dev/<char|block>/major:minor -> device symlink. */
static int device_create_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	int error = 0;
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		error = sysfs_create_link(kobj, &dev->kobj, devt_str);
	}

	return error;
}

/* Remove the /sys/dev/<char|block>/major:minor symlink. */
static void device_remove_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		sysfs_remove_link(kobj, devt_str);
	}
}
2775
/* Allocate and initialize the driver-core private part of a device. */
static int device_private_init(struct device *dev)
{
	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
	if (!dev->p)
		return -ENOMEM;
	dev->p->device = dev;
	klist_init(&dev->p->klist_children, klist_children_get,
		   klist_children_put);
	INIT_LIST_HEAD(&dev->p->deferred_probe);
	return 0;
}
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
/**
 * device_add - add device to device hierarchy.
 * @dev: device.
 *
 * This is part 2 of device_register(), though may be called separately
 * iff device_initialize() has been called separately.
 *
 * This adds @dev to the kobject hierarchy via kobject_add(), adds it to
 * the global and sibling lists for the device, then adds it to the other
 * relevant subsystems of the driver model.
 *
 * Do not call this routine or device_register() more than once for any
 * device structure.  On error, the caller must use put_device() to give
 * up the reference initialized in this function (never free @dev directly).
 */
int device_add(struct device *dev)
{
	struct device *parent;
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
	struct kobject *glue_dir = NULL;

	dev = get_device(dev);
	if (!dev)
		goto done;

	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * for statically allocated devices, which should all be converted
	 * some day, we need to initialize the name. We prevent reading back
	 * the name, to avoid use of a pointer that may be overwritten later.
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	/* subsystems can specify simple device enumeration */
	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	parent = get_device(dev->parent);
	kobj = get_device_parent(dev, parent);
	if (IS_ERR(kobj)) {
		error = PTR_ERR(kobj);
		goto parent_error;
	}
	if (kobj)
		dev->kobj.parent = kobj;

	/* use parent numa_node */
	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
		set_dev_node(dev, dev_to_node(parent));

	/* first, register with generic layer. */
	/* we require the name to be set before, and pass NULL */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error) {
		glue_dir = get_glue_dir(dev);
		goto Error;
	}

	/* notify platform of device entry */
	error = device_platform_notify(dev, KOBJ_ADD);
	if (error)
		goto platform_error;

	error = device_create_file(dev, &dev_attr_uevent);
	if (error)
		goto attrError;

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &dev_attr_dev);
		if (error)
			goto DevAttrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto SysEntryError;

		devtmpfs_create_node(dev);
	}

	/*
	 * Notify clients of device addition.  This call must come
	 * after dpm_sysfs_add() and before kobject_uevent().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);

	/*
	 * Pair the fwnode with its device before fw_devlink_link_device()
	 * and bus_probe_device(), so consumers that have been waiting for
	 * this supplier can find it and create their device links.
	 */
	if (dev->fwnode && !dev->fwnode->dev) {
		dev->fwnode->dev = dev;
		fw_devlink_link_device(dev);
	}

	bus_probe_device(dev);
	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->p->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
 SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	device_platform_notify(dev, KOBJ_REMOVE);
platform_error:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
	cleanup_glue_dir(dev, glue_dir);
parent_error:
	put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
EXPORT_SYMBOL_GPL(device_add);
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * Equivalent to device_initialize() + device_add().  On error the caller
 * must call put_device() to drop the reference taken here.
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);
3012
3013
3014
3015
3016
3017
3018
3019
3020
/**
 * get_device - increment reference count for device.
 * @dev: device, may be NULL.
 *
 * This simply forwards the call to kobject_get(), though we do take
 * care to provide for the case that we get a NULL pointer passed in.
 */
struct device *get_device(struct device *dev)
{
	return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
}
EXPORT_SYMBOL_GPL(get_device);
3026
3027
3028
3029
3030
/**
 * put_device - decrement reference count.
 * @dev: device in question, may be NULL (then this is a no-op).
 *
 * Dropping the last reference triggers the device's release path
 * via the kobject machinery.
 */
void put_device(struct device *dev)
{
	/* might_sleep(); */
	if (dev)
		kobject_put(&dev->kobj);
}
EXPORT_SYMBOL_GPL(put_device);
3038
/**
 * kill_device - mark a device as no longer usable for binding
 * @dev: the device, must be locked by the caller
 *
 * Require the device lock to guard against concurrent calls and
 * against a driver binding while the device is being killed; once
 * dev->p->dead is set, bus code is expected to refuse new bindings.
 *
 * Return: %false if the device was already dead (so the caller knows
 * someone else got there first), %true if this call marked it dead.
 */
bool kill_device(struct device *dev)
{
	/*
	 * Require the device lock because the caller is expected to hold
	 * it while observing and acting on the "dead" state transition.
	 */
	lockdep_assert_held(&dev->mutex);

	if (dev->p->dead)
		return false;
	dev->p->dead = true;
	return true;
}
EXPORT_SYMBOL_GPL(kill_device);
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * This is the first part of the device unregistration
 * sequence.  It removes the device from the lists it was added to in
 * device_add(), deletes it from the device hierarchy, and removes the
 * sysfs representation.  The teardown order mirrors (in reverse) the
 * setup order in device_add().
 *
 * NOTE: this should be called manually _iff_ device_add() was
 * also called manually; otherwise use device_unregister().
 */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;
	unsigned int noio_flag;

	/* Mark the device dead under its lock so no new driver can bind. */
	device_lock(dev);
	kill_device(dev);
	device_unlock(dev);

	/* Detach the firmware node back-pointer set up in device_add(). */
	if (dev->fwnode && dev->fwnode->dev == dev)
		dev->fwnode->dev = NULL;

	/*
	 * Avoid I/O-triggering allocations during teardown: block devices
	 * may be removed here and reclaim into them would deadlock.
	 */
	noio_flag = memalloc_noio_save();
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);

	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &dev_attr_dev);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->mutex);
		/* notify any interfaces that the device is now gone */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* remove the device from the class list */
		klist_del(&dev->p->knode_class);
		mutex_unlock(&dev->class->p->mutex);
	}
	device_remove_file(dev, &dev_attr_uevent);
	device_remove_attrs(dev);
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_platform_notify(dev, KOBJ_REMOVE);
	device_remove_properties(dev);
	device_links_purge(dev);

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	/* grab the glue dir before kobject_del() tears down the kobject */
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
	cleanup_glue_dir(dev, glue_dir);
	memalloc_noio_restore(noio_flag);
	put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * We do this in two parts, like we do device_register().  First,
 * we remove it from all the subsystems with device_del(), then we
 * decrement the reference count via put_device().  If that is the
 * final reference count, the device will be cleaned up via
 * device_release() above.  Otherwise, the structure will stick
 * around until the final reference to the device is dropped.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);
3152
3153static struct device *prev_device(struct klist_iter *i)
3154{
3155 struct klist_node *n = klist_prev(i);
3156 struct device *dev = NULL;
3157 struct device_private *p;
3158
3159 if (n) {
3160 p = to_device_private_parent(n);
3161 dev = p->device;
3162 }
3163 return dev;
3164}
3165
3166static struct device *next_device(struct klist_iter *i)
3167{
3168 struct klist_node *n = klist_next(i);
3169 struct device *dev = NULL;
3170 struct device_private *p;
3171
3172 if (n) {
3173 p = to_device_private_parent(n);
3174 dev = p->device;
3175 }
3176 return dev;
3177}
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192const char *device_get_devnode(struct device *dev,
3193 umode_t *mode, kuid_t *uid, kgid_t *gid,
3194 const char **tmp)
3195{
3196 char *s;
3197
3198 *tmp = NULL;
3199
3200
3201 if (dev->type && dev->type->devnode)
3202 *tmp = dev->type->devnode(dev, mode, uid, gid);
3203 if (*tmp)
3204 return *tmp;
3205
3206
3207 if (dev->class && dev->class->devnode)
3208 *tmp = dev->class->devnode(dev, mode);
3209 if (*tmp)
3210 return *tmp;
3211
3212
3213 if (strchr(dev_name(dev), '!') == NULL)
3214 return dev_name(dev);
3215
3216
3217 s = kstrdup(dev_name(dev), GFP_KERNEL);
3218 if (!s)
3219 return NULL;
3220 strreplace(s, '!', '/');
3221 return *tmp = s;
3222}
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236int device_for_each_child(struct device *parent, void *data,
3237 int (*fn)(struct device *dev, void *data))
3238{
3239 struct klist_iter i;
3240 struct device *child;
3241 int error = 0;
3242
3243 if (!parent->p)
3244 return 0;
3245
3246 klist_iter_init(&parent->p->klist_children, &i);
3247 while (!error && (child = next_device(&i)))
3248 error = fn(child, data);
3249 klist_iter_exit(&i);
3250 return error;
3251}
3252EXPORT_SYMBOL_GPL(device_for_each_child);
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266int device_for_each_child_reverse(struct device *parent, void *data,
3267 int (*fn)(struct device *dev, void *data))
3268{
3269 struct klist_iter i;
3270 struct device *child;
3271 int error = 0;
3272
3273 if (!parent->p)
3274 return 0;
3275
3276 klist_iter_init(&parent->p->klist_children, &i);
3277 while ((child = prev_device(&i)) && !error)
3278 error = fn(child, data);
3279 klist_iter_exit(&i);
3280 return error;
3281}
3282EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301struct device *device_find_child(struct device *parent, void *data,
3302 int (*match)(struct device *dev, void *data))
3303{
3304 struct klist_iter i;
3305 struct device *child;
3306
3307 if (!parent)
3308 return NULL;
3309
3310 klist_iter_init(&parent->p->klist_children, &i);
3311 while ((child = next_device(&i)))
3312 if (match(child, data) && get_device(child))
3313 break;
3314 klist_iter_exit(&i);
3315 return child;
3316}
3317EXPORT_SYMBOL_GPL(device_find_child);
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329struct device *device_find_child_by_name(struct device *parent,
3330 const char *name)
3331{
3332 struct klist_iter i;
3333 struct device *child;
3334
3335 if (!parent)
3336 return NULL;
3337
3338 klist_iter_init(&parent->p->klist_children, &i);
3339 while ((child = next_device(&i)))
3340 if (sysfs_streq(dev_name(child), name) && get_device(child))
3341 break;
3342 klist_iter_exit(&i);
3343 return child;
3344}
3345EXPORT_SYMBOL_GPL(device_find_child_by_name);
3346
/*
 * devices_init - create the core sysfs kobjects: /sys/devices, /sys/dev,
 * /sys/dev/block and /sys/dev/char.  On failure each kobject created so
 * far is released in reverse order via the error ladder below.
 */
int __init devices_init(void)
{
	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
	if (!devices_kset)
		return -ENOMEM;
	dev_kobj = kobject_create_and_add("dev", NULL);
	if (!dev_kobj)
		goto dev_kobj_err;
	sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
	if (!sysfs_dev_block_kobj)
		goto block_kobj_err;
	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
	if (!sysfs_dev_char_kobj)
		goto char_kobj_err;

	return 0;

	/* unwind in reverse order of creation */
 char_kobj_err:
	kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
	kobject_put(dev_kobj);
 dev_kobj_err:
	kset_unregister(devices_kset);
	return -ENOMEM;
}
3372
3373static int device_check_offline(struct device *dev, void *not_used)
3374{
3375 int ret;
3376
3377 ret = device_for_each_child(dev, NULL, device_check_offline);
3378 if (ret)
3379 return ret;
3380
3381 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
3382}
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
/**
 * device_offline - Prepare the device for hot-removal.
 * @dev: Device to be put offline.
 *
 * Execute the device bus type's .offline() callback, if present, to
 * prepare the device for a subsequent hot-removal.  If that succeeds,
 * the device must not be used until either it is removed or its bus
 * type's .online() callback is executed.
 *
 * Call under device_hotplug_lock.
 *
 * Return: 0 on success, 1 if the device was already offline, -EPERM if
 * offlining is disabled, or a negative error from the checks/callback.
 */
int device_offline(struct device *dev)
{
	int ret;

	if (dev->offline_disabled)
		return -EPERM;

	/* Refuse unless the entire subtree below us can go offline too. */
	ret = device_for_each_child(dev, NULL, device_check_offline);
	if (ret)
		return ret;

	device_lock(dev);
	if (device_supports_offline(dev)) {
		if (dev->offline) {
			ret = 1;	/* already offline */
		} else {
			ret = dev->bus->offline(dev);
			if (!ret) {
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
				dev->offline = true;
			}
		}
	}
	device_unlock(dev);

	return ret;
}
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
/**
 * device_online - Put the device back online after successful device_offline().
 * @dev: Device to be put back online.
 *
 * If device_offline() has been successfully executed for @dev, but the
 * device has not been removed subsequently, execute its bus type's
 * .online() callback to indicate that the device can be used again.
 *
 * Call under device_hotplug_lock.
 *
 * Return: 0 on success, 1 if the device was already online, or a
 * negative error code from the bus .online() callback.
 */
int device_online(struct device *dev)
{
	int ret = 0;

	device_lock(dev);
	if (device_supports_offline(dev)) {
		if (dev->offline) {
			ret = dev->bus->online(dev);
			if (!ret) {
				kobject_uevent(&dev->kobj, KOBJ_ONLINE);
				dev->offline = false;
			}
		} else {
			ret = 1;	/* already online */
		}
	}
	device_unlock(dev);

	return ret;
}
3453
/*
 * A "root device" is a virtual top-level parent used to group devices
 * that have no real bus parent; @owner is the module that registered
 * it (for the "module" sysfs link).
 */
struct root_device {
	struct device dev;
	struct module *owner;
};

static inline struct root_device *to_root_device(struct device *d)
{
	return container_of(d, struct root_device, dev);
}

/* Release callback: the root_device was kzalloc'ed, so just free it. */
static void root_device_release(struct device *dev)
{
	kfree(to_root_device(dev));
}
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
/**
 * __root_device_register - allocate and register a root device
 * @name: root device name
 * @owner: owner module of the root device, usually THIS_MODULE
 *
 * This function allocates a root device and registers it
 * using device_register().  In order to free the returned
 * device, use root_device_unregister().
 *
 * Root devices are dummy devices which allow other devices
 * to be grouped under /sys/devices.  Use this function to
 * allocate a root device and then use it as the parent of
 * any device which should appear under /sys/devices/{name}
 *
 * The /sys/devices/{name} directory will also contain a
 * 'module' symlink which points to the @owner directory
 * in sysfs.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: You probably want to use root_device_register().
 */
struct device *__root_device_register(const char *name, struct module *owner)
{
	struct root_device *root;
	int err = -ENOMEM;

	root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
	if (!root)
		return ERR_PTR(err);

	err = dev_set_name(&root->dev, "%s", name);
	if (err) {
		/* name not set: plain kfree() is safe, no release yet */
		kfree(root);
		return ERR_PTR(err);
	}

	root->dev.release = root_device_release;

	err = device_register(&root->dev);
	if (err) {
		/* device_register() failed: drop the initialized ref */
		put_device(&root->dev);
		return ERR_PTR(err);
	}

#ifdef CONFIG_MODULES	/* gotta find a "cleaner" way to do this */
	if (owner) {
		struct module_kobject *mk = &owner->mkobj;

		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
		if (err) {
			device_unregister(&root->dev);
			return ERR_PTR(err);
		}
		root->owner = owner;
	}
#endif

	return &root->dev;
}
EXPORT_SYMBOL_GPL(__root_device_register);
3530
3531
3532
3533
3534
3535
3536
3537
3538void root_device_unregister(struct device *dev)
3539{
3540 struct root_device *root = to_root_device(dev);
3541
3542 if (root->owner)
3543 sysfs_remove_link(&root->dev.kobj, "module");
3544
3545 device_unregister(dev);
3546}
3547EXPORT_SYMBOL_GPL(root_device_unregister);
3548
3549
/* Release callback for devices allocated by device_create_groups_vargs(). */
static void device_create_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}
3555
/*
 * Common worker for device_create() and device_create_with_groups():
 * allocate a struct device, fill it in and register it with the given
 * class.  On any failure the single put_device() in the error path
 * covers every case: a NULL dev is a no-op, and once device_initialize()
 * has run the release callback (device_create_release) frees the
 * allocation.
 */
static __printf(6, 0) struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
			   dev_t devt, void *drvdata,
			   const struct attribute_group **groups,
			   const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	device_initialize(dev);
	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
/**
 * device_create - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @fmt: string for the device's name
 *
 * This function can be used by char device classes.  A struct device
 * will be created in sysfs, registered to the specified class.
 *
 * A "dev" file will be created, showing the dev_t for the device, if
 * the dev_t is not 0,0.
 * If a pointer to a parent struct device is passed in, the newly created
 * struct device will be a child of that device in sysfs.
 * The pointer to the struct device will be returned from the call.
 * Any further sysfs files that might be required can be created using this
 * pointer.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: the struct class passed to this function must have previously
 * been created with a call to class_create().
 */
struct device *device_create(struct class *class, struct device *parent,
			     dev_t devt, void *drvdata, const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
					 fmt, vargs);
	va_end(vargs);
	return dev;
}
EXPORT_SYMBOL_GPL(device_create);
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
/**
 * device_create_with_groups - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @groups: NULL-terminated list of attribute groups to be created
 * @fmt: string for the device's name
 *
 * Same as device_create(), but additionally creates the supplied
 * attribute groups atomically with device registration, avoiding the
 * race where userspace sees the device before its attributes exist.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: the struct class passed to this function must have previously
 * been created with a call to class_create().
 */
struct device *device_create_with_groups(struct class *class,
					 struct device *parent, dev_t devt,
					 void *drvdata,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
					 fmt, vargs);
	va_end(vargs);
	return dev;
}
EXPORT_SYMBOL_GPL(device_create_with_groups);
3678
3679
3680
3681
3682
3683
3684
3685
3686
/**
 * device_destroy - removes a device that was created with device_create()
 * @class: pointer to the struct class that this device was registered with
 * @devt: the dev_t of the device that was previously registered
 *
 * This call unregisters and cleans up a device that was created with a
 * call to device_create().
 */
void device_destroy(struct class *class, dev_t devt)
{
	struct device *dev;

	dev = class_find_device_by_devt(class, devt);
	if (dev) {
		/* drop the reference taken by class_find_device_by_devt() */
		put_device(dev);
		device_unregister(dev);
	}
}
EXPORT_SYMBOL_GPL(device_destroy);
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738int device_rename(struct device *dev, const char *new_name)
3739{
3740 struct kobject *kobj = &dev->kobj;
3741 char *old_device_name = NULL;
3742 int error;
3743
3744 dev = get_device(dev);
3745 if (!dev)
3746 return -EINVAL;
3747
3748 dev_dbg(dev, "renaming to %s\n", new_name);
3749
3750 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
3751 if (!old_device_name) {
3752 error = -ENOMEM;
3753 goto out;
3754 }
3755
3756 if (dev->class) {
3757 error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
3758 kobj, old_device_name,
3759 new_name, kobject_namespace(kobj));
3760 if (error)
3761 goto out;
3762 }
3763
3764 error = kobject_rename(kobj, new_name);
3765 if (error)
3766 goto out;
3767
3768out:
3769 put_device(dev);
3770
3771 kfree(old_device_name);
3772
3773 return error;
3774}
3775EXPORT_SYMBOL_GPL(device_rename);
3776
3777static int device_move_class_links(struct device *dev,
3778 struct device *old_parent,
3779 struct device *new_parent)
3780{
3781 int error = 0;
3782
3783 if (old_parent)
3784 sysfs_remove_link(&dev->kobj, "device");
3785 if (new_parent)
3786 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
3787 "device");
3788 return error;
3789}
3790
3791
3792
3793
3794
3795
3796
/**
 * device_move - moves a device to a new parent
 * @dev: the pointer to the struct device to be moved
 * @new_parent: the new parent of the device (can be NULL)
 * @dpm_order: how to reorder the dpm_list
 *
 * Re-parents @dev under @new_parent: moves the kobject in sysfs,
 * relinks the child klists, fixes up class symlinks and reorders the
 * PM lists as requested.  On a class-symlink failure the move is
 * rolled back as far as possible.
 *
 * Return: 0 on success, negative errno on failure.
 */
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order)
{
	int error;
	struct device *old_parent;
	struct kobject *new_parent_kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	device_pm_lock();
	new_parent = get_device(new_parent);
	new_parent_kobj = get_device_parent(dev, new_parent);
	if (IS_ERR(new_parent_kobj)) {
		error = PTR_ERR(new_parent_kobj);
		put_device(new_parent);
		goto out;
	}

	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
	error = kobject_move(&dev->kobj, new_parent_kobj);
	if (error) {
		cleanup_glue_dir(dev, new_parent_kobj);
		put_device(new_parent);
		goto out;
	}
	old_parent = dev->parent;
	dev->parent = new_parent;
	/* move between the parents' child lists */
	if (old_parent)
		klist_remove(&dev->p->knode_parent);
	if (new_parent) {
		klist_add_tail(&dev->p->knode_parent,
			       &new_parent->p->klist_children);
		set_dev_node(dev, dev_to_node(new_parent));
	}

	if (dev->class) {
		error = device_move_class_links(dev, old_parent, new_parent);
		if (error) {
			/* We ignore errors on cleanup since we're hosed anyway... */
			device_move_class_links(dev, new_parent, old_parent);
			/* try to move the kobject back under the old parent */
			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
				if (new_parent)
					klist_remove(&dev->p->knode_parent);
				dev->parent = old_parent;
				if (old_parent) {
					klist_add_tail(&dev->p->knode_parent,
						       &old_parent->p->klist_children);
					set_dev_node(dev, dev_to_node(old_parent));
				}
			}
			cleanup_glue_dir(dev, new_parent_kobj);
			put_device(new_parent);
			goto out;
		}
	}
	/* reorder the PM lists as requested by the caller */
	switch (dpm_order) {
	case DPM_ORDER_NONE:
		break;
	case DPM_ORDER_DEV_AFTER_PARENT:
		device_pm_move_after(dev, new_parent);
		devices_kset_move_after(dev, new_parent);
		break;
	case DPM_ORDER_PARENT_BEFORE_DEV:
		device_pm_move_before(new_parent, dev);
		devices_kset_move_before(new_parent, dev);
		break;
	case DPM_ORDER_DEV_LAST:
		device_pm_move_last(dev);
		devices_kset_move_last(dev);
		break;
	}

	put_device(old_parent);
out:
	device_pm_unlock();
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_move);
3879
/*
 * Change the ownership of every sysfs attribute group belonging to
 * @dev itself: the class dev_groups, the device-type groups, the
 * device's own groups and, when applicable, the "online" attribute.
 * Helper for device_change_owner().
 */
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
				     kgid_t kgid)
{
	struct kobject *kobj = &dev->kobj;
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		/*
		 * Change the device groups of the device class for @dev to
		 * @kuid/@kgid.
		 */
		error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	if (type) {
		/*
		 * Change the device groups of the device type for @dev to
		 * @kuid/@kgid.
		 */
		error = sysfs_groups_change_owner(kobj, type->groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	/* Change the device groups of @dev to @kuid/@kgid. */
	error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
	if (error)
		return error;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		/* Change online device attributes of @dev to @kuid/@kgid. */
		error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
						kuid, kgid);
		if (error)
			return error;
	}

	return 0;
}
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
3939{
3940 int error;
3941 struct kobject *kobj = &dev->kobj;
3942
3943 dev = get_device(dev);
3944 if (!dev)
3945 return -EINVAL;
3946
3947
3948
3949
3950
3951 error = sysfs_change_owner(kobj, kuid, kgid);
3952 if (error)
3953 goto out;
3954
3955
3956
3957
3958
3959
3960 error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
3961 kgid);
3962 if (error)
3963 goto out;
3964
3965
3966
3967
3968
3969
3970 error = device_attrs_change_owner(dev, kuid, kgid);
3971 if (error)
3972 goto out;
3973
3974 error = dpm_sysfs_change_owner(dev, kuid, kgid);
3975 if (error)
3976 goto out;
3977
3978#ifdef CONFIG_BLOCK
3979 if (sysfs_deprecated && dev->class == &block_class)
3980 goto out;
3981#endif
3982
3983
3984
3985
3986
3987
3988
3989 error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
3990 dev_name(dev), kuid, kgid);
3991 if (error)
3992 goto out;
3993
3994out:
3995 put_device(dev);
3996 return error;
3997}
3998EXPORT_SYMBOL_GPL(device_change_owner);
3999
4000
4001
4002
/**
 * device_shutdown - call ->shutdown() on each device to shutdown.
 *
 * Walks the global devices_kset from the tail (children before
 * parents) and invokes the class shutdown_pre, bus shutdown or driver
 * shutdown callback for each device.  The kset spinlock is dropped
 * around each callback since callbacks may sleep.
 */
void device_shutdown(void)
{
	struct device *dev, *parent;

	wait_for_device_probe();
	device_block_probing();

	cpufreq_suspend();

	spin_lock(&devices_kset->list_lock);
	/*
	 * Walk the devices list backward, shutting down each in turn.
	 * Beware that device unplug events may also start pulling
	 * devices offline, even as the system is shutting down.
	 */
	while (!list_empty(&devices_kset->list)) {
		dev = list_entry(devices_kset->list.prev, struct device,
				kobj.entry);

		/*
		 * hold reference count of device's parent to
		 * prevent it from being freed because parent's
		 * lock is to be held
		 */
		parent = get_device(dev->parent);
		get_device(dev);
		/*
		 * Make sure the device is off the kset list, in the
		 * event that dev->*->shutdown() doesn't remove it.
		 */
		list_del_init(&dev->kobj.entry);
		spin_unlock(&devices_kset->list_lock);

		/* hold lock to avoid race with probe/release */
		if (parent)
			device_lock(parent);
		device_lock(dev);

		/* Don't allow any more runtime suspends */
		pm_runtime_get_noresume(dev);
		pm_runtime_barrier(dev);

		if (dev->class && dev->class->shutdown_pre) {
			if (initcall_debug)
				dev_info(dev, "shutdown_pre\n");
			dev->class->shutdown_pre(dev);
		}
		if (dev->bus && dev->bus->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->bus->shutdown(dev);
		} else if (dev->driver && dev->driver->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->driver->shutdown(dev);
		}

		device_unlock(dev);
		if (parent)
			device_unlock(parent);

		put_device(dev);
		put_device(parent);

		spin_lock(&devices_kset->list_lock);
	}
	spin_unlock(&devices_kset->list_lock);
}
4071
4072
4073
4074
4075
4076#ifdef CONFIG_PRINTK
4077static void
4078set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
4079{
4080 const char *subsys;
4081
4082 memset(dev_info, 0, sizeof(*dev_info));
4083
4084 if (dev->class)
4085 subsys = dev->class->name;
4086 else if (dev->bus)
4087 subsys = dev->bus->name;
4088 else
4089 return;
4090
4091 strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
4092
4093
4094
4095
4096
4097
4098
4099
4100 if (MAJOR(dev->devt)) {
4101 char c;
4102
4103 if (strcmp(subsys, "block") == 0)
4104 c = 'b';
4105 else
4106 c = 'c';
4107
4108 snprintf(dev_info->device, sizeof(dev_info->device),
4109 "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
4110 } else if (strcmp(subsys, "net") == 0) {
4111 struct net_device *net = to_net_dev(dev);
4112
4113 snprintf(dev_info->device, sizeof(dev_info->device),
4114 "n%u", net->ifindex);
4115 } else {
4116 snprintf(dev_info->device, sizeof(dev_info->device),
4117 "+%s:%s", subsys, dev_name(dev));
4118 }
4119}
4120
/*
 * Emit a printk record for @dev at @level with the device's subsystem
 * and identifier attached as structured metadata.
 */
int dev_vprintk_emit(int level, const struct device *dev,
		     const char *fmt, va_list args)
{
	struct dev_printk_info dev_info;

	set_dev_info(dev, &dev_info);

	return vprintk_emit(0, level, &dev_info, fmt, args);
}
EXPORT_SYMBOL(dev_vprintk_emit);
4131
/* Varargs wrapper around dev_vprintk_emit(). */
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);

	r = dev_vprintk_emit(level, dev, fmt, args);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(dev_printk_emit);
4146
/*
 * Common worker for dev_printk() and the dev_<level> helpers: prefix
 * the message with driver and device name, or note a NULL device.
 * level[1] is the digit of the KERN_<LEVEL> SOH sequence.
 */
static void __dev_printk(const char *level, const struct device *dev,
			 struct va_format *vaf)
{
	if (dev)
		dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
				dev_driver_string(dev), dev_name(dev), vaf);
	else
		printk("%s(NULL device *): %pV", level, vaf);
}
4156
/**
 * dev_printk - print a device message at the given kernel log level
 * @level: kernel log level (KERN_* string)
 * @dev: the device the message concerns, may be NULL
 * @fmt: printf-style format string
 */
void dev_printk(const char *level, const struct device *dev,
		const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	__dev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(dev_printk);
4173
/*
 * Generate the per-level device printk helpers (_dev_err(), _dev_warn(),
 * ...).  Each expands to a varargs function that forwards to
 * __dev_printk() with the corresponding KERN_<LEVEL> prefix.
 */
#define define_dev_printk_level(func, kern_level)		\
void func(const struct device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__dev_printk(kern_level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
4198
4199#endif
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
/**
 * dev_err_probe - probe error check and log helper
 * @dev: the pointer to the struct device
 * @err: error value to test
 * @fmt: printf-style format string
 *
 * This helper implements common pattern present in probe functions for
 * error checking: print debug or error message depending if the error
 * value is -EPROBE_DEFER and propagate error upwards.  In case of
 * -EPROBE_DEFER it also records the reason so it can be reported later
 * via the deferred-probe infrastructure.
 *
 * It replaces code sequence::
 *
 *	if (err != -EPROBE_DEFER)
 *		dev_err(dev, ...);
 *	else
 *		dev_dbg(dev, ...);
 *	return err;
 *
 * with::
 *
 *	return dev_err_probe(dev, err, ...);
 *
 * Returns @err.
 */
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (err != -EPROBE_DEFER) {
		dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	} else {
		device_set_deferred_probe_reason(dev, &vaf);
		dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	}

	va_end(args);

	return err;
}
EXPORT_SYMBOL_GPL(dev_err_probe);
4249
/*
 * A primary fwnode has a valid (non-ERR_PTR) ->secondary pointer;
 * secondary fwnodes carry ERR_PTR(-ENODEV) there instead.
 */
static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
{
	return fwnode && !IS_ERR(fwnode->secondary);
}
4254
4255
4256
4257
4258
4259
4260
4261
4262
/**
 * set_primary_fwnode - Change the primary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New primary firmware node of the device.
 *
 * Set the device's firmware node pointer to @fwnode, but if a secondary
 * firmware node of the device is present, preserve it.
 *
 * Valid fwnode cases are:
 *  - primary --> secondary --> -ENODEV
 *  - primary --> NULL
 *  - secondary --> -ENODEV
 *  - NULL
 */
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	struct device *parent = dev->parent;
	struct fwnode_handle *fn = dev->fwnode;

	if (fwnode) {
		/* If dev had a primary already, keep only its secondary. */
		if (fwnode_is_primary(fn))
			fn = fn->secondary;

		if (fn) {
			WARN_ON(fwnode->secondary);
			fwnode->secondary = fn;
		}
		dev->fwnode = fwnode;
	} else {
		if (fwnode_is_primary(fn)) {
			/* Demote the secondary to be dev's fwnode. */
			dev->fwnode = fn->secondary;
			/*
			 * Don't sever the secondary link when the primary is
			 * shared with the parent (the parent still uses it).
			 */
			if (!(parent && fn == parent->fwnode))
				fn->secondary = ERR_PTR(-ENODEV);
		} else {
			dev->fwnode = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
/**
 * set_secondary_fwnode - Change the secondary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New secondary firmware node of the device.
 *
 * If a primary firmware node of the device is present, set its secondary
 * pointer to @fwnode.  Otherwise, set the device's firmware node pointer
 * to @fwnode.
 */
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	/* Mark @fwnode as a secondary node (see fwnode_is_primary()). */
	if (fwnode)
		fwnode->secondary = ERR_PTR(-ENODEV);

	if (fwnode_is_primary(dev->fwnode))
		dev->fwnode->secondary = fwnode;
	else
		dev->fwnode = fwnode;
}
EXPORT_SYMBOL_GPL(set_secondary_fwnode);
4309
4310
4311
4312
4313
4314
4315
4316
4317
/**
 * device_set_of_node_from_dev - reuse device-tree node of another device
 * @dev: device whose device-tree node is being set
 * @dev2: device whose device-tree node is being reused
 *
 * Takes another reference to the new device-tree node after first dropping
 * any reference held to the old node.  Marks the node as reused so the
 * driver core knows not to treat @dev as its exclusive owner.
 */
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
	of_node_put(dev->of_node);
	dev->of_node = of_node_get(dev2->of_node);
	dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
4325
/*
 * Generic match callbacks for the *_find_device() family of helpers.
 * Each returns nonzero when @dev matches the given key.
 */

/* Match by device name (sysfs-style string comparison). */
int device_match_name(struct device *dev, const void *name)
{
	return sysfs_streq(dev_name(dev), name);
}
EXPORT_SYMBOL_GPL(device_match_name);

/* Match by device-tree node pointer. */
int device_match_of_node(struct device *dev, const void *np)
{
	return dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);

/* Match by firmware node handle. */
int device_match_fwnode(struct device *dev, const void *fwnode)
{
	return dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);

/* Match by dev_t; @pdevt points at the dev_t to compare against. */
int device_match_devt(struct device *dev, const void *pdevt)
{
	return dev->devt == *(dev_t *)pdevt;
}
EXPORT_SYMBOL_GPL(device_match_devt);

/* Match by ACPI companion device. */
int device_match_acpi_dev(struct device *dev, const void *adev)
{
	return ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);

/* Match unconditionally - useful to iterate over all devices. */
int device_match_any(struct device *dev, const void *unused)
{
	return 1;
}
EXPORT_SYMBOL_GPL(device_match_any);
4361