1
2
3
4
5
6
7
8
9
10
11#include <linux/acpi.h>
12#include <linux/device.h>
13#include <linux/err.h>
14#include <linux/fwnode.h>
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/string.h>
19#include <linux/kdev_t.h>
20#include <linux/notifier.h>
21#include <linux/of.h>
22#include <linux/of_device.h>
23#include <linux/genhd.h>
24#include <linux/mutex.h>
25#include <linux/pm_runtime.h>
26#include <linux/netdevice.h>
27#include <linux/sched/signal.h>
28#include <linux/sysfs.h>
29
30#include "base.h"
31#include "power/power.h"
32
#ifdef CONFIG_SYSFS_DEPRECATED
/* Compat knob for the old /sys layout; defaults on iff DEPRECATED_V2 is set. */
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/* Parse the "sysfs.deprecated=<n>" kernel command-line argument. */
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
45
46
47
#ifdef CONFIG_SRCU
/*
 * Device-link list protection.  With SRCU available, writers serialize on
 * a mutex while readers use an SRCU read-side critical section, so link
 * lists can be walked without blocking writers indefinitely.
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

/* Returns the SRCU index to hand back to device_links_read_unlock(). */
int device_links_read_lock(void)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx)
{
	srcu_read_unlock(&device_links_srcu, idx);
}
#else /* !CONFIG_SRCU */
/* Fallback: a plain rwsem; the read-lock "index" is unused (always 0). */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}
#endif /* !CONFIG_SRCU */
95
96
97
98
99
100
101
102
103
/*
 * Return 1 if @target depends on @dev: @target is @dev itself, a descendant
 * of @dev, or a direct or indirect consumer of @dev via device links.
 * Otherwise return 0 (or an error propagated from device_for_each_child()).
 *
 * Called under device_links_write_lock() (see device_link_add()), which
 * keeps the consumer lists stable during the recursive walk.
 */
static int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	if (dev == target)
		return 1;

	/* A match among @dev's children makes @target dependent on @dev. */
	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->consumer == target)
			return 1;

		/* Recurse into indirect consumers of @dev. */
		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}
126
/*
 * Move @dev, then all of its children and all of its consumers (recursively),
 * to the ends of the devices kset list and the PM list, so that consumers
 * end up behind their suppliers in both orderings.
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/* Only registered / PM-initialized devices are on the lists at all. */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node)
		device_reorder_to_tail(link->consumer, NULL);

	return 0;
}
147
148
149
150
151
152
153
154
155
156
/**
 * device_pm_move_to_tail - Move a device (and its consumers) to list tails.
 * @dev: Device to move.
 *
 * Under the device links read lock and the PM list lock, reorder @dev,
 * its children and its consumers to the ends of the device and PM lists
 * via device_reorder_to_tail().
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags (DL_FLAG_*).
 *
 * Returns the link (new or pre-existing) on success, NULL on failure:
 * invalid arguments / flag combinations, supplier not PM-initialized,
 * a reverse dependency already existing from @supplier to @consumer,
 * allocation failure, or a failed runtime resume of the supplier when
 * both DL_FLAG_PM_RUNTIME and DL_FLAG_RPM_ACTIVE are set.
 *
 * If a link between the two devices already exists, its flags are updated
 * to reflect the new request and that link is returned instead of
 * allocating a second one (stateless and managed links never mix; that
 * case WARNs and fails).
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/* STATELESS excludes all AUTO* flags; AUTOPROBE excludes AUTOREMOVE. */
	if (!consumer || !supplier ||
	    (flags & DL_FLAG_STATELESS &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER |
		      DL_FLAG_AUTOPROBE_CONSUMER)) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	/* RPM_ACTIVE: resume the supplier up front; undone at "out" on failure. */
	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	device_links_write_lock();
	device_pm_lock();

	/*
	 * Refuse to create the link if the supplier is not PM-initialized or
	 * if it would introduce a dependency cycle (the consumer already
	 * being a supplier of the supplier, directly or indirectly).
	 */
	if (!device_pm_initialized(supplier)
	    || device_is_dependent(consumer, supplier)) {
		link = NULL;
		goto out;
	}

	/*
	 * AUTOREMOVE_SUPPLIER wins over AUTOREMOVE_CONSUMER when both are
	 * requested, so the consumer-side flag is dropped here.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		/*
		 * An existing link must agree with the new request on
		 * statelessness; mixing the two models is a caller bug.
		 */
		if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) {
			link = NULL;
			goto out;
		}

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			goto out;
		}

		/*
		 * Reconcile the autoremove flags on the existing managed
		 * link: an AUTOREMOVE_SUPPLIER request upgrades an
		 * AUTOREMOVE_CONSUMER link; a request with neither flag
		 * makes the link permanent.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	/* Derive the initial link state from both devices' driver states. */
	if (flags & DL_FLAG_STATELESS) {
		link->status = DL_STATE_NONE;
	} else {
		switch (supplier->links.status) {
		case DL_DEV_PROBING:
			switch (consumer->links.status) {
			case DL_DEV_PROBING:
				/*
				 * Both sides probing: treat the link as in
				 * consumer probe (e.g. a supplier creating a
				 * link to itself-as-consumer while probing).
				 */
				link->status = DL_STATE_CONSUMER_PROBE;
				break;
			default:
				link->status = DL_STATE_DORMANT;
				break;
			}
			break;
		case DL_DEV_DRIVER_BOUND:
			switch (consumer->links.status) {
			case DL_DEV_PROBING:
				link->status = DL_STATE_CONSUMER_PROBE;
				break;
			case DL_DEV_DRIVER_BOUND:
				link->status = DL_STATE_ACTIVE;
				break;
			default:
				link->status = DL_STATE_AVAILABLE;
				break;
			}
			break;
		case DL_DEV_UNBINDING:
			link->status = DL_STATE_SUPPLIER_UNBIND;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
	}

	/*
	 * A consumer probing with a PM-runtime-managed link needs the
	 * supplier resumed for the duration of the probe.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	/*
	 * Move the consumer (and its own children/consumers) behind the
	 * supplier on the device and PM lists, so shutdown and suspend
	 * order reflects the new dependency.
	 */
	device_reorder_to_tail(consumer, NULL);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

 out:
	device_pm_unlock();
	device_links_write_unlock();

	/* Balance the early pm_runtime_get_sync() if no link materialized. */
	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
403
/* Final teardown of a link: release PM references and device references. */
static void device_link_free(struct device_link *link)
{
	/* Drop every runtime-PM reference the link still holds (down to 1). */
	while (refcount_dec_not_one(&link->rpm_active))
		pm_runtime_put(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}
413
414#ifdef CONFIG_SRCU
415static void __device_link_free_srcu(struct rcu_head *rhead)
416{
417 device_link_free(container_of(rhead, struct device_link, rcu_head));
418}
419
420static void __device_link_del(struct kref *kref)
421{
422 struct device_link *link = container_of(kref, struct device_link, kref);
423
424 dev_dbg(link->consumer, "Dropping the link to %s\n",
425 dev_name(link->supplier));
426
427 if (link->flags & DL_FLAG_PM_RUNTIME)
428 pm_runtime_drop_link(link->consumer);
429
430 list_del_rcu(&link->s_node);
431 list_del_rcu(&link->c_node);
432 call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
433}
434#else
435static void __device_link_del(struct kref *kref)
436{
437 struct device_link *link = container_of(kref, struct device_link, kref);
438
439 dev_info(link->consumer, "Dropping the link to %s\n",
440 dev_name(link->supplier));
441
442 if (link->flags & DL_FLAG_PM_RUNTIME)
443 pm_runtime_drop_link(link->consumer);
444
445 list_del(&link->s_node);
446 list_del(&link->c_node);
447 device_link_free(link);
448}
449#endif
450
/*
 * Drop a caller-held reference to @link.  Only stateless links are
 * reference-counted by their users; trying to drop a managed link this
 * way is a caller bug and only warns.
 */
static void device_link_put_kref(struct device_link *link)
{
	if (link->flags & DL_FLAG_STATELESS)
		kref_put(&link->kref, __device_link_del);
	else
		WARN(1, "Unable to drop a managed device link reference\n");
}
458
459
460
461
462
463
464
465
466
467
/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * Drops a reference to @link under the device links write lock and the PM
 * lock; the link goes away only when its last reference is dropped.  The
 * caller must ensure proper synchronization with runtime PM.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_pm_lock();
	device_link_put_kref(link);
	device_pm_unlock();
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
477
478
479
480
481
482
483
484
485
/**
 * device_link_remove - Delete a stateless link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
 * Look up the link from @supplier to @consumer and drop a reference to it.
 * The caller must ensure proper synchronization with runtime PM.
 */
void device_link_remove(void *consumer, struct device *supplier)
{
	struct device_link *link;

	if (WARN_ON(consumer == supplier))
		return;

	device_links_write_lock();
	device_pm_lock();

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer == consumer) {
			device_link_put_kref(link);
			break;
		}
	}

	device_pm_unlock();
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);
507
/*
 * Roll back supplier links of @dev that were switched to CONSUMER_PROBE by
 * device_links_check_suppliers() when one of the suppliers turns out not to
 * be available after all.
 */
static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node)
		if (link->status == DL_STATE_CONSUMER_PROBE)
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device about to be probed.
 *
 * Walk @dev's managed links to suppliers: if any supplier is not in the
 * AVAILABLE state, roll back the links already switched and return
 * -EPROBE_DEFER; otherwise move all of them to CONSUMER_PROBE, mark @dev
 * as probing and return 0.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->status != DL_STATE_AVAILABLE) {
			device_links_missing_supplier(dev);
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}
556
557
558
559
560
561
562
563
564
565
566
567
/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device whose driver probe just succeeded.
 *
 * Move @dev's dormant consumer links to AVAILABLE (re-queueing deferred
 * probing of AUTOPROBE_CONSUMER consumers), move its supplier links from
 * CONSUMER_PROBE to ACTIVE, and mark the device as driver-bound.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		/*
		 * Links created during @dev's own probe may already be in
		 * the "consumer probe" or "active" state; leave them alone.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
		WRITE_ONCE(link->status, DL_STATE_ACTIVE);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();
}
607
608
609
610
611
612
613
614
615
616
617
618
619
/*
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all links from @dev to suppliers that carry
 * DL_FLAG_AUTOREMOVE_CONSUMER, and put the remaining managed ones that
 * were in use (CONSUMER_PROBE or ACTIVE) back into the AVAILABLE state.
 * Marks @dev as having no driver.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 * Must be called with the device links write lock held.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
			__device_link_del(&link->kref);
		else if (link->status == DL_STATE_CONSUMER_PROBE ||
			 link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}
637
638
639
640
641
642
643
644
645
646
647
/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver probe failed.
 *
 * Put @dev's in-use consumer links (CONSUMER_PROBE or ACTIVE) back to
 * DORMANT and update its supplier links via __device_links_no_driver().
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		/*
		 * NOTE(review): consumer links in CONSUMER_PROBE/ACTIVE here
		 * presumably stem from links created during the failed probe;
		 * they are parked as DORMANT rather than deleted.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}
674
675
676
677
678
679
680
681
682
683
684
685void device_links_driver_cleanup(struct device *dev)
686{
687 struct device_link *link, *ln;
688
689 device_links_write_lock();
690
691 list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
692 if (link->flags & DL_FLAG_STATELESS)
693 continue;
694
695 WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
696 WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
697
698
699
700
701
702
703 if (link->status == DL_STATE_SUPPLIER_UNBIND &&
704 link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
705 __device_link_del(&link->kref);
706
707 WRITE_ONCE(link->status, DL_STATE_DORMANT);
708 }
709
710 __device_links_no_driver(dev);
711
712 device_links_write_unlock();
713}
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each managed consumer link of @dev: return 'true' if any is in the
 * CONSUMER_PROBE or ACTIVE state (the consumer is probing right now or has
 * a driver bound).  Links found in other states are switched to
 * SUPPLIER_UNBIND so the consumers cannot probe successfully from now on,
 * and @dev is marked as unbinding.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk @dev's managed consumer links.  If any consumer is probing
 * (CONSUMER_PROBE), drop the lock, wait for probes in progress to finish
 * and start over.  Otherwise switch the link to SUPPLIER_UNBIND and, if it
 * was ACTIVE, drop the lock, force the consumer's driver to unbind and
 * start over (the consumer will not re-probe since the link state has
 * already been changed).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (link->flags & DL_FLAG_STATELESS)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			/* Hold the consumer across the unlocked unbind. */
			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}
806
807
808
809
810
/*
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device (being removed).
 *
 * Unconditionally deletes every remaining supplier and consumer link of
 * @dev under the device links write lock.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}
834
835
836
/* Legacy hooks called on device add/remove; see device_platform_notify(). */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
/* kobjects for the dev/char/block sysfs hierarchy — created elsewhere
 * (presumably in the driver-core init path; not visible in this chunk). */
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;

/* Serializes device hotplug operations (e.g. online/offline). */
static DEFINE_MUTEX(device_hotplug_lock);
844
void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}

void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}

/*
 * Sysfs-store variant: if the hotplug lock is contended, sleep briefly and
 * restart the syscall rather than blocking.  NOTE(review): the restart is
 * presumably there to avoid deadlocking a sysfs writer against a
 * concurrent hot-remove — confirm against callers such as online_store().
 */
int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}
864
#ifdef CONFIG_BLOCK
/* True for any device that is not a block-device partition. */
static inline int device_is_not_partition(struct device *dev)
{
	return dev->type != &part_type;
}
#else
/* Without block support there are no partition devices at all. */
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif
876
/*
 * Notify platform layers about device addition/removal: ACPI first, then
 * software nodes, then the legacy platform_notify hooks.  An error from
 * either of the first two aborts the sequence and is returned.
 */
static int
device_platform_notify(struct device *dev, enum kobject_action action)
{
	int ret;

	ret = acpi_platform_notify(dev, action);
	if (ret)
		return ret;

	ret = software_node_notify(dev, action);
	if (ret)
		return ret;

	/* Legacy hooks fire only on add/remove and their result is ignored. */
	if (platform_notify && action == KOBJ_ADD)
		platform_notify(dev);
	else if (platform_notify_remove && action == KOBJ_REMOVE)
		platform_notify_remove(dev);
	return 0;
}
896
897
898
899
900
901
902
903
904
905
906const char *dev_driver_string(const struct device *dev)
907{
908 struct device_driver *drv;
909
910
911
912
913
914 drv = READ_ONCE(dev->driver);
915 return drv ? drv->name :
916 (dev->bus ? dev->bus->name :
917 (dev->class ? dev->class->name : ""));
918}
919EXPORT_SYMBOL(dev_driver_string);
920
#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

/* sysfs show dispatch: forward to the device_attribute's ->show() method. */
static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->show)
		ret = dev_attr->show(dev, dev_attr, buf);
	/* A show() result must fit in one page; a larger count is a bug. */
	if (ret >= (ssize_t)PAGE_SIZE) {
		printk("dev_attr_show: %pS returned bad count\n",
				dev_attr->show);
	}
	return ret;
}
938
939static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
940 const char *buf, size_t count)
941{
942 struct device_attribute *dev_attr = to_dev_attr(attr);
943 struct device *dev = kobj_to_dev(kobj);
944 ssize_t ret = -EIO;
945
946 if (dev_attr->store)
947 ret = dev_attr->store(dev, dev_attr, buf, count);
948 return ret;
949}
950
/* sysfs_ops shared by all device kobjects. */
static const struct sysfs_ops dev_sysfs_ops = {
	.show = dev_attr_show,
	.store = dev_attr_store,
};

#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
957
958ssize_t device_store_ulong(struct device *dev,
959 struct device_attribute *attr,
960 const char *buf, size_t size)
961{
962 struct dev_ext_attribute *ea = to_ext_attr(attr);
963 int ret;
964 unsigned long new;
965
966 ret = kstrtoul(buf, 0, &new);
967 if (ret)
968 return ret;
969 *(unsigned long *)(ea->var) = new;
970
971 return size;
972}
973EXPORT_SYMBOL_GPL(device_store_ulong);
974
975ssize_t device_show_ulong(struct device *dev,
976 struct device_attribute *attr,
977 char *buf)
978{
979 struct dev_ext_attribute *ea = to_ext_attr(attr);
980 return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
981}
982EXPORT_SYMBOL_GPL(device_show_ulong);
983
984ssize_t device_store_int(struct device *dev,
985 struct device_attribute *attr,
986 const char *buf, size_t size)
987{
988 struct dev_ext_attribute *ea = to_ext_attr(attr);
989 int ret;
990 long new;
991
992 ret = kstrtol(buf, 0, &new);
993 if (ret)
994 return ret;
995
996 if (new > INT_MAX || new < INT_MIN)
997 return -EINVAL;
998 *(int *)(ea->var) = new;
999
1000 return size;
1001}
1002EXPORT_SYMBOL_GPL(device_store_int);
1003
1004ssize_t device_show_int(struct device *dev,
1005 struct device_attribute *attr,
1006 char *buf)
1007{
1008 struct dev_ext_attribute *ea = to_ext_attr(attr);
1009
1010 return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
1011}
1012EXPORT_SYMBOL_GPL(device_show_int);
1013
1014ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
1015 const char *buf, size_t size)
1016{
1017 struct dev_ext_attribute *ea = to_ext_attr(attr);
1018
1019 if (strtobool(buf, ea->var) < 0)
1020 return -EINVAL;
1021
1022 return size;
1023}
1024EXPORT_SYMBOL_GPL(device_store_bool);
1025
1026ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
1027 char *buf)
1028{
1029 struct dev_ext_attribute *ea = to_ext_attr(attr);
1030
1031 return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
1032}
1033EXPORT_SYMBOL_GPL(device_show_bool);
1034
1035
1036
1037
1038
1039
1040
1041
1042
/*
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * Called once the reference count for the object reaches 0.  Forwards the
 * call to the device's release method, which should handle actually
 * freeing the structure.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Release all managed (devres) resources before invoking the
	 * release callback, so nothing devres-managed outlives the device.
	 */
	devres_release_all(dev);

	/* First match wins: device, then type, then class release hook. */
	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n",
			dev_name(dev));
	kfree(p);
}
1070
1071static const void *device_namespace(struct kobject *kobj)
1072{
1073 struct device *dev = kobj_to_dev(kobj);
1074 const void *ns = NULL;
1075
1076 if (dev->class && dev->class->ns_type)
1077 ns = dev->class->namespace(dev);
1078
1079 return ns;
1080}
1081
1082static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
1083{
1084 struct device *dev = kobj_to_dev(kobj);
1085
1086 if (dev->class && dev->class->get_ownership)
1087 dev->class->get_ownership(dev, uid, gid);
1088}
1089
/* kobj_type shared by all struct device kobjects. */
static struct kobj_type device_ktype = {
	.release = device_release,
	.sysfs_ops = &dev_sysfs_ops,
	.namespace = device_namespace,
	.get_ownership = device_get_ownership,
};
1096
1097
1098static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
1099{
1100 struct kobj_type *ktype = get_ktype(kobj);
1101
1102 if (ktype == &device_ktype) {
1103 struct device *dev = kobj_to_dev(kobj);
1104 if (dev->bus)
1105 return 1;
1106 if (dev->class)
1107 return 1;
1108 }
1109 return 0;
1110}
1111
1112static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
1113{
1114 struct device *dev = kobj_to_dev(kobj);
1115
1116 if (dev->bus)
1117 return dev->bus->name;
1118 if (dev->class)
1119 return dev->class->name;
1120 return NULL;
1121}
1122
/* Build the uevent environment for a device kobject. */
static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* Add the device node properties when the device has a dev_t. */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			/* Only report non-default mode/uid/gid. */
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common device-tree information about the device. */
	of_device_uevent(dev, env);

	/* Let the bus-specific function add its keys. */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* Let the class-specific function add its keys. */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* Let the device-type-specific function add its keys. */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}
1189
/* uevent callbacks for the devices kset. */
static const struct kset_uevent_ops device_uevent_ops = {
	.filter =	dev_uevent_filter,
	.name =		dev_uevent_name,
	.uevent =	dev_uevent,
};
1195
/* "uevent" attribute read: dump the env keys a uevent would carry. */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	size_t count = 0;
	int retval;

	/* Search the kset the device belongs to (may be on an ancestor). */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* Respect the kset's uevent filter, if any. */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* Let the kset-specific function fill in its keys. */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* Copy the collected keys to the output buffer, one per line. */
	for (i = 0; i < env->envp_idx; i++)
		count += sprintf(&buf[count], "%s\n", env->envp[i]);
out:
	kfree(env);
	return count;
}
1238
/* "uevent" attribute write: synthesize the uevent named by user space. */
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&dev->kobj, buf, count);

	/* Propagate the error so the writer sees the failure. */
	if (rc) {
		dev_err(dev, "uevent: failed to send synthetic uevent\n");
		return rc;
	}

	return count;
}
static DEVICE_ATTR_RW(uevent);
1254
/* "online" attribute read: 1 if the device is online, 0 if offlined. */
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	bool val;

	/* Read dev->offline under the device lock for a consistent value. */
	device_lock(dev);
	val = !dev->offline;
	device_unlock(dev);
	return sprintf(buf, "%u\n", val);
}

/* "online" attribute write: online/offline the device under hotplug lock. */
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	/* May return via restart_syscall() when the hotplug lock is busy. */
	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);
1285
/* Create the given sysfs attribute groups under the device's kobject. */
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

/* Remove attribute groups previously created by device_add_groups(). */
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);
1298
/* devres payload for managed attribute group(s); one member is in use. */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};

/* devres match callback: find the devres entry holding @data's group. */
static int devm_attr_group_match(struct device *dev, void *res, void *data)
{
	return ((union device_attr_group_devres *)res)->group == data;
}

/* devres release callback for a single managed attribute group. */
static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}

/* devres release callback for a NULL-terminated array of managed groups. */
static void devm_attr_groups_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group **groups = devres->groups;

	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
	sysfs_remove_groups(&dev->kobj, groups);
}
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
/**
 * devm_device_add_group - given a device, create a managed attribute group
 * @dev:	The device to create the group for
 * @grp:	The attribute group to create
 *
 * Creates the group and registers a devres entry so the group is removed
 * automatically when the device goes away.
 *
 * Returns 0 on success or error code on failure.
 */
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_group_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_group(&dev->kobj, grp);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->group = grp;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
1358
1359
1360
1361
1362
1363
1364
1365
1366
/**
 * devm_device_remove_group - remove a managed group from a device
 * @dev:	device to remove the group from
 * @grp:	group to remove
 *
 * Releases the matching devres entry, which removes the group; warns if
 * no such managed group was registered.
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
/**
 * devm_device_add_groups - create a bunch of managed attribute groups
 * @dev:	The device to create the group for
 * @groups:	The attribute groups to create, NULL terminated
 *
 * Creates all of the groups and registers a devres entry so they are
 * removed automatically when the device goes away.  On any failure all
 * previously created groups are removed by sysfs_create_groups() itself.
 *
 * Returns 0 on success or error code from sysfs_create_groups on failure.
 */
int devm_device_add_groups(struct device *dev,
			   const struct attribute_group **groups)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_groups_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_groups(&dev->kobj, groups);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->groups = groups;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_groups);
1411
1412
1413
1414
1415
1416
1417
1418
1419
/**
 * devm_device_remove_groups - remove a list of managed groups
 * @dev:	The device for the groups to be removed from
 * @groups:	NULL terminated list of groups to be removed
 *
 * Releases the matching devres entry, which removes the groups; warns if
 * no such managed group list was registered.
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);
1428
/*
 * Create the standard attribute groups for a device being added: class
 * groups, then type groups, then the device's own groups, then the
 * "online" file when the device supports offlining.  Unwinds in reverse
 * order on failure.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	return 0;

	/* Error unwinding, in reverse order of creation. */
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}
1470
/* Reverse of device_add_attrs(): remove in opposite order of creation. */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}
1485
/* "dev" attribute: print the device number (dev_t) via print_dev_t(). */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);

/* The kset backing the global devices hierarchy. */
struct kset *devices_kset;
1495
1496
1497
1498
1499
1500
/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}
1541
1542
1543
1544
1545
1546
1547int device_create_file(struct device *dev,
1548 const struct device_attribute *attr)
1549{
1550 int error = 0;
1551
1552 if (dev) {
1553 WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
1554 "Attribute %s: write permission without 'store'\n",
1555 attr->attr.name);
1556 WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
1557 "Attribute %s: read permission without 'show'\n",
1558 attr->attr.name);
1559 error = sysfs_create_file(&dev->kobj, &attr->attr);
1560 }
1561
1562 return error;
1563}
1564EXPORT_SYMBOL_GPL(device_create_file);
1565
1566
1567
1568
1569
1570
1571void device_remove_file(struct device *dev,
1572 const struct device_attribute *attr)
1573{
1574 if (dev)
1575 sysfs_remove_file(&dev->kobj, &attr->attr);
1576}
1577EXPORT_SYMBOL_GPL(device_remove_file);
1578
1579
1580
1581
1582
1583
1584
1585
1586bool device_remove_file_self(struct device *dev,
1587 const struct device_attribute *attr)
1588{
1589 if (dev)
1590 return sysfs_remove_file_self(&dev->kobj, &attr->attr);
1591 else
1592 return false;
1593}
1594EXPORT_SYMBOL_GPL(device_remove_file_self);
1595
1596
1597
1598
1599
1600
1601int device_create_bin_file(struct device *dev,
1602 const struct bin_attribute *attr)
1603{
1604 int error = -EINVAL;
1605 if (dev)
1606 error = sysfs_create_bin_file(&dev->kobj, attr);
1607 return error;
1608}
1609EXPORT_SYMBOL_GPL(device_create_bin_file);
1610
1611
1612
1613
1614
1615
1616void device_remove_bin_file(struct device *dev,
1617 const struct bin_attribute *attr)
1618{
1619 if (dev)
1620 sysfs_remove_bin_file(&dev->kobj, attr);
1621}
1622EXPORT_SYMBOL_GPL(device_remove_bin_file);
1623
1624static void klist_children_get(struct klist_node *n)
1625{
1626 struct device_private *p = to_device_private_parent(n);
1627 struct device *dev = p->device;
1628
1629 get_device(dev);
1630}
1631
1632static void klist_children_put(struct klist_node *n)
1633{
1634 struct device_private *p = to_device_private_parent(n);
1635 struct device *dev = p->device;
1636
1637 put_device(dev);
1638}
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * Initializes the kobject, locks, lists and PM state of @dev so that
 * the device can be used by the rest of the driver core; part one of
 * device_register() (device_add() is part two).  Once this has been
 * called, the device must be released with put_device(), never kfree().
 */
void device_initialize(struct device *dev)
{
	/* parent the kobject under /sys/devices by default */
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
#ifdef CONFIG_PROVE_LOCKING
	mutex_init(&dev->lockdep_mutex);
#endif
	/* dev->mutex nests arbitrarily deep; exempt it from lockdep checking */
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	/* no NUMA affinity until device_add() inherits the parent's node */
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
1682
/*
 * Return the /sys/devices/virtual directory, creating it on first use.
 *
 * NOTE(review): the lazy creation of @virtual_dir has no visible
 * serialization here — presumably callers are serialized by a higher
 * level lock; confirm before relying on concurrent first calls.
 */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}
1693
/* A "glue" directory kobject grouping class devices under a common parent. */
struct class_dir {
	struct kobject kobj;
	struct class *class;	/* class this glue directory belongs to */
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
1700
/* kobject release: frees the allocation made in class_dir_create_and_add(). */
static void class_dir_release(struct kobject *kobj)
{
	kfree(to_class_dir(kobj));
}
1706
1707static const
1708struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
1709{
1710 struct class_dir *dir = to_class_dir(kobj);
1711 return dir->class->ns_type;
1712}
1713
/* kobj_type for class glue directories. */
static struct kobj_type class_dir_ktype = {
	.release	= class_dir_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.child_ns_type	= class_dir_child_ns_type
};
1719
/*
 * Allocate and register a glue directory for @class under @parent_kobj.
 *
 * Returns the new kobject, or an ERR_PTR on failure.  Note that after
 * kobject_init() the struct is owned by the kobject core, so the error
 * path must use kobject_put() (which frees via class_dir_release()),
 * not kfree().
 */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	/* membership in glue_dirs is what live_in_glue_dir() tests for */
	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}
1742
1743static DEFINE_MUTEX(gdp_mutex);
1744
/*
 * Pick the sysfs parent kobject for @dev before kobject_add().
 *
 * For class devices this may be a shared per-class "glue" directory
 * (looked up or created under gdp_mutex); otherwise it is the parent
 * device's kobject, the bus's dev_root, or NULL (top of /sys/devices).
 * May return an ERR_PTR if glue directory creation fails.
 */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block (deprecated layout) */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".  Class-devices
		 * with a non class-device as parent, live in a "glue"
		 * directory to prevent namespace collisions -- unless the
		 * parent is itself a class device, which is used directly.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* not found -- create a new glue directory under gdp_mutex */
		k = class_dir_create_and_add(dev->class, parent_kobj);

		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}
1804
1805static inline bool live_in_glue_dir(struct kobject *kobj,
1806 struct device *dev)
1807{
1808 if (!kobj || !dev->class ||
1809 kobj->kset != &dev->class->p->glue_dirs)
1810 return false;
1811 return true;
1812}
1813
/* The device's sysfs parent -- a glue dir iff live_in_glue_dir() says so. */
static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}
1818
1819
1820
1821
1822
1823
/*
 * Drop our reference on a class glue directory, and tear the directory
 * down when we are the last user.
 *
 * Taking gdp_mutex makes the emptiness check and kobject_del() atomic
 * with respect to get_device_parent(), which looks glue dirs up and
 * takes new references under the same mutex.  Without it, another CPU
 * could grab the dir between our check and our kobject_del().
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/*
	 * Delete the directory only when no children remain AND the
	 * reference we are about to drop is the last one (kref would hit
	 * zero).  If other holders exist, the last of them will come back
	 * here and do the kobject_del().
	 */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}
1887
/*
 * Create the standard symlinks for a newly added device:
 *   "of_node"   -> its devicetree node (best effort, warn on failure)
 *   "subsystem" -> its class
 *   "device"    -> its parent (non-partition class devices only)
 *   and a link from the class directory back to the device.
 * Unwinds via gotos in reverse order of creation on error.
 */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n",error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories for disks already (deprecated layout) */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}
1939
/* Remove the symlinks created by device_add_class_symlinks(). */
static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	/* no class-directory link was created for deprecated block layout */
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}
1957
1958
1959
1960
1961
1962
1963int dev_set_name(struct device *dev, const char *fmt, ...)
1964{
1965 va_list vargs;
1966 int err;
1967
1968 va_start(vargs, fmt);
1969 err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
1970 va_end(vargs);
1971 return err;
1972}
1973EXPORT_SYMBOL_GPL(dev_set_name);
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986static struct kobject *device_to_dev_kobj(struct device *dev)
1987{
1988 struct kobject *kobj;
1989
1990 if (dev->class)
1991 kobj = dev->class->dev_kobj;
1992 else
1993 kobj = sysfs_dev_char_kobj;
1994
1995 return kobj;
1996}
1997
1998static int device_create_sys_dev_entry(struct device *dev)
1999{
2000 struct kobject *kobj = device_to_dev_kobj(dev);
2001 int error = 0;
2002 char devt_str[15];
2003
2004 if (kobj) {
2005 format_dev_t(devt_str, dev->devt);
2006 error = sysfs_create_link(kobj, &dev->kobj, devt_str);
2007 }
2008
2009 return error;
2010}
2011
2012static void device_remove_sys_dev_entry(struct device *dev)
2013{
2014 struct kobject *kobj = device_to_dev_kobj(dev);
2015 char devt_str[15];
2016
2017 if (kobj) {
2018 format_dev_t(devt_str, dev->devt);
2019 sysfs_remove_link(kobj, devt_str);
2020 }
2021}
2022
/*
 * Allocate and wire up the driver-core private data for @dev.
 * Returns -ENOMEM on allocation failure.
 */
static int device_private_init(struct device *dev)
{
	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
	if (!dev->p)
		return -ENOMEM;
	dev->p->device = dev;
	/* children hold a reference on their parent via get/put callbacks */
	klist_init(&dev->p->klist_children, klist_children_get,
		   klist_children_put);
	INIT_LIST_HEAD(&dev->p->deferred_probe);
	return 0;
}
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
/**
 * device_add - add device to device hierarchy.
 * @dev: device.
 *
 * This is part 2 of device_register(); the device must already have
 * been initialized with device_initialize().  On success the device
 * has a sysfs presence, is registered with its bus and the PM core,
 * and a KOBJ_ADD uevent has been emitted.
 *
 * Do NOT call this (or device_register()) more than once for any
 * device structure.  If this returns an error, the caller must clean
 * up with put_device() -- never kfree() -- since a reference was
 * taken here even on failure paths.
 */
int device_add(struct device *dev)
{
	struct device *parent;
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
	struct kobject *glue_dir = NULL;

	dev = get_device(dev);
	if (!dev)
		goto done;

	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * for statically allocated devices, which should all be converted
	 * some day, we need to initialize the name. We prevent reading back
	 * the name, and force the use of dev_name()
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	/* subsystems can specify simple device enumeration */
	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	parent = get_device(dev->parent);
	kobj = get_device_parent(dev, parent);
	if (IS_ERR(kobj)) {
		error = PTR_ERR(kobj);
		goto parent_error;
	}
	if (kobj)
		dev->kobj.parent = kobj;

	/* use parent numa_node */
	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
		set_dev_node(dev, dev_to_node(parent));

	/* first, register with generic layer. */
	/* we require the name to be set before, and pass NULL */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error) {
		glue_dir = get_glue_dir(dev);
		goto Error;
	}

	/* notify platform of device entry */
	error = device_platform_notify(dev, KOBJ_ADD);
	if (error)
		goto platform_error;

	error = device_create_file(dev, &dev_attr_uevent);
	if (error)
		goto attrError;

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &dev_attr_dev);
		if (error)
			goto DevAttrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto SysEntryError;

		devtmpfs_create_node(dev);
	}

	/* Notify clients of device addition.  This call must come
	 * after dpm_sysfs_add() and before kobject_uevent().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);
	bus_probe_device(dev);
	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->p->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
	/* error labels unwind in strict reverse order of the setup above */
 SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	device_platform_notify(dev, KOBJ_REMOVE);
platform_error:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
	cleanup_glue_dir(dev, glue_dir);
parent_error:
	put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
EXPORT_SYMBOL_GPL(device_add);
2215EXPORT_SYMBOL_GPL(device_add);
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * Simply device_initialize() + device_add().  Call only once per
 * device; on error, clean up with put_device(), never kfree().
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);
2241
2242
2243
2244
2245
2246
2247
2248
2249
/**
 * get_device - increment reference count for device.
 * @dev: device (may be NULL).
 *
 * Returns @dev with its kobject reference incremented, or NULL.
 */
struct device *get_device(struct device *dev)
{
	return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
}
EXPORT_SYMBOL_GPL(get_device);
2255
2256
2257
2258
2259
/**
 * put_device - decrement reference count, possibly releasing the device.
 * @dev: device (may be NULL).
 */
void put_device(struct device *dev)
{
	/* might_sleep(); */
	if (dev)
		kobject_put(&dev->kobj);
}
EXPORT_SYMBOL_GPL(put_device);
2267
/**
 * kill_device - mark a device as dead.
 * @dev: device; its device_lock() must be held by the caller.
 *
 * Sets the one-way "dead" flag that stops new probing/binding of the
 * device.  Returns true if this call did the marking, false if the
 * device was already dead (so only one caller ever "wins").
 */
bool kill_device(struct device *dev)
{
	/*
	 * Require the device lock to ensure that device_del() cannot run
	 * concurrently with this check-and-set.
	 */
	lockdep_assert_held(&dev->mutex);

	if (dev->p->dead)
		return false;
	dev->p->dead = true;
	return true;
}
EXPORT_SYMBOL_GPL(kill_device);
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * Removes the device from its bus, class, PM framework and sysfs,
 * in roughly the reverse order of device_add().  The caller still
 * holds a reference; device_unregister() pairs this with put_device().
 */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;

	/* mark dead first so nothing new binds while we tear down */
	device_lock(dev);
	kill_device(dev);
	device_unlock(dev);

	/* Notify clients of device removal.  This call must come
	 * before dpm_sysfs_remove().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);

	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &dev_attr_dev);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->mutex);
		/* notify any interfaces that the device is now gone */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* remove the device from the class list */
		klist_del(&dev->p->knode_class);
		mutex_unlock(&dev->class->p->mutex);
	}
	device_remove_file(dev, &dev_attr_uevent);
	device_remove_attrs(dev);
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_platform_notify(dev, KOBJ_REMOVE);
	device_remove_properties(dev);
	device_links_purge(dev);

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	/* grab the glue dir before kobject_del() clears kobj.parent */
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
	cleanup_glue_dir(dev, glue_dir);
	put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * device_del() plus dropping the caller's reference.  The device may
 * outlive this call if other references remain; release happens when
 * the last reference is dropped.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);
2375
2376static struct device *prev_device(struct klist_iter *i)
2377{
2378 struct klist_node *n = klist_prev(i);
2379 struct device *dev = NULL;
2380 struct device_private *p;
2381
2382 if (n) {
2383 p = to_device_private_parent(n);
2384 dev = p->device;
2385 }
2386 return dev;
2387}
2388
2389static struct device *next_device(struct klist_iter *i)
2390{
2391 struct klist_node *n = klist_next(i);
2392 struct device *dev = NULL;
2393 struct device_private *p;
2394
2395 if (n) {
2396 p = to_device_private_parent(n);
2397 dev = p->device;
2398 }
2399 return dev;
2400}
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
/**
 * device_get_devnode - path of device node file
 * @dev: device
 * @mode: returned file access mode
 * @uid: returned file owner
 * @gid: returned file group
 * @tmp: possibly allocated string
 *
 * Return the relative path of a possible device node.  Precedence:
 * type->devnode(), then class->devnode(), then the device name itself
 * (with '!' translated to '/').  A non-NULL *@tmp on return is a
 * buffer the caller must kfree() when done.
 */
const char *device_get_devnode(struct device *dev,
			       umode_t *mode, kuid_t *uid, kgid_t *gid,
			       const char **tmp)
{
	char *s;

	*tmp = NULL;

	/* the device type may provide a specific name */
	if (dev->type && dev->type->devnode)
		*tmp = dev->type->devnode(dev, mode, uid, gid);
	if (*tmp)
		return *tmp;

	/* the class may provide a specific name */
	if (dev->class && dev->class->devnode)
		*tmp = dev->class->devnode(dev, mode);
	if (*tmp)
		return *tmp;

	/* return name without allocation, tmp == NULL */
	if (strchr(dev_name(dev), '!') == NULL)
		return dev_name(dev);

	/* replace '!' in the name with '/' */
	s = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!s)
		return NULL;
	strreplace(s, '!', '/');
	return *tmp = s;
}
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
/**
 * device_for_each_child - device child iterator.
 * @parent: parent struct device.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over @parent's child devices and call @fn for each,
 * passing it @data.  Iteration stops at the first non-zero return
 * from @fn, and that value is returned.
 */
int device_for_each_child(struct device *parent, void *data,
			  int (*fn)(struct device *dev, void *data))
{
	struct klist_iter i;
	struct device *child;
	int error = 0;

	/* no private data means no children list to walk */
	if (!parent->p)
		return 0;

	klist_iter_init(&parent->p->klist_children, &i);
	while (!error && (child = next_device(&i)))
		error = fn(child, data);
	klist_iter_exit(&i);
	return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child);
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
/**
 * device_for_each_child_reverse - device child iterator in reversed order.
 * @parent: parent struct device.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Like device_for_each_child() but walks the children from the tail
 * of the list towards the head.  Stops at the first non-zero return
 * from @fn and returns that value.
 */
int device_for_each_child_reverse(struct device *parent, void *data,
				  int (*fn)(struct device *dev, void *data))
{
	struct klist_iter i;
	struct device *child;
	int error = 0;

	/* no private data means no children list to walk */
	if (!parent->p)
		return 0;

	klist_iter_init(&parent->p->klist_children, &i);
	while ((child = prev_device(&i)) && !error)
		error = fn(child, data);
	klist_iter_exit(&i);
	return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524struct device *device_find_child(struct device *parent, void *data,
2525 int (*match)(struct device *dev, void *data))
2526{
2527 struct klist_iter i;
2528 struct device *child;
2529
2530 if (!parent)
2531 return NULL;
2532
2533 klist_iter_init(&parent->p->klist_children, &i);
2534 while ((child = next_device(&i)))
2535 if (match(child, data) && get_device(child))
2536 break;
2537 klist_iter_exit(&i);
2538 return child;
2539}
2540EXPORT_SYMBOL_GPL(device_find_child);
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552struct device *device_find_child_by_name(struct device *parent,
2553 const char *name)
2554{
2555 struct klist_iter i;
2556 struct device *child;
2557
2558 if (!parent)
2559 return NULL;
2560
2561 klist_iter_init(&parent->p->klist_children, &i);
2562 while ((child = next_device(&i)))
2563 if (!strcmp(dev_name(child), name) && get_device(child))
2564 break;
2565 klist_iter_exit(&i);
2566 return child;
2567}
2568EXPORT_SYMBOL_GPL(device_find_child_by_name);
2569
/*
 * Create the top-level sysfs objects used by the driver core:
 * /sys/devices, /sys/dev, /sys/dev/block and /sys/dev/char.
 * Error labels unwind in reverse order of creation.
 */
int __init devices_init(void)
{
	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
	if (!devices_kset)
		return -ENOMEM;
	dev_kobj = kobject_create_and_add("dev", NULL);
	if (!dev_kobj)
		goto dev_kobj_err;
	sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
	if (!sysfs_dev_block_kobj)
		goto block_kobj_err;
	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
	if (!sysfs_dev_char_kobj)
		goto char_kobj_err;

	return 0;

 char_kobj_err:
	kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
	kobject_put(dev_kobj);
 dev_kobj_err:
	kset_unregister(devices_kset);
	return -ENOMEM;
}
2595
2596static int device_check_offline(struct device *dev, void *not_used)
2597{
2598 int ret;
2599
2600 ret = device_for_each_child(dev, NULL, device_check_offline);
2601 if (ret)
2602 return ret;
2603
2604 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
2605}
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
/**
 * device_offline - Prepare the device for hot-removal.
 * @dev: Device to be put offline.
 *
 * Checks that the whole subtree below @dev can be offlined first,
 * then executes the bus's ->offline() callback under the device lock
 * and emits KOBJ_OFFLINE on success.
 *
 * Returns 0 on success, 1 if the device was already offline, -EPERM
 * if offlining is disabled for it, -EBUSY if a descendant blocks it,
 * or the bus callback's error.
 */
int device_offline(struct device *dev)
{
	int ret;

	if (dev->offline_disabled)
		return -EPERM;

	ret = device_for_each_child(dev, NULL, device_check_offline);
	if (ret)
		return ret;

	device_lock(dev);
	if (device_supports_offline(dev)) {
		if (dev->offline) {
			ret = 1;
		} else {
			ret = dev->bus->offline(dev);
			if (!ret) {
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
				dev->offline = true;
			}
		}
	}
	device_unlock(dev);

	return ret;
}
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
/**
 * device_online - Put the device back online after successful offline.
 * @dev: Device to be put back online.
 *
 * Executes the bus's ->online() callback under the device lock and
 * emits KOBJ_ONLINE on success.
 *
 * Returns 0 on success, 1 if the device was already online (or does
 * not support offlining), or the bus callback's error.
 */
int device_online(struct device *dev)
{
	int ret = 0;

	device_lock(dev);
	if (device_supports_offline(dev)) {
		if (dev->offline) {
			ret = dev->bus->online(dev);
			if (!ret) {
				kobject_uevent(&dev->kobj, KOBJ_ONLINE);
				dev->offline = false;
			}
		} else {
			ret = 1;
		}
	}
	device_unlock(dev);

	return ret;
}
2676
/* A parentless "root" device, optionally linked to its owning module. */
struct root_device {
	struct device dev;
	struct module *owner;	/* module holding the "module" symlink, or NULL */
};

static inline struct root_device *to_root_device(struct device *d)
{
	return container_of(d, struct root_device, dev);
}

/* Release callback: frees the allocation from __root_device_register(). */
static void root_device_release(struct device *dev)
{
	kfree(to_root_device(dev));
}
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
/**
 * __root_device_register - allocate and register a root device
 * @name: root device name
 * @owner: owner module of the root device, usually THIS_MODULE
 *
 * Allocates and registers a parentless device named @name directly
 * under /sys/devices.  If @owner is non-NULL a "module" symlink to it
 * is created under the new device (CONFIG_MODULES only).
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 * Use root_device_unregister() to unregister it.
 */
struct device *__root_device_register(const char *name, struct module *owner)
{
	struct root_device *root;
	int err = -ENOMEM;

	root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
	if (!root)
		return ERR_PTR(err);

	err = dev_set_name(&root->dev, "%s", name);
	if (err) {
		/* kfree() is safe here: the device was never registered */
		kfree(root);
		return ERR_PTR(err);
	}

	root->dev.release = root_device_release;

	err = device_register(&root->dev);
	if (err) {
		/* after device_register() only put_device() may free it */
		put_device(&root->dev);
		return ERR_PTR(err);
	}

#ifdef CONFIG_MODULES	/* gotta find a "cleaner" way to do this */
	if (owner) {
		struct module_kobject *mk = &owner->mkobj;

		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
		if (err) {
			device_unregister(&root->dev);
			return ERR_PTR(err);
		}
		root->owner = owner;
	}
#endif

	return &root->dev;
}
EXPORT_SYMBOL_GPL(__root_device_register);
2753
2754
2755
2756
2757
2758
2759
2760
/**
 * root_device_unregister - unregister and free a root device
 * @dev: device going away
 *
 * Removes the "module" symlink (if one was created) and unregisters
 * the root device.
 */
void root_device_unregister(struct device *dev)
{
	struct root_device *root = to_root_device(dev);

	if (root->owner)
		sysfs_remove_link(&root->dev.kobj, "module");

	device_unregister(dev);
}
EXPORT_SYMBOL_GPL(root_device_unregister);
2771
2772
/* Release callback for devices allocated by device_create_groups_vargs(). */
static void device_create_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}
2778
/*
 * Common worker behind device_create() and friends: allocate a device,
 * fill in class/parent/devt/groups/drvdata, name it from @fmt/@args and
 * device_add() it.
 *
 * Returns the new device or ERR_PTR().  All error paths funnel through
 * put_device(), which frees the allocation via device_create_release()
 * (put_device(NULL) is a no-op for the pre-allocation failures).
 */
static __printf(6, 0) struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
			   dev_t devt, void *drvdata,
			   const struct attribute_group **groups,
			   const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	device_initialize(dev);
	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
/**
 * device_create_vargs - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @fmt: string for the device's name
 * @args: va_list for the device's name
 *
 * Thin wrapper around device_create_groups_vargs() with no attribute
 * groups.  Returns &struct device pointer on success, or ERR_PTR().
 */
struct device *device_create_vargs(struct class *class, struct device *parent,
				   dev_t devt, void *drvdata, const char *fmt,
				   va_list args)
{
	return device_create_groups_vargs(class, parent, devt, drvdata, NULL,
					  fmt, args);
}
EXPORT_SYMBOL_GPL(device_create_vargs);
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
/**
 * device_create - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @fmt: printf-style format string for the device's name
 *
 * Variadic front end for device_create_vargs().  Returns &struct
 * device pointer on success, or ERR_PTR().
 */
struct device *device_create(struct class *class, struct device *parent,
			     dev_t devt, void *drvdata, const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
	va_end(vargs);
	return dev;
}
EXPORT_SYMBOL_GPL(device_create);
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
/**
 * device_create_with_groups - creates a device and registers it with sysfs,
 *			       also creating the given attribute groups
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @groups: NULL-terminated list of attribute groups to create with the device
 * @fmt: printf-style format string for the device's name
 *
 * Like device_create() but also installs @groups before userspace is
 * notified of the device.  Returns &struct device pointer or ERR_PTR().
 */
struct device *device_create_with_groups(struct class *class,
					 struct device *parent, dev_t devt,
					 void *drvdata,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
					 fmt, vargs);
	va_end(vargs);
	return dev;
}
EXPORT_SYMBOL_GPL(device_create_with_groups);
2934
2935static int __match_devt(struct device *dev, const void *data)
2936{
2937 const dev_t *devt = data;
2938
2939 return dev->devt == *devt;
2940}
2941
2942
2943
2944
2945
2946
2947
2948
2949
/**
 * device_destroy - removes a device that was created with device_create()
 * @class: pointer to the struct class that this device was registered with
 * @devt: the dev_t of the device that was previously registered
 */
void device_destroy(struct class *class, dev_t devt)
{
	struct device *dev;

	dev = class_find_device(class, NULL, &devt, __match_devt);
	if (dev) {
		/* drop the lookup's reference; device_unregister() drops
		 * the registration reference itself */
		put_device(dev);
		device_unregister(dev);
	}
}
EXPORT_SYMBOL_GPL(device_destroy);
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
/**
 * device_rename - renames a device
 * @dev: the pointer to the struct device to be renamed
 * @new_name: the new name of the device
 *
 * Renames the device's kobject and, for class devices, the symlink in
 * the class directory.  Note there is no rollback: if kobject_rename()
 * fails after the class link was renamed, the two can end up out of
 * sync.  Returns 0 on success or a negative errno.
 */
int device_rename(struct device *dev, const char *new_name)
{
	struct kobject *kobj = &dev->kobj;
	char *old_device_name = NULL;
	int error;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	dev_dbg(dev, "renaming to %s\n", new_name);

	/* remember the old name for the class-link rename below */
	old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!old_device_name) {
		error = -ENOMEM;
		goto out;
	}

	if (dev->class) {
		error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
					     kobj, old_device_name,
					     new_name, kobject_namespace(kobj));
		if (error)
			goto out;
	}

	error = kobject_rename(kobj, new_name);
	if (error)
		goto out;

out:
	put_device(dev);

	kfree(old_device_name);

	return error;
}
EXPORT_SYMBOL_GPL(device_rename);
3039
/*
 * Re-point the device's "device" symlink from @old_parent to
 * @new_parent.  Either parent may be NULL (no link to remove/create).
 */
static int device_move_class_links(struct device *dev,
				   struct device *old_parent,
				   struct device *new_parent)
{
	int error = 0;

	if (old_parent)
		sysfs_remove_link(&dev->kobj, "device");
	if (new_parent)
		error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
					  "device");
	return error;
}
3053
3054
3055
3056
3057
3058
3059
/**
 * device_move - moves a device to a new parent
 * @dev: the pointer to the struct device to be moved
 * @new_parent: the new parent of the device (can be NULL)
 * @dpm_order: how to reorder the dpm list
 *
 * Re-parents @dev in sysfs (kobject_move()), in the parent/child
 * klists and in the PM ordering lists.  On class-link failure, the
 * function attempts to move everything back to @old_parent.
 */
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order)
{
	int error;
	struct device *old_parent;
	struct kobject *new_parent_kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	device_pm_lock();
	new_parent = get_device(new_parent);
	/* may create or reference a glue dir; cleaned up on failure below */
	new_parent_kobj = get_device_parent(dev, new_parent);
	if (IS_ERR(new_parent_kobj)) {
		error = PTR_ERR(new_parent_kobj);
		put_device(new_parent);
		goto out;
	}

	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
	error = kobject_move(&dev->kobj, new_parent_kobj);
	if (error) {
		cleanup_glue_dir(dev, new_parent_kobj);
		put_device(new_parent);
		goto out;
	}
	old_parent = dev->parent;
	dev->parent = new_parent;
	if (old_parent)
		klist_remove(&dev->p->knode_parent);
	if (new_parent) {
		klist_add_tail(&dev->p->knode_parent,
			       &new_parent->p->klist_children);
		set_dev_node(dev, dev_to_node(new_parent));
	}

	if (dev->class) {
		error = device_move_class_links(dev, old_parent, new_parent);
		if (error) {
			/* We ignore errors on cleanup since we're hosed anyway... */
			device_move_class_links(dev, new_parent, old_parent);
			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
				if (new_parent)
					klist_remove(&dev->p->knode_parent);
				dev->parent = old_parent;
				if (old_parent) {
					klist_add_tail(&dev->p->knode_parent,
						       &old_parent->p->klist_children);
					set_dev_node(dev, dev_to_node(old_parent));
				}
			}
			cleanup_glue_dir(dev, new_parent_kobj);
			put_device(new_parent);
			goto out;
		}
	}
	switch (dpm_order) {
	case DPM_ORDER_NONE:
		break;
	case DPM_ORDER_DEV_AFTER_PARENT:
		device_pm_move_after(dev, new_parent);
		devices_kset_move_after(dev, new_parent);
		break;
	case DPM_ORDER_PARENT_BEFORE_DEV:
		device_pm_move_before(new_parent, dev);
		devices_kset_move_before(new_parent, dev);
		break;
	case DPM_ORDER_DEV_LAST:
		device_pm_move_last(dev);
		devices_kset_move_last(dev);
		break;
	}

	put_device(old_parent);
out:
	device_pm_unlock();
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_move);
3142
3143
3144
3145
/**
 * device_shutdown - call ->shutdown() on each device to shutdown.
 *
 * Walks the global device list from the tail (children were added
 * after their parents, so they shut down first) and invokes the
 * class/bus/driver shutdown callbacks with both the device and its
 * parent locked.
 */
void device_shutdown(void)
{
	struct device *dev, *parent;

	wait_for_device_probe();
	device_block_probing();

	spin_lock(&devices_kset->list_lock);
	/*
	 * Walk the devices list backward, shutting down each in turn.
	 * Beware that device unplug events may also start pulling
	 * devices offline, even as the system is shutting down.
	 */
	while (!list_empty(&devices_kset->list)) {
		dev = list_entry(devices_kset->list.prev, struct device,
				 kobj.entry);

		/*
		 * hold reference count of device's parent to
		 * prevent it from being freed because parent's
		 * lock is to be held
		 */
		parent = get_device(dev->parent);
		get_device(dev);
		/*
		 * Make sure the device is off the kset list, in the
		 * event that dev->*->shutdown() doesn't remove it.
		 */
		list_del_init(&dev->kobj.entry);
		spin_unlock(&devices_kset->list_lock);

		/* hold lock to avoid race with probe/release */
		if (parent)
			device_lock(parent);
		device_lock(dev);

		/* Don't allow any more runtime suspends */
		pm_runtime_get_noresume(dev);
		pm_runtime_barrier(dev);

		if (dev->class && dev->class->shutdown_pre) {
			if (initcall_debug)
				dev_info(dev, "shutdown_pre\n");
			dev->class->shutdown_pre(dev);
		}
		if (dev->bus && dev->bus->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->bus->shutdown(dev);
		} else if (dev->driver && dev->driver->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->driver->shutdown(dev);
		}

		device_unlock(dev);
		if (parent)
			device_unlock(parent);

		put_device(dev);
		put_device(parent);

		spin_lock(&devices_kset->list_lock);
	}
	spin_unlock(&devices_kset->list_lock);
}
3212
3213
3214
3215
3216
3217#ifdef CONFIG_PRINTK
/*
 * Build the printk dictionary for a device: '\0'-separated key=value
 * pairs ("SUBSYSTEM=...", then "DEVICE=...") written into @hdr.
 *
 * Returns the number of bytes used in @hdr, or 0 when the device has no
 * known subsystem or the header does not fit in @hdrlen bytes.
 */
static int
create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
{
	const char *subsys;
	size_t pos = 0;

	if (dev->class)
		subsys = dev->class->name;
	else if (dev->bus)
		subsys = dev->bus->name;
	else
		return 0;

	pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
	if (pos >= hdrlen)
		goto overflow;

	/*
	 * Add device identifier DEVICE=:
	 *   b12:8         block dev_t
	 *   c127:3        char dev_t
	 *   n8            netdev ifindex
	 *   +sound:card0  subsystem:devname
	 */
	if (MAJOR(dev->devt)) {
		char c;

		if (strcmp(subsys, "block") == 0)
			c = 'b';
		else
			c = 'c';
		pos++;	/* step past the '\0' so it separates the pairs */
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=%c%u:%u",
				c, MAJOR(dev->devt), MINOR(dev->devt));
	} else if (strcmp(subsys, "net") == 0) {
		struct net_device *net = to_net_dev(dev);

		pos++;	/* step past the '\0' so it separates the pairs */
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=n%u", net->ifindex);
	} else {
		pos++;	/* step past the '\0' so it separates the pairs */
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=+%s:%s", subsys, dev_name(dev));
	}

	if (pos >= hdrlen)
		goto overflow;

	return pos;

overflow:
	dev_WARN(dev, "device/subsystem name too long");
	return 0;
}
3274
3275int dev_vprintk_emit(int level, const struct device *dev,
3276 const char *fmt, va_list args)
3277{
3278 char hdr[128];
3279 size_t hdrlen;
3280
3281 hdrlen = create_syslog_header(dev, hdr, sizeof(hdr));
3282
3283 return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args);
3284}
3285EXPORT_SYMBOL(dev_vprintk_emit);
3286
3287int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
3288{
3289 va_list args;
3290 int r;
3291
3292 va_start(args, fmt);
3293
3294 r = dev_vprintk_emit(level, dev, fmt, args);
3295
3296 va_end(args);
3297
3298 return r;
3299}
3300EXPORT_SYMBOL(dev_printk_emit);
3301
/*
 * Common worker for the dev_printk() family: prefixes the message with
 * driver and device names when a device is supplied, otherwise falls
 * back to a plain printk().
 */
static void __dev_printk(const char *level, const struct device *dev,
			 struct va_format *vaf)
{
	if (!dev) {
		printk("%s(NULL device *): %pV", level, vaf);
		return;
	}

	/* level is a KERN_<n> string; level[1] is the ASCII digit. */
	dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
			dev_driver_string(dev), dev_name(dev), vaf);
}
3311
3312void dev_printk(const char *level, const struct device *dev,
3313 const char *fmt, ...)
3314{
3315 struct va_format vaf;
3316 va_list args;
3317
3318 va_start(args, fmt);
3319
3320 vaf.fmt = fmt;
3321 vaf.va = &args;
3322
3323 __dev_printk(level, dev, &vaf);
3324
3325 va_end(args);
3326}
3327EXPORT_SYMBOL(dev_printk);
3328
/*
 * Generate the per-level helpers (_dev_emerg() ... _dev_info()): each one
 * collects its varargs into a va_format and forwards to __dev_printk()
 * at a fixed KERN_<level>.
 */
#define define_dev_printk_level(func, kern_level)		\
void func(const struct device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__dev_printk(kern_level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
3353
3354#endif
3355
3356static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
3357{
3358 return fwnode && !IS_ERR(fwnode->secondary);
3359}
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
3370{
3371 if (fwnode) {
3372 struct fwnode_handle *fn = dev->fwnode;
3373
3374 if (fwnode_is_primary(fn))
3375 fn = fn->secondary;
3376
3377 if (fn) {
3378 WARN_ON(fwnode->secondary);
3379 fwnode->secondary = fn;
3380 }
3381 dev->fwnode = fwnode;
3382 } else {
3383 dev->fwnode = fwnode_is_primary(dev->fwnode) ?
3384 dev->fwnode->secondary : NULL;
3385 }
3386}
3387EXPORT_SYMBOL_GPL(set_primary_fwnode);
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
3399{
3400 if (fwnode)
3401 fwnode->secondary = ERR_PTR(-ENODEV);
3402
3403 if (fwnode_is_primary(dev->fwnode))
3404 dev->fwnode->secondary = fwnode;
3405 else
3406 dev->fwnode = fwnode;
3407}
3408
3409
3410
3411
3412
3413
3414
3415
3416
/**
 * device_set_of_node_from_dev - reuse device-tree node of another device
 * @dev: device whose device-tree node is being set
 * @dev2: device whose device-tree node is being reused
 *
 * Takes another reference to the new device-tree node after first dropping
 * any reference held to the old node, and marks the node as reused.
 */
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
	of_node_put(dev->of_node);
	dev->of_node = of_node_get(dev2->of_node);
	dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
3424
3425int device_match_of_node(struct device *dev, const void *np)
3426{
3427 return dev->of_node == np;
3428}
3429EXPORT_SYMBOL_GPL(device_match_of_node);
3430