1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/device.h>
14#include <linux/err.h>
15#include <linux/fwnode.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/kdev_t.h>
21#include <linux/notifier.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/genhd.h>
25#include <linux/kallsyms.h>
26#include <linux/mutex.h>
27#include <linux/pm_runtime.h>
28#include <linux/netdevice.h>
29#include <linux/sched/signal.h>
30#include <linux/sysfs.h>
31
32#include "base.h"
33#include "power/power.h"
34
#ifdef CONFIG_SYSFS_DEPRECATED
/*
 * Controls the deprecated sysfs layout.  The default depends on
 * CONFIG_SYSFS_DEPRECATED_V2 and can be overridden at boot with the
 * "sysfs.deprecated=" parameter.
 */
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/* Parse the "sysfs.deprecated=" early boot parameter. */
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
47
48
49
#ifdef CONFIG_SRCU
/*
 * Device-links list protection: a mutex serializes writers; with SRCU
 * available, readers use an SRCU read-side critical section so they
 * never block writers.
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

/* Enter an SRCU read section; the return value must be passed back to
 * device_links_read_unlock(). */
int device_links_read_lock(void)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx)
{
	srcu_read_unlock(&device_links_srcu, idx);
}
#else
/* Without SRCU, fall back to a plain rwsem for both sides. */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

/* @not_used: keeps the signature identical to the SRCU variant. */
void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}
#endif
97
98
99
100
101
102
103
104
105
/*
 * device_is_dependent - check whether @target depends on @dev
 * @dev: device to start the walk from
 * @target: device to look for
 *
 * Recursively walks @dev's children and the consumers of its device
 * links and returns nonzero if @target is found (including the WARN_ON
 * self-reference cases).  Called under device_links_write_lock() from
 * device_link_add() to reject links that would make the graph cyclic.
 */
static int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	if (WARN_ON(dev == target))
		return 1;

	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (WARN_ON(link->consumer == target))
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}
128
/*
 * device_reorder_to_tail - move @dev and everything below it to the tail
 * of the devices kset and of dpm_list
 * @dev: device to move
 * @not_used: present only to match the device_for_each_child() callback
 *            signature
 *
 * Recurses over children and consumer links so that the whole subtree
 * keeps its relative order while ending up after all other devices.
 * Used by device_link_add() so consumers come after their suppliers.
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/* Only touch lists the device is actually on. */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node)
		device_reorder_to_tail(link->consumer, NULL);

	return 0;
}
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * Returns the (possibly pre-existing) link on success, NULL on failure:
 * invalid arguments, a supplier that is not fully registered yet, a link
 * that would create a cycle, or allocation/runtime-PM errors.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/* STATELESS and AUTOREMOVE are mutually exclusive. */
	if (!consumer || !supplier ||
	    ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
		return NULL;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse dependency between the consumer and the supplier already
	 * in the graph, refuse: the link would make the graph cyclic.
	 */
	if (!device_pm_initialized(supplier)
	    || device_is_dependent(consumer, supplier)) {
		link = NULL;
		goto out;
	}

	/* Nothing to do if this link already exists; return it as-is. */
	list_for_each_entry(link, &supplier->links.consumers, s_node)
		if (link->consumer == consumer)
			goto out;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE) {
			/* Keep the supplier RPM-active for this link. */
			if (pm_runtime_get_sync(supplier) < 0) {
				pm_runtime_put_noidle(supplier);
				kfree(link);
				link = NULL;
				goto out;
			}
			link->rpm_active = true;
		}
		pm_runtime_new_link(consumer);
	}
	/* Both ends are pinned for the lifetime of the link. */
	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;

	/* Determine the initial link state from both devices' states. */
	if (flags & DL_FLAG_STATELESS) {
		link->status = DL_STATE_NONE;
	} else {
		switch (supplier->links.status) {
		case DL_DEV_DRIVER_BOUND:
			switch (consumer->links.status) {
			case DL_DEV_PROBING:
				/*
				 * Extra supplier usage count while the
				 * consumer is probing; presumably dropped
				 * by the probe completion path elsewhere
				 * in the driver core -- confirm against
				 * the full file.
				 */
				if (flags & DL_FLAG_PM_RUNTIME)
					pm_runtime_get_sync(supplier);

				link->status = DL_STATE_CONSUMER_PROBE;
				break;
			case DL_DEV_DRIVER_BOUND:
				link->status = DL_STATE_ACTIVE;
				break;
			default:
				link->status = DL_STATE_AVAILABLE;
				break;
			}
			break;
		case DL_DEV_UNBINDING:
			link->status = DL_STATE_SUPPLIER_UNBIND;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
	}

	/*
	 * Move the consumer and everything depending on it behind the
	 * supplier in the devices kset and in dpm_list, so suspend order
	 * matches the dependency.
	 */
	device_reorder_to_tail(consumer, NULL);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

 out:
	device_pm_unlock();
	device_links_write_unlock();
	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
284
/* Drop the device references taken when the link was created, then free
 * the link itself. */
static void device_link_free(struct device_link *link)
{
	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}
291
292#ifdef CONFIG_SRCU
293static void __device_link_free_srcu(struct rcu_head *rhead)
294{
295 device_link_free(container_of(rhead, struct device_link, rcu_head));
296}
297
298static void __device_link_del(struct device_link *link)
299{
300 dev_info(link->consumer, "Dropping the link to %s\n",
301 dev_name(link->supplier));
302
303 if (link->flags & DL_FLAG_PM_RUNTIME)
304 pm_runtime_drop_link(link->consumer);
305
306 list_del_rcu(&link->s_node);
307 list_del_rcu(&link->c_node);
308 call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
309}
310#else
311static void __device_link_del(struct device_link *link)
312{
313 dev_info(link->consumer, "Dropping the link to %s\n",
314 dev_name(link->supplier));
315
316 list_del(&link->s_node);
317 list_del(&link->c_node);
318 device_link_free(link);
319}
320#endif
321
322
323
324
325
326
327
328
/**
 * device_link_del - Delete a link between two devices.
 * @link: Device link to delete.
 *
 * Takes the device-links write lock and the PM lock in the same order as
 * device_link_add(), then removes and frees the link.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_pm_lock();
	__device_link_del(link);
	device_pm_unlock();
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
338
339static void device_links_missing_supplier(struct device *dev)
340{
341 struct device_link *link;
342
343 list_for_each_entry(link, &dev->links.suppliers, c_node)
344 if (link->status == DL_STATE_CONSUMER_PROBE)
345 WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
346}
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device about to be probed.
 *
 * Walk @dev's supplier links: if any stateful link is not AVAILABLE,
 * roll back the ones already flipped (device_links_missing_supplier())
 * and return -EPROBE_DEFER.  Otherwise mark them all CONSUMER_PROBE.
 * The device itself is marked DL_DEV_PROBING either way.  Links with
 * DL_FLAG_STATELESS are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->status != DL_STATE_AVAILABLE) {
			device_links_missing_supplier(dev);
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}
387
388
389
390
391
392
393
394
395
396
397
398
/**
 * device_links_driver_bound - Update links after @dev's driver bound.
 * @dev: Device whose probe just completed.
 *
 * Consumer-side links go DORMANT -> AVAILABLE (this supplier is now
 * usable); supplier-side links go CONSUMER_PROBE -> ACTIVE (this
 * consumer's probe finished).  Stateless links are skipped; any other
 * starting state triggers a WARN.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
	}

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
		WRITE_ONCE(link->status, DL_STATE_ACTIVE);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();
}
425
426
427
428
429
430
431
432
433
434
435
436
437
/*
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Consumer device whose driver is gone (or never bound).
 *
 * Delete AUTOREMOVE links to suppliers and put the remaining stateful
 * ones back into AVAILABLE unless the supplier itself is unbinding.
 * Caller holds device_links_write_lock(); reverse iteration drops the
 * most recently added links first.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE)
			__device_link_del(link);
		else if (link->status != DL_STATE_SUPPLIER_UNBIND)
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}
454
/* Locked wrapper around __device_links_no_driver(). */
void device_links_no_driver(struct device *dev)
{
	device_links_write_lock();
	__device_links_no_driver(dev);
	device_links_write_unlock();
}
461
462
463
464
465
466
467
468
469
470
471
/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just been detached.
 *
 * Consumer-side stateful links should all be in SUPPLIER_UNBIND by now
 * (see device_links_busy()/device_links_unbind_consumers()); move them
 * to DORMANT, then handle the supplier side via
 * __device_links_no_driver().
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		/* AUTOREMOVE links should already have been deleted. */
		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
/**
 * device_links_busy - Check for busy links to consumers.
 * @dev: Supplier device about to be unbound.
 *
 * Returns true if any stateful consumer of @dev is probing or has a
 * bound driver.  As a side effect, idle links are moved to
 * SUPPLIER_UNBIND and @dev itself is marked DL_DEV_UNBINDING even when
 * true is returned, preparing for the unbind.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
/**
 * device_links_unbind_consumers - Force unbind consumers of a device.
 * @dev: Supplier device being unbound.
 *
 * Walk @dev's stateful consumer links.  A consumer still probing forces
 * us to drop the lock, wait for probing to finish and restart (the list
 * may have changed meanwhile).  An ACTIVE consumer gets its driver
 * released -- again with the lock dropped and a reference held on the
 * consumer so it cannot disappear -- and the walk restarts.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (link->flags & DL_FLAG_STATELESS)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}
583
584
585
586
587
/*
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device being removed.
 *
 * By this point no supplier link should be ACTIVE and no consumer link
 * should be in anything but DORMANT or NONE; WARN otherwise, but delete
 * the links regardless.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(link);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(link);
	}

	device_links_write_unlock();
}
611
612
613
/*
 * Optional hooks settable by platform code.  NOTE(review): presumably
 * invoked on device add/remove elsewhere in this file -- confirm against
 * device_add()/device_del().
 */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
/* /sys/dev plus its per-devt link directories (char/block). */
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;
619
/* Serializes device hotplug operations (e.g. online/offline). */
static DEFINE_MUTEX(device_hotplug_lock);

void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}

void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}

/*
 * Sysfs variant: do not block on the lock -- presumably to avoid a
 * deadlock against sysfs attribute removal (confirm) -- instead back off
 * briefly and restart the syscall so the attempt is retried from scratch.
 */
int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}
641
#ifdef CONFIG_BLOCK
/* Partitions of block devices are treated specially (no "device" link,
 * etc.); everything else counts as a full device. */
static inline int device_is_not_partition(struct device *dev)
{
	return !(dev->type == &part_type);
}
#else
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif
653
654
655
656
657
658
659
660
661
662
663const char *dev_driver_string(const struct device *dev)
664{
665 struct device_driver *drv;
666
667
668
669
670
671 drv = ACCESS_ONCE(dev->driver);
672 return drv ? drv->name :
673 (dev->bus ? dev->bus->name :
674 (dev->class ? dev->class->name : ""));
675}
676EXPORT_SYMBOL(dev_driver_string);
677
678#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
679
680static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
681 char *buf)
682{
683 struct device_attribute *dev_attr = to_dev_attr(attr);
684 struct device *dev = kobj_to_dev(kobj);
685 ssize_t ret = -EIO;
686
687 if (dev_attr->show)
688 ret = dev_attr->show(dev, dev_attr, buf);
689 if (ret >= (ssize_t)PAGE_SIZE) {
690 print_symbol("dev_attr_show: %s returned bad count\n",
691 (unsigned long)dev_attr->show);
692 }
693 return ret;
694}
695
696static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
697 const char *buf, size_t count)
698{
699 struct device_attribute *dev_attr = to_dev_attr(attr);
700 struct device *dev = kobj_to_dev(kobj);
701 ssize_t ret = -EIO;
702
703 if (dev_attr->store)
704 ret = dev_attr->store(dev, dev_attr, buf, count);
705 return ret;
706}
707
/* show/store dispatch shared by all device kobjects. */
static const struct sysfs_ops dev_sysfs_ops = {
	.show = dev_attr_show,
	.store = dev_attr_store,
};

/* Recover the dev_ext_attribute containing a device_attribute. */
#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
714
715ssize_t device_store_ulong(struct device *dev,
716 struct device_attribute *attr,
717 const char *buf, size_t size)
718{
719 struct dev_ext_attribute *ea = to_ext_attr(attr);
720 char *end;
721 unsigned long new = simple_strtoul(buf, &end, 0);
722 if (end == buf)
723 return -EINVAL;
724 *(unsigned long *)(ea->var) = new;
725
726 return size;
727}
728EXPORT_SYMBOL_GPL(device_store_ulong);
729
730ssize_t device_show_ulong(struct device *dev,
731 struct device_attribute *attr,
732 char *buf)
733{
734 struct dev_ext_attribute *ea = to_ext_attr(attr);
735 return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
736}
737EXPORT_SYMBOL_GPL(device_show_ulong);
738
739ssize_t device_store_int(struct device *dev,
740 struct device_attribute *attr,
741 const char *buf, size_t size)
742{
743 struct dev_ext_attribute *ea = to_ext_attr(attr);
744 char *end;
745 long new = simple_strtol(buf, &end, 0);
746 if (end == buf || new > INT_MAX || new < INT_MIN)
747 return -EINVAL;
748 *(int *)(ea->var) = new;
749
750 return size;
751}
752EXPORT_SYMBOL_GPL(device_store_int);
753
754ssize_t device_show_int(struct device *dev,
755 struct device_attribute *attr,
756 char *buf)
757{
758 struct dev_ext_attribute *ea = to_ext_attr(attr);
759
760 return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
761}
762EXPORT_SYMBOL_GPL(device_show_int);
763
764ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
765 const char *buf, size_t size)
766{
767 struct dev_ext_attribute *ea = to_ext_attr(attr);
768
769 if (strtobool(buf, ea->var) < 0)
770 return -EINVAL;
771
772 return size;
773}
774EXPORT_SYMBOL_GPL(device_store_bool);
775
776ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
777 char *buf)
778{
779 struct dev_ext_attribute *ea = to_ext_attr(attr);
780
781 return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
782}
783EXPORT_SYMBOL_GPL(device_show_bool);
784
785
786
787
788
789
790
791
792
/*
 * device_release - kobject release callback, run when the last reference
 * to the device is dropped.
 *
 * Releases devres-managed resources, then delegates to the most specific
 * release hook available: dev->release, then the type's, then the
 * class's.  A device without any release hook is broken -- WARN loudly.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * This is the last chance to release managed (devres) resources:
	 * after this the device memory may be freed or reused by the
	 * release hooks below.
	 */
	devres_release_all(dev);

	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() "
			"function, it is broken and must be fixed.\n",
			dev_name(dev));
	kfree(p);
}
821
822static const void *device_namespace(struct kobject *kobj)
823{
824 struct device *dev = kobj_to_dev(kobj);
825 const void *ns = NULL;
826
827 if (dev->class && dev->class->ns_type)
828 ns = dev->class->namespace(dev);
829
830 return ns;
831}
832
/* kobj_type shared by every struct device's embedded kobject. */
static struct kobj_type device_ktype = {
	.release = device_release,
	.sysfs_ops = &dev_sysfs_ops,
	.namespace = device_namespace,
};
838
839
840static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
841{
842 struct kobj_type *ktype = get_ktype(kobj);
843
844 if (ktype == &device_ktype) {
845 struct device *dev = kobj_to_dev(kobj);
846 if (dev->bus)
847 return 1;
848 if (dev->class)
849 return 1;
850 }
851 return 0;
852}
853
854static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
855{
856 struct device *dev = kobj_to_dev(kobj);
857
858 if (dev->bus)
859 return dev->bus->name;
860 if (dev->class)
861 return dev->class->name;
862 return NULL;
863}
864
/*
 * Build the uevent environment for a device: device-node properties,
 * DEVTYPE/DRIVER, devicetree data, then bus-, class- and type-specific
 * additions.  Returns the last nonzero callback status, if any.
 */
static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* Add device-node properties if a dev_t is assigned. */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			/* tmp is the buffer device_get_devnode() may have
			 * allocated for @name. */
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common devicetree information about the device. */
	of_device_uevent(dev, env);

	/* Let the bus-specific function add its variables. */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* Let the class-specific function add its variables. */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* Let the device-type-specific function add its variables. */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}
931
/* uevent callbacks for the devices kset. */
static const struct kset_uevent_ops device_uevent_ops = {
	.filter = dev_uevent_filter,
	.name = dev_uevent_name,
	.uevent = dev_uevent,
};
937
/*
 * "uevent" attribute read: synthesize the uevent environment that would
 * be sent for this device and print it as "KEY=value" lines.  Devices
 * outside any uevent-capable kset, filtered devices, or a failing
 * uevent() callback all yield an empty read (count 0), not an error.
 */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	size_t count = 0;
	int retval;

	/* Find the kset the uevent would be sent through (possibly a
	 * parent's). */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* Respect the kset's filter: some devices emit no uevents. */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* Collect the keys the kset-specific function would add. */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* Copy the collected pairs to the sysfs page, one per line. */
	for (i = 0; i < env->envp_idx; i++)
		count += sprintf(&buf[count], "%s\n", env->envp[i]);
out:
	kfree(env);
	return count;
}
980
981static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
982 const char *buf, size_t count)
983{
984 if (kobject_synth_uevent(&dev->kobj, buf, count))
985 dev_err(dev, "uevent: failed to send synthetic uevent\n");
986
987 return count;
988}
989static DEVICE_ATTR_RW(uevent);
990
991static ssize_t online_show(struct device *dev, struct device_attribute *attr,
992 char *buf)
993{
994 bool val;
995
996 device_lock(dev);
997 val = !dev->offline;
998 device_unlock(dev);
999 return sprintf(buf, "%u\n", val);
1000}
1001
/*
 * "online" attribute write: a truthy value onlines the device, a falsy
 * one offlines it, under the device hotplug lock.
 */
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	/* May restart the syscall rather than block on the lock. */
	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);
1021
/* Create all sysfs attribute groups in the NULL-terminated @groups
 * array for @dev. */
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}

/* Remove attribute groups previously created by device_add_groups(). */
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
1032
/*
 * Create the standard sysfs attributes for @dev: the class's dev_groups,
 * the type's groups, the device's own groups and, when offlining is
 * supported and not disabled, the "online" attribute.  On error,
 * everything created so far is unwound in reverse order.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	return 0;

	/* Unwind in strict reverse order of creation. */
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}
1074
/* Remove everything device_add_attrs() created, in reverse order.
 * device_remove_file() is a no-op if "online" was never created. */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}
1089
/* "dev" attribute: print the device number as "major:minor\n". */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);
1096
1097
/* /sys/devices: the kset containing every registered struct device. */
struct kset *devices_kset;
1099
1100
1101
1102
1103
1104
/*
 * devices_kset_move_before - Move @deva in the devices kset's list so it
 * comes immediately before @devb.  No-op before the kset exists.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
1115
1116
1117
1118
1119
1120
/*
 * devices_kset_move_after - Move @deva in the devices kset's list so it
 * comes immediately after @devb.  No-op before the kset exists.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
1131
1132
1133
1134
1135
/*
 * devices_kset_move_last - Move @dev to the end of the devices kset's
 * list (used by device_reorder_to_tail()).  No-op before the kset
 * exists.
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}
1145
1146
1147
1148
1149
1150
/**
 * device_create_file - create sysfs attribute file for device.
 * @dev: device.
 * @attr: device attribute descriptor.
 *
 * Warns about attributes whose mode claims read/write permission but
 * that lack the matching show/store callback (reads/writes would then
 * fail with -EIO).  A NULL @dev is silently accepted and returns 0.
 */
int device_create_file(struct device *dev,
		       const struct device_attribute *attr)
{
	int error = 0;

	if (dev) {
		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
			"Attribute %s: write permission without 'store'\n",
			attr->attr.name);
		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
			"Attribute %s: read permission without 'show'\n",
			attr->attr.name);
		error = sysfs_create_file(&dev->kobj, &attr->attr);
	}

	return error;
}
EXPORT_SYMBOL_GPL(device_create_file);
1169
1170
1171
1172
1173
1174
1175void device_remove_file(struct device *dev,
1176 const struct device_attribute *attr)
1177{
1178 if (dev)
1179 sysfs_remove_file(&dev->kobj, &attr->attr);
1180}
1181EXPORT_SYMBOL_GPL(device_remove_file);
1182
1183
1184
1185
1186
1187
1188
1189
1190bool device_remove_file_self(struct device *dev,
1191 const struct device_attribute *attr)
1192{
1193 if (dev)
1194 return sysfs_remove_file_self(&dev->kobj, &attr->attr);
1195 else
1196 return false;
1197}
1198EXPORT_SYMBOL_GPL(device_remove_file_self);
1199
1200
1201
1202
1203
1204
1205int device_create_bin_file(struct device *dev,
1206 const struct bin_attribute *attr)
1207{
1208 int error = -EINVAL;
1209 if (dev)
1210 error = sysfs_create_bin_file(&dev->kobj, attr);
1211 return error;
1212}
1213EXPORT_SYMBOL_GPL(device_create_bin_file);
1214
1215
1216
1217
1218
1219
1220void device_remove_bin_file(struct device *dev,
1221 const struct bin_attribute *attr)
1222{
1223 if (dev)
1224 sysfs_remove_bin_file(&dev->kobj, attr);
1225}
1226EXPORT_SYMBOL_GPL(device_remove_bin_file);
1227
1228static void klist_children_get(struct klist_node *n)
1229{
1230 struct device_private *p = to_device_private_parent(n);
1231 struct device *dev = p->device;
1232
1233 get_device(dev);
1234}
1235
1236static void klist_children_put(struct klist_node *n)
1237{
1238 struct device_private *p = to_device_private_parent(n);
1239 struct device *dev = p->device;
1240
1241 put_device(dev);
1242}
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * Initializes the embedded kobject, lists, locks, PM state and the
 * device-links lists so the device can be reference-counted with
 * get_device()/put_device() and later registered with device_add().
 * Once this has run, drop references with put_device(), never kfree().
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
	/* The device lock is taken in too many contexts for lockdep to
	 * validate it meaningfully. */
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
1283
/*
 * Lazily create and return /sys/devices/virtual, the parent directory
 * for class devices without a parent.
 * NOTE(review): the check-then-create is not locked here; presumably
 * callers serialize (get_device_parent() calls this just before taking
 * gdp_mutex) -- confirm first-use racing is impossible.
 */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}
1294
/* A "glue" directory: a kobject grouping a class's devices under a
 * common parent, remembering which class it belongs to. */
struct class_dir {
	struct kobject kobj;
	struct class *class;
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)

/* kobject release: the class_dir owns only itself. */
static void class_dir_release(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	kfree(dir);
}

/* Children of a glue dir inherit the class's namespace type. */
static const
struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	return dir->class->ns_type;
}

static struct kobj_type class_dir_ktype = {
	.release = class_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.child_ns_type = class_dir_child_ns_type
};
1320
/*
 * Allocate and register a glue directory named after @class under
 * @parent_kobj, adding it to the class's glue_dirs kset.  On failure the
 * kobject_put() frees the allocation via class_dir_release().  Returns
 * the new kobject or NULL.
 */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return NULL;

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return NULL;
	}
	return &dir->kobj;
}
1343
/* Serializes glue-directory lookup/creation against their cleanup. */
static DEFINE_MUTEX(gdp_mutex);

/*
 * Determine the sysfs parent kobject for @dev.  Class devices may land
 * in /sys/devices/virtual, directly under their parent, or in a per-
 * class "glue" directory that is looked up or created under gdp_mutex.
 * Non-class devices use the bus's default root or the parent directly.
 */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* Deprecated layout: block devices live in /sys/block. */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * No parent -> live in "virtual".  A class-device parent
		 * (without namespaces involved) is used directly; any
		 * other parent gets a glue dir interposed.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* Reuse an existing glue dir at this parent if there is
		 * one, taking a reference on it. */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* Otherwise create a new glue dir at the parent. */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* Still under gdp_mutex so cleanup cannot race with us. */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* Subsystems can specify a default root for their devices. */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}
1405
1406static inline bool live_in_glue_dir(struct kobject *kobj,
1407 struct device *dev)
1408{
1409 if (!kobj || !dev->class ||
1410 kobj->kset != &dev->class->p->glue_dirs)
1411 return false;
1412 return true;
1413}
1414
/* A device placed in a glue dir has it as its sysfs parent kobject. */
static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}
1419
1420
1421
1422
1423
1424
/*
 * Drop the reference on a glue directory taken by get_device_parent().
 * gdp_mutex makes the final kobject_put() atomic with respect to a
 * concurrent get_device_parent() lookup, so the dir cannot be found
 * while it is being released.
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	/* Nothing to do unless @glue_dir really is one of ours. */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}
1435
/*
 * Create @dev's standard symlinks: "of_node" (when it has a devicetree
 * node), and for class devices "subsystem", "device" (to the parent)
 * and the class-side link named after the device.  On error, links
 * created so far are removed in reverse order; removing a link that was
 * never created is a harmless failure.
 */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, &of_node->kobj,"of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n",error);
		/* An error here is not fatal. */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* Deprecated layout: skip the class-side link for block devices. */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* Link in the class directory pointing to the device. */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}
1487
/* Remove the symlinks created by device_add_class_symlinks(), honouring
 * the same deprecated-block-layout special case. */
static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}
1505
1506
1507
1508
1509
1510
1511int dev_set_name(struct device *dev, const char *fmt, ...)
1512{
1513 va_list vargs;
1514 int err;
1515
1516 va_start(vargs, fmt);
1517 err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
1518 va_end(vargs);
1519 return err;
1520}
1521EXPORT_SYMBOL_GPL(dev_set_name);
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534static struct kobject *device_to_dev_kobj(struct device *dev)
1535{
1536 struct kobject *kobj;
1537
1538 if (dev->class)
1539 kobj = dev->class->dev_kobj;
1540 else
1541 kobj = sysfs_dev_char_kobj;
1542
1543 return kobj;
1544}
1545
1546static int device_create_sys_dev_entry(struct device *dev)
1547{
1548 struct kobject *kobj = device_to_dev_kobj(dev);
1549 int error = 0;
1550 char devt_str[15];
1551
1552 if (kobj) {
1553 format_dev_t(devt_str, dev->devt);
1554 error = sysfs_create_link(kobj, &dev->kobj, devt_str);
1555 }
1556
1557 return error;
1558}
1559
1560static void device_remove_sys_dev_entry(struct device *dev)
1561{
1562 struct kobject *kobj = device_to_dev_kobj(dev);
1563 char devt_str[15];
1564
1565 if (kobj) {
1566 format_dev_t(devt_str, dev->devt);
1567 sysfs_remove_link(kobj, devt_str);
1568 }
1569}
1570
1571int device_private_init(struct device *dev)
1572{
1573 dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
1574 if (!dev->p)
1575 return -ENOMEM;
1576 dev->p->device = dev;
1577 klist_init(&dev->p->klist_children, klist_children_get,
1578 klist_children_put);
1579 INIT_LIST_HEAD(&dev->p->deferred_probe);
1580 return 0;
1581}
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
/**
 * device_add - add a device to the device hierarchy.
 * @dev: device to add (must already have been initialized, e.g. by
 *	 device_initialize())
 *
 * Names the device, adds its kobject to sysfs under the proper parent
 * (possibly a class glue directory), creates the standard attributes and
 * symlinks, registers it with its bus and PM core, emits the KOBJ_ADD
 * uevent and triggers driver probing.  Finally the device is linked into
 * its parent's children list and its class's device list, and the class
 * interfaces are notified.
 *
 * Returns 0 on success or a negative errno; on failure everything done so
 * far is unwound via the label ladder at the bottom (in reverse order of
 * setup).
 *
 * NOTE: this function takes its own reference on @dev and drops it before
 * returning, so the caller's reference is untouched.
 */
int device_add(struct device *dev)
{
	struct device *parent;
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
	struct kobject *glue_dir = NULL;

	dev = get_device(dev);
	if (!dev)
		goto done;

	/* Lazily allocate the driver-core private data if needed. */
	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * Move init_name into the kobject name and clear it so stale
	 * pointers (e.g. to stack memory) are not kept around.
	 * NOTE(review): a dev_set_name() failure here is not checked
	 * directly; it is caught below by the !dev_name() test, albeit
	 * reported as -EINVAL rather than -ENOMEM.
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	/* Subsystems can supply a default name: "<bus->dev_name><id>". */
	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	/* The sysfs parent may be a class glue dir, not the parent device. */
	parent = get_device(dev->parent);
	kobj = get_device_parent(dev, parent);
	if (kobj)
		dev->kobj.parent = kobj;

	/* Inherit the parent's NUMA node unless one was set explicitly. */
	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
		set_dev_node(dev, dev_to_node(parent));

	/* First, register with generic layer (sysfs). */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error) {
		glue_dir = get_glue_dir(dev);
		goto Error;
	}

	/* Notify platform-specific code, if any. */
	if (platform_notify)
		platform_notify(dev);

	error = device_create_file(dev, &dev_attr_uevent);
	if (error)
		goto attrError;

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	/* Devices with a dev_t get a "dev" attribute and a devtmpfs node. */
	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &dev_attr_dev);
		if (error)
			goto DevAttrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto SysEntryError;

		devtmpfs_create_node(dev);
	}

	/* Notify bus listeners before userspace sees the KOBJ_ADD uevent. */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);
	bus_probe_device(dev);
	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* Tie the device to its class. */
		klist_add_tail(&dev->knode_class,
			       &dev->class->p->klist_devices);

		/* Notify any interfaces that the device is here. */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
	/* Error unwinding: reverse order of the setup steps above. */
 SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
	cleanup_glue_dir(dev, glue_dir);
	put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
1750EXPORT_SYMBOL_GPL(device_add);
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
/**
 * device_register - initialize and add a device in one call.
 * @dev: device to register
 *
 * Equivalent to device_initialize() followed by device_add().  On error
 * the caller must drop the reference with put_device() rather than
 * freeing @dev directly (device_initialize() has already set up the
 * kobject refcount).
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
1775EXPORT_SYMBOL_GPL(device_register);
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785struct device *get_device(struct device *dev)
1786{
1787 return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
1788}
1789EXPORT_SYMBOL_GPL(get_device);
1790
1791
1792
1793
1794
1795void put_device(struct device *dev)
1796{
1797
1798 if (dev)
1799 kobject_put(&dev->kobj);
1800}
1801EXPORT_SYMBOL_GPL(put_device);
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
/**
 * device_del - remove a device from the system.
 * @dev: device to delete
 *
 * Reverses device_add(): notifies the bus, purges device links, tears
 * down PM sysfs, unlinks from the parent's children list, removes the
 * dev_t entries, detaches from the class (notifying class interfaces),
 * removes attributes, unregisters from the bus and PM core, and finally
 * deletes the kobject from sysfs after emitting KOBJ_REMOVE.
 *
 * The caller still holds its reference on @dev afterwards; use
 * device_unregister() (or a manual put_device()) to drop it.
 */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;

	/* Notify clients of device removal before we start tearing down. */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);

	device_links_purge(dev);
	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &dev_attr_dev);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->mutex);
		/* Notify any interfaces that the device is now gone. */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* Remove the device from the class's list of devices. */
		klist_del(&dev->knode_class);
		mutex_unlock(&dev->class->p->mutex);
	}
	device_remove_file(dev, &dev_attr_uevent);
	device_remove_attrs(dev);
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_remove_properties(dev);

	/* Notify platform-specific code, then the bus, that we're done. */
	if (platform_notify_remove)
		platform_notify_remove(dev);
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	/* Capture the glue dir before kobject_del() clears the parent. */
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
	cleanup_glue_dir(dev, glue_dir);
	put_device(parent);
}
1872EXPORT_SYMBOL_GPL(device_del);
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
/**
 * device_unregister - remove a device and drop the caller's reference.
 * @dev: device to unregister
 *
 * Calls device_del() and then put_device().  The device may not be freed
 * immediately if other code still holds references to it.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	put_device(dev);
}
1891EXPORT_SYMBOL_GPL(device_unregister);
1892
1893static struct device *prev_device(struct klist_iter *i)
1894{
1895 struct klist_node *n = klist_prev(i);
1896 struct device *dev = NULL;
1897 struct device_private *p;
1898
1899 if (n) {
1900 p = to_device_private_parent(n);
1901 dev = p->device;
1902 }
1903 return dev;
1904}
1905
1906static struct device *next_device(struct klist_iter *i)
1907{
1908 struct klist_node *n = klist_next(i);
1909 struct device *dev = NULL;
1910 struct device_private *p;
1911
1912 if (n) {
1913 p = to_device_private_parent(n);
1914 dev = p->device;
1915 }
1916 return dev;
1917}
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
/**
 * device_get_devnode - obtain the device node name for @dev.
 * @dev:  device to query
 * @mode: optionally filled in by the type/class devnode callbacks
 * @uid:  optionally filled in by the type devnode callback
 * @gid:  optionally filled in by the type devnode callback
 * @tmp:  out parameter; set to an allocated string when one had to be
 *	  created here (the '!'-to-'/' rewrite), in which case the caller
 *	  must kfree(*tmp).  NOTE(review): when *tmp comes from the
 *	  type/class devnode callback its ownership depends on that
 *	  callback's contract -- confirm with the callers.
 *
 * Lookup order: device-type callback, then class callback, then the plain
 * device name; names containing '!' are duplicated with '!' replaced by
 * '/'.  Returns NULL only on allocation failure of that duplicate.
 */
const char *device_get_devnode(struct device *dev,
			       umode_t *mode, kuid_t *uid, kgid_t *gid,
			       const char **tmp)
{
	char *s;

	*tmp = NULL;

	/* The device type may provide a specific name. */
	if (dev->type && dev->type->devnode)
		*tmp = dev->type->devnode(dev, mode, uid, gid);
	if (*tmp)
		return *tmp;

	/* The class may provide a specific name. */
	if (dev->class && dev->class->devnode)
		*tmp = dev->class->devnode(dev, mode);
	if (*tmp)
		return *tmp;

	/* Return the name without allocation when it needs no rewriting. */
	if (strchr(dev_name(dev), '!') == NULL)
		return dev_name(dev);

	/* Replace '!' in the name with '/' (e.g. "cciss!c0d0" -> "cciss/c0d0"). */
	s = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!s)
		return NULL;
	strreplace(s, '!', '/');
	return *tmp = s;
}
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976int device_for_each_child(struct device *parent, void *data,
1977 int (*fn)(struct device *dev, void *data))
1978{
1979 struct klist_iter i;
1980 struct device *child;
1981 int error = 0;
1982
1983 if (!parent->p)
1984 return 0;
1985
1986 klist_iter_init(&parent->p->klist_children, &i);
1987 while ((child = next_device(&i)) && !error)
1988 error = fn(child, data);
1989 klist_iter_exit(&i);
1990 return error;
1991}
1992EXPORT_SYMBOL_GPL(device_for_each_child);
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006int device_for_each_child_reverse(struct device *parent, void *data,
2007 int (*fn)(struct device *dev, void *data))
2008{
2009 struct klist_iter i;
2010 struct device *child;
2011 int error = 0;
2012
2013 if (!parent->p)
2014 return 0;
2015
2016 klist_iter_init(&parent->p->klist_children, &i);
2017 while ((child = prev_device(&i)) && !error)
2018 error = fn(child, data);
2019 klist_iter_exit(&i);
2020 return error;
2021}
2022EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041struct device *device_find_child(struct device *parent, void *data,
2042 int (*match)(struct device *dev, void *data))
2043{
2044 struct klist_iter i;
2045 struct device *child;
2046
2047 if (!parent)
2048 return NULL;
2049
2050 klist_iter_init(&parent->p->klist_children, &i);
2051 while ((child = next_device(&i)))
2052 if (match(child, data) && get_device(child))
2053 break;
2054 klist_iter_exit(&i);
2055 return child;
2056}
2057EXPORT_SYMBOL_GPL(device_find_child);
2058
/*
 * devices_init - create the top-level "devices" kset and the /sys/dev
 * hierarchy ("dev", "dev/block", "dev/char").
 *
 * Called once at boot.  On failure the objects created so far are torn
 * down via the goto ladder in reverse creation order, and -ENOMEM is
 * returned.
 */
int __init devices_init(void)
{
	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
	if (!devices_kset)
		return -ENOMEM;
	dev_kobj = kobject_create_and_add("dev", NULL);
	if (!dev_kobj)
		goto dev_kobj_err;
	sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
	if (!sysfs_dev_block_kobj)
		goto block_kobj_err;
	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
	if (!sysfs_dev_char_kobj)
		goto char_kobj_err;

	return 0;

 char_kobj_err:
	kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
	kobject_put(dev_kobj);
 dev_kobj_err:
	kset_unregister(devices_kset);
	return -ENOMEM;
}
2084
2085static int device_check_offline(struct device *dev, void *not_used)
2086{
2087 int ret;
2088
2089 ret = device_for_each_child(dev, NULL, device_check_offline);
2090 if (ret)
2091 return ret;
2092
2093 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
2094}
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
/**
 * device_offline - take @dev offline via its bus ->offline() callback.
 * @dev: device to offline
 *
 * Refuses (-EPERM) when offlining is disabled for the device, and -EBUSY
 * when any descendant supporting offline is still online.  Returns 1 if
 * the device was already offline, 0 on success, or the bus callback's
 * error.
 *
 * NOTE(review): the children check runs before device_lock() is taken,
 * so a child could come back online in between -- presumably callers
 * serialize this externally (e.g. via the hotplug lock); confirm.
 */
int device_offline(struct device *dev)
{
	int ret;

	if (dev->offline_disabled)
		return -EPERM;

	ret = device_for_each_child(dev, NULL, device_check_offline);
	if (ret)
		return ret;

	device_lock(dev);
	if (device_supports_offline(dev)) {
		if (dev->offline) {
			ret = 1;
		} else {
			ret = dev->bus->offline(dev);
			if (!ret) {
				/* Tell userspace, then record the new state. */
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
				dev->offline = true;
			}
		}
	}
	device_unlock(dev);

	return ret;
}
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
/**
 * device_online - bring @dev back online via its bus ->online() callback.
 * @dev: device to online
 *
 * Returns 1 if the device was already online, 0 on success, or the bus
 * callback's error.  Devices that do not support offline/online are
 * treated as a successful no-op (returns 0).
 */
int device_online(struct device *dev)
{
	int ret = 0;

	device_lock(dev);
	if (device_supports_offline(dev)) {
		if (dev->offline) {
			ret = dev->bus->online(dev);
			if (!ret) {
				/* Tell userspace, then record the new state. */
				kobject_uevent(&dev->kobj, KOBJ_ONLINE);
				dev->offline = false;
			}
		} else {
			ret = 1;
		}
	}
	device_unlock(dev);

	return ret;
}
2165
/* A parentless "root" device, optionally tied to the module that owns it. */
struct root_device {
	struct device dev;	/* embedded device; kobject provides lifetime */
	struct module *owner;	/* module linked via the "module" symlink */
};

static inline struct root_device *to_root_device(struct device *d)
{
	return container_of(d, struct root_device, dev);
}

/* Release callback: the root_device was kzalloc'ed, so just free it. */
static void root_device_release(struct device *dev)
{
	kfree(to_root_device(dev));
}
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
/**
 * __root_device_register - allocate and register a root device.
 * @name:  name for the new device
 * @owner: module owning the device; when non-NULL (and CONFIG_MODULES),
 *	   a "module" symlink to the module's kobject is created.
 *
 * Returns the embedded &struct device on success or an ERR_PTR() on
 * failure.  Note the different cleanup per stage: before device_register()
 * the allocation is freed directly; afterwards put_device() /
 * device_unregister() are used so root_device_release() does the freeing.
 */
struct device *__root_device_register(const char *name, struct module *owner)
{
	struct root_device *root;
	int err = -ENOMEM;

	root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
	if (!root)
		return ERR_PTR(err);

	err = dev_set_name(&root->dev, "%s", name);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	root->dev.release = root_device_release;

	err = device_register(&root->dev);
	if (err) {
		/* device_register() failed: drop the kobject reference. */
		put_device(&root->dev);
		return ERR_PTR(err);
	}

#ifdef CONFIG_MODULES	/* gotta find a "cleaner" way to do this */
	if (owner) {
		struct module_kobject *mk = &owner->mkobj;

		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
		if (err) {
			device_unregister(&root->dev);
			return ERR_PTR(err);
		}
		root->owner = owner;
	}
#endif

	return &root->dev;
}
2241EXPORT_SYMBOL_GPL(__root_device_register);
2242
2243
2244
2245
2246
2247
2248
2249
2250void root_device_unregister(struct device *dev)
2251{
2252 struct root_device *root = to_root_device(dev);
2253
2254 if (root->owner)
2255 sysfs_remove_link(&root->dev.kobj, "module");
2256
2257 device_unregister(dev);
2258}
2259EXPORT_SYMBOL_GPL(root_device_unregister);
2260
2261
/* Release callback for devices allocated by device_create_groups_vargs(). */
static void device_create_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}
2267
/*
 * device_create_groups_vargs - common worker for the device_create family.
 *
 * Allocates a device, initializes it with the given class/parent/devt/
 * drvdata/attribute groups, names it from @fmt/@args and adds it.  Returns
 * the new device or an ERR_PTR().  All failure paths funnel through
 * put_device(), which is a safe no-op for the early dev == NULL case and
 * otherwise frees the allocation via device_create_release().
 */
static struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
			   dev_t devt, void *drvdata,
			   const struct attribute_group **groups,
			   const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	device_initialize(dev);
	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
/**
 * device_create_vargs - create a class device, va_list variant.
 * @class:   class the device is registered with
 * @parent:  parent device, may be NULL
 * @devt:    dev_t for the char device (use 0 for none)
 * @drvdata: driver data stored via dev_set_drvdata()
 * @fmt:     device name format string
 * @args:    va_list for @fmt
 *
 * Thin wrapper around device_create_groups_vargs() with no attribute
 * groups.  Returns the new device or an ERR_PTR().
 */
struct device *device_create_vargs(struct class *class, struct device *parent,
				   dev_t devt, void *drvdata, const char *fmt,
				   va_list args)
{
	return device_create_groups_vargs(class, parent, devt, drvdata, NULL,
					  fmt, args);
}
2341EXPORT_SYMBOL_GPL(device_create_vargs);
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
/**
 * device_create - create a class device, printf-style variant.
 * @class:   class the device is registered with
 * @parent:  parent device, may be NULL
 * @devt:    dev_t for the char device (use 0 for none)
 * @drvdata: driver data stored via dev_set_drvdata()
 * @fmt:     device name format string, followed by its arguments
 *
 * Variadic front end to device_create_vargs().  Returns the new device
 * or an ERR_PTR().
 */
struct device *device_create(struct class *class, struct device *parent,
			     dev_t devt, void *drvdata, const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
	va_end(vargs);
	return dev;
}
2378EXPORT_SYMBOL_GPL(device_create);
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
/**
 * device_create_with_groups - create a class device with sysfs groups.
 * @class:   class the device is registered with
 * @parent:  parent device, may be NULL
 * @devt:    dev_t for the char device (use 0 for none)
 * @drvdata: driver data stored via dev_set_drvdata()
 * @groups:  NULL-terminated attribute groups created atomically with the
 *	     device, so userspace never sees the device without them
 * @fmt:     device name format string, followed by its arguments
 *
 * Returns the new device or an ERR_PTR().
 */
struct device *device_create_with_groups(struct class *class,
					 struct device *parent, dev_t devt,
					 void *drvdata,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
					 fmt, vargs);
	va_end(vargs);
	return dev;
}
2422EXPORT_SYMBOL_GPL(device_create_with_groups);
2423
2424static int __match_devt(struct device *dev, const void *data)
2425{
2426 const dev_t *devt = data;
2427
2428 return dev->devt == *devt;
2429}
2430
2431
2432
2433
2434
2435
2436
2437
2438
/**
 * device_destroy - remove a device created with device_create().
 * @class: class the device was registered with
 * @devt:  dev_t of the device that was previously registered
 *
 * Looks the device up by dev_t.  The put_device() drops the reference
 * taken by class_find_device() before device_unregister() drops the
 * registration reference -- the lookup reference alone must not keep the
 * device alive past unregistration.
 */
void device_destroy(struct class *class, dev_t devt)
{
	struct device *dev;

	dev = class_find_device(class, NULL, &devt, __match_devt);
	if (dev) {
		put_device(dev);
		device_unregister(dev);
	}
}
2449EXPORT_SYMBOL_GPL(device_destroy);
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
/**
 * device_rename - rename a device's sysfs entry.
 * @dev:      device being renamed
 * @new_name: the new name
 *
 * Renames the class-directory symlink (for class devices) and then the
 * kobject itself.  Returns 0 on success or a negative errno.  Note there
 * is no rollback: if kobject_rename() fails after the symlink was renamed,
 * the two can be left inconsistent -- renaming devices from drivers is
 * generally discouraged for exactly this kind of reason.
 */
int device_rename(struct device *dev, const char *new_name)
{
	struct kobject *kobj = &dev->kobj;
	char *old_device_name = NULL;
	int error;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	dev_dbg(dev, "renaming to %s\n", new_name);

	/* Keep the old name around for the symlink rename below. */
	old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!old_device_name) {
		error = -ENOMEM;
		goto out;
	}

	if (dev->class) {
		error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
					     kobj, old_device_name,
					     new_name, kobject_namespace(kobj));
		if (error)
			goto out;
	}

	error = kobject_rename(kobj, new_name);
	if (error)
		goto out;

out:
	put_device(dev);

	kfree(old_device_name);

	return error;
}
2527EXPORT_SYMBOL_GPL(device_rename);
2528
2529static int device_move_class_links(struct device *dev,
2530 struct device *old_parent,
2531 struct device *new_parent)
2532{
2533 int error = 0;
2534
2535 if (old_parent)
2536 sysfs_remove_link(&dev->kobj, "device");
2537 if (new_parent)
2538 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
2539 "device");
2540 return error;
2541}
2542
2543
2544
2545
2546
2547
2548
/**
 * device_move - move a device to a new parent.
 * @dev:        device to move
 * @new_parent: new parent, may be NULL
 * @dpm_order:  how to reorder the device in the PM lists afterwards
 *
 * Moves the kobject in sysfs, rewires the parent pointer and children
 * klists, inherits the new parent's NUMA node and fixes up the class
 * "device" symlink.  If the symlink fixup fails, everything is rolled
 * back to the old parent.  Returns 0 or a negative errno.
 */
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order)
{
	int error;
	struct device *old_parent;
	struct kobject *new_parent_kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	device_pm_lock();
	new_parent = get_device(new_parent);
	/* May return (and take a reference on) a class glue directory. */
	new_parent_kobj = get_device_parent(dev, new_parent);

	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
	error = kobject_move(&dev->kobj, new_parent_kobj);
	if (error) {
		cleanup_glue_dir(dev, new_parent_kobj);
		put_device(new_parent);
		goto out;
	}
	old_parent = dev->parent;
	dev->parent = new_parent;
	if (old_parent)
		klist_remove(&dev->p->knode_parent);
	if (new_parent) {
		klist_add_tail(&dev->p->knode_parent,
			       &new_parent->p->klist_children);
		set_dev_node(dev, dev_to_node(new_parent));
	}

	if (dev->class) {
		error = device_move_class_links(dev, old_parent, new_parent);
		if (error) {
			/* Symlink failed: try to move everything back. */
			device_move_class_links(dev, new_parent, old_parent);
			/*
			 * NOTE(review): &old_parent->kobj is formed even when
			 * old_parent is NULL -- confirm kobject_move() and the
			 * original topology make that unreachable/benign here.
			 */
			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
				if (new_parent)
					klist_remove(&dev->p->knode_parent);
				dev->parent = old_parent;
				if (old_parent) {
					klist_add_tail(&dev->p->knode_parent,
						       &old_parent->p->klist_children);
					set_dev_node(dev, dev_to_node(old_parent));
				}
			}
			cleanup_glue_dir(dev, new_parent_kobj);
			put_device(new_parent);
			goto out;
		}
	}
	switch (dpm_order) {
	case DPM_ORDER_NONE:
		break;
	case DPM_ORDER_DEV_AFTER_PARENT:
		device_pm_move_after(dev, new_parent);
		devices_kset_move_after(dev, new_parent);
		break;
	case DPM_ORDER_PARENT_BEFORE_DEV:
		device_pm_move_before(new_parent, dev);
		devices_kset_move_before(new_parent, dev);
		break;
	case DPM_ORDER_DEV_LAST:
		device_pm_move_last(dev);
		devices_kset_move_last(dev);
		break;
	}

	put_device(old_parent);
out:
	device_pm_unlock();
	put_device(dev);
	return error;
}
2625EXPORT_SYMBOL_GPL(device_move);
2626
2627
2628
2629
/**
 * device_shutdown - call ->shutdown() on every device, children first.
 *
 * Walks the global devices_kset list from the tail (most recently added,
 * i.e. children before parents).  For each device it takes references on
 * the device and its parent, removes the device from the list, drops the
 * list lock, and invokes exactly one shutdown callback in priority order:
 * class, then bus, then driver.
 */
void device_shutdown(void)
{
	struct device *dev, *parent;

	spin_lock(&devices_kset->list_lock);
	/*
	 * Walk the devices list backward, shutting down each in turn.
	 * Beware that device unplug events may also start pulling
	 * devices offline, even as the system is shutting down.
	 */
	while (!list_empty(&devices_kset->list)) {
		dev = list_entry(devices_kset->list.prev, struct device,
				 kobj.entry);

		/*
		 * Hold references so neither the device nor its parent can
		 * be freed while the list lock is dropped below.
		 */
		parent = get_device(dev->parent);
		get_device(dev);
		/*
		 * Make sure the device is off the kset list, in case it
		 * gets re-added after the lock is dropped.
		 */
		list_del_init(&dev->kobj.entry);
		spin_unlock(&devices_kset->list_lock);

		/* Lock the parent first to keep the usual lock ordering. */
		if (parent)
			device_lock(parent);
		device_lock(dev);

		/* Don't allow any more runtime suspends. */
		pm_runtime_get_noresume(dev);
		pm_runtime_barrier(dev);

		if (dev->class && dev->class->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->class->shutdown(dev);
		} else if (dev->bus && dev->bus->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->bus->shutdown(dev);
		} else if (dev->driver && dev->driver->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->driver->shutdown(dev);
		}

		device_unlock(dev);
		if (parent)
			device_unlock(parent);

		put_device(dev);
		put_device(parent);

		spin_lock(&devices_kset->list_lock);
	}
	spin_unlock(&devices_kset->list_lock);
}
2692
2693
2694
2695
2696
2697#ifdef CONFIG_PRINTK
/*
 * create_syslog_header - build the structured log dictionary for @dev.
 *
 * Writes NUL-separated "KEY=value" pairs into @hdr: always "SUBSYSTEM=",
 * then exactly one "DEVICE=" record identifying the device.  The pos++
 * before each subsequent snprintf() deliberately leaves the previous
 * string's terminating NUL in place as the pair separator.
 *
 * Returns the total length of the dictionary, or 0 when the device has
 * neither class nor bus or when @hdr would overflow (in which case a
 * warning is emitted and the header is dropped entirely).
 */
static int
create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
{
	const char *subsys;
	size_t pos = 0;

	if (dev->class)
		subsys = dev->class->name;
	else if (dev->bus)
		subsys = dev->bus->name;
	else
		return 0;

	pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
	if (pos >= hdrlen)
		goto overflow;

	/*
	 * Add device identifier DEVICE=:
	 *   b12:8        block dev_t
	 *   c127:3       char dev_t
	 *   n8           netdev ifindex
	 *   +sound:card0 subsystem:devname
	 */
	if (MAJOR(dev->devt)) {
		char c;

		if (strcmp(subsys, "block") == 0)
			c = 'b';
		else
			c = 'c';
		pos++;		/* keep the NUL as pair separator */
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=%c%u:%u",
				c, MAJOR(dev->devt), MINOR(dev->devt));
	} else if (strcmp(subsys, "net") == 0) {
		struct net_device *net = to_net_dev(dev);

		pos++;		/* keep the NUL as pair separator */
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=n%u", net->ifindex);
	} else {
		pos++;		/* keep the NUL as pair separator */
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=+%s:%s", subsys, dev_name(dev));
	}

	if (pos >= hdrlen)
		goto overflow;

	return pos;

overflow:
	dev_WARN(dev, "device/subsystem name too long");
	return 0;
}
2754
/*
 * dev_vprintk_emit - vprintk_emit() with a per-device structured header.
 *
 * Builds the SUBSYSTEM/DEVICE dictionary into a 128-byte stack buffer and
 * forwards to vprintk_emit(); a zero-length header is passed as NULL so
 * plain messages carry no dictionary.
 */
int dev_vprintk_emit(int level, const struct device *dev,
		     const char *fmt, va_list args)
{
	char hdr[128];
	size_t hdrlen;

	hdrlen = create_syslog_header(dev, hdr, sizeof(hdr));

	return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args);
}
2765EXPORT_SYMBOL(dev_vprintk_emit);
2766
2767int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
2768{
2769 va_list args;
2770 int r;
2771
2772 va_start(args, fmt);
2773
2774 r = dev_vprintk_emit(level, dev, fmt, args);
2775
2776 va_end(args);
2777
2778 return r;
2779}
2780EXPORT_SYMBOL(dev_printk_emit);
2781
/*
 * __dev_printk - common worker for dev_printk() and the dev_<level> helpers.
 *
 * Prefixes the message with "<driver> <devname>:" when a device is given;
 * level[1] - '0' converts a KERN_<level> string ("\001" KERN_SOH followed
 * by the level digit) into the numeric log level for dev_printk_emit().
 */
static void __dev_printk(const char *level, const struct device *dev,
			 struct va_format *vaf)
{
	if (dev)
		dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
				dev_driver_string(dev), dev_name(dev), vaf);
	else
		printk("%s(NULL device *): %pV", level, vaf);
}
2791
/**
 * dev_printk - print a message for a device at an explicit log level.
 * @level: kernel log level (a KERN_* string)
 * @dev:   device the message concerns, may be NULL
 * @fmt:   printf format string, followed by its arguments
 */
void dev_printk(const char *level, const struct device *dev,
		const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	__dev_printk(level, dev, &vaf);

	va_end(args);
}
2807EXPORT_SYMBOL(dev_printk);
2808
/*
 * define_dev_printk_level - generate one dev_<level>() helper function.
 *
 * Each expansion defines and exports a variadic function that forwards to
 * __dev_printk() with a fixed KERN_* level, mirroring dev_printk() minus
 * the level argument.
 */
#define define_dev_printk_level(func, kern_level)		\
void func(const struct device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__dev_printk(kern_level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_dev_printk_level(dev_emerg, KERN_EMERG);
define_dev_printk_level(dev_alert, KERN_ALERT);
define_dev_printk_level(dev_crit, KERN_CRIT);
define_dev_printk_level(dev_err, KERN_ERR);
define_dev_printk_level(dev_warn, KERN_WARNING);
define_dev_printk_level(dev_notice, KERN_NOTICE);
/* dev_info is a macro elsewhere, so the function is named _dev_info. */
define_dev_printk_level(_dev_info, KERN_INFO);
2833
2834#endif
2835
2836static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
2837{
2838 return fwnode && !IS_ERR(fwnode->secondary);
2839}
2840
2841
2842
2843
2844
2845
2846
2847
2848
/**
 * set_primary_fwnode - install @fwnode as the primary firmware node of @dev.
 * @dev:    device to update
 * @fwnode: new primary firmware node, or NULL to remove the primary
 *
 * When installing: any existing secondary node (either the old primary's
 * ->secondary, or the old node itself if it was not a primary) is chained
 * onto the new primary.  The WARN_ON fires if @fwnode already carries a
 * secondary of its own, which would be silently overwritten.
 *
 * When removing (@fwnode == NULL): the device falls back to the old
 * primary's secondary, or to no node at all.  NOTE(review): this assumes
 * the caller never needs the removed primary's ->secondary cleared here --
 * confirm against the fwnode lifetime rules of the callers.
 */
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	if (fwnode) {
		struct fwnode_handle *fn = dev->fwnode;

		/* If the current node is a primary, keep only its secondary. */
		if (fwnode_is_primary(fn))
			fn = fn->secondary;

		if (fn) {
			WARN_ON(fwnode->secondary);
			fwnode->secondary = fn;
		}
		dev->fwnode = fwnode;
	} else {
		dev->fwnode = fwnode_is_primary(dev->fwnode) ?
			dev->fwnode->secondary : NULL;
	}
}
2867EXPORT_SYMBOL_GPL(set_primary_fwnode);
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
/**
 * set_secondary_fwnode - install @fwnode as the secondary firmware node.
 * @dev:    device to update
 * @fwnode: new secondary node, or NULL to remove the secondary
 *
 * Marks @fwnode as a secondary by setting its own ->secondary to
 * ERR_PTR(-ENODEV) (see fwnode_is_primary()), then chains it behind the
 * device's primary node if one exists, otherwise makes it the device's
 * node directly.
 */
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	if (fwnode)
		fwnode->secondary = ERR_PTR(-ENODEV);

	if (fwnode_is_primary(dev->fwnode))
		dev->fwnode->secondary = fwnode;
	else
		dev->fwnode = fwnode;
}
2888
2889
2890
2891
2892
2893
2894
2895
2896
/**
 * device_set_of_node_from_dev - reuse another device's device-tree node.
 * @dev:  device to update
 * @dev2: device whose of_node is shared
 *
 * Takes a reference on @dev2's of_node, releases @dev's old one, and
 * marks the node as reused.  NOTE(review): the put happens before the
 * get; if @dev->of_node == @dev2->of_node and this were the last
 * reference, the ordering would matter -- presumably callers guarantee
 * @dev2 outlives the swap; confirm.
 */
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
	of_node_put(dev->of_node);
	dev->of_node = of_node_get(dev2->of_node);
	dev->of_node_reused = true;
}
2903EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
2904