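/*
 * fs/kernfs/dir.c - kernfs directory implementation
 */
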
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/hash.h>

#include "kernfs-internal.h"

DEFINE_MUTEX(kernfs_mutex);
static DEFINE_SPINLOCK(kernfs_rename_lock);	/* kn->parent and ->name */
static char kernfs_pr_cont_buf[PATH_MAX];	/* protected by rename_lock */
static DEFINE_SPINLOCK(kernfs_idr_lock);	/* root->ino_idr */

#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)

static bool kernfs_active(struct kernfs_node *kn)
{
	lockdep_assert_held(&kernfs_mutex);
	return atomic_read(&kn->active) >= 0;
}

static bool kernfs_lockdep(struct kernfs_node *kn)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	return kn->flags & KERNFS_LOCKDEP;
#else
	return false;
#endif
}

static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
{
	if (!kn)
		return strlcpy(buf, "(null)", buflen);

	return strlcpy(buf, kn->parent ? kn->name : "/", buflen);
}

static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
{
	size_t depth = 0;

	while (to->parent && to != from) {
		depth++;
		to = to->parent;
	}
	return depth;
}

static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
						  struct kernfs_node *b)
{
	size_t da, db;
	struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);

	if (ra != rb)
		return NULL;

	da = kernfs_depth(ra->kn, a);
	db = kernfs_depth(rb->kn, b);

	while (da > db) {
		a = a->parent;
		da--;
	}
	while (db > da) {
		b = b->parent;
		db--;
	}

	/* worst case b and a will be the same at root */
	while (b != a) {
		b = b->parent;
		a = a->parent;
	}

	return a;
}
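
/*
 * kernfs_path_from_node_locked - build a path for @kn_to relative to
 * @kn_from.  If @kn_from is NULL, the path is built from the root of
 * @kn_to's hierarchy.  Caller must hold kernfs_rename_lock.
 *
 * Returns the length of the full path.  If the full length is larger than
 * @buflen, @buf contains the truncated path with the trailing '\0'.  On
 * error, -errno is returned.
 */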
static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
					struct kernfs_node *kn_from,
					char *buf, size_t buflen)
{
	struct kernfs_node *kn, *common;
	const char parent_str[] = "/..";
	size_t depth_from, depth_to, len = 0;
	int i, j;

	if (!kn_to)
		return strlcpy(buf, "(null)", buflen);

	if (!kn_from)
		kn_from = kernfs_root(kn_to)->kn;

	if (kn_from == kn_to)
		return strlcpy(buf, "/", buflen);

	if (!buf)
		return -EINVAL;

	common = kernfs_common_ancestor(kn_from, kn_to);
	if (WARN_ON(!common))
		return -EINVAL;

	depth_to = kernfs_depth(common, kn_to);
	depth_from = kernfs_depth(common, kn_from);

	buf[0] = '\0';

	/* one "/.." for each level between @kn_from and the common ancestor */
	for (i = 0; i < depth_from; i++)
		len += strlcpy(buf + len, parent_str,
			       len < buflen ? buflen - len : 0);

	/* then walk back down from the common ancestor to @kn_to */
	for (i = depth_to - 1; i >= 0; i--) {
		for (kn = kn_to, j = 0; j < i; j++)
			kn = kn->parent;
		len += strlcpy(buf + len, "/",
			       len < buflen ? buflen - len : 0);
		len += strlcpy(buf + len, kn->name,
			       len < buflen ? buflen - len : 0);
	}

	return len;
}
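
/**
 * kernfs_name - obtain the name of a given node
 * @kn: kernfs_node of interest
 * @buf: buffer to copy @kn's name into
 * @buflen: size of @buf
 *
 * Copies the name of @kn into @buf of @buflen bytes with strlcpy()
 * semantics.  This function can be called from any context.
 */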
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	ret = kernfs_name_locked(kn, buf, buflen);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
	return ret;
}
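
/**
 * kernfs_path_from_node - build path of node @to relative to @from
 * @to: kernfs node to build path of
 * @from: kernfs node used to start the path from; may be NULL
 * @buf: buffer to copy the path into
 * @buflen: size of @buf
 *
 * Builds @to's path relative to @from in @buf.  Both nodes must be on the
 * same hierarchy; if @from is NULL the path is built from the root.
 * Returns the length of the full path; if it is larger than @buflen, @buf
 * contains the truncated path with the trailing '\0'.  On error, -errno is
 * returned.  This function can be called from any context.
 */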
int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
			  char *buf, size_t buflen)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	ret = kernfs_path_from_node_locked(to, from, buf, buflen);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kernfs_path_from_node);

/**
 * pr_cont_kernfs_name - pr_cont the name of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.
 */
void pr_cont_kernfs_name(struct kernfs_node *kn)
{
	unsigned long flags;

	spin_lock_irqsave(&kernfs_rename_lock, flags);

	kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
	pr_cont("%s", kernfs_pr_cont_buf);

	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
}

/**
 * pr_cont_kernfs_path - pr_cont the path of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.
 */
void pr_cont_kernfs_path(struct kernfs_node *kn)
{
	unsigned long flags;
	int sz;

	spin_lock_irqsave(&kernfs_rename_lock, flags);

	sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf,
					  sizeof(kernfs_pr_cont_buf));
	if (sz < 0) {
		pr_cont("(error)");
		goto out;
	}

	if (sz >= sizeof(kernfs_pr_cont_buf)) {
		pr_cont("(name too long)");
		goto out;
	}

	pr_cont("%s", kernfs_pr_cont_buf);

out:
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
}

/**
 * kernfs_get_parent - determine the parent node and pin it
 * @kn: kernfs_node of interest
 *
 * Determines @kn's parent, pins it with kernfs_get() and returns it.
 * This function can be called from any context.
 */
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	unsigned long flags;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	parent = kn->parent;
	kernfs_get(parent);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);

	return parent;
}
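
/*
 * Convert @name and @ns into a 31-bit hash.  Hash values 0, 1 and INT_MAX
 * are reserved for the "." and ".." directory entries and the readdir
 * end-of-directory marker, so the result is clamped to [2, INT_MAX - 1].
 */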
static unsigned int kernfs_name_hash(const char *name, const void *ns)
{
	unsigned long hash = init_name_hash(ns);
	unsigned int len = strlen(name);
	while (len--)
		hash = partial_name_hash(*name++, hash);
	hash = end_name_hash(hash);
	hash &= 0x7fffffffU;
	if (hash < 2)
		hash += 2;
	if (hash >= INT_MAX)
		hash = INT_MAX - 1;
	return hash;
}

static int kernfs_name_compare(unsigned int hash, const char *name,
			       const void *ns, const struct kernfs_node *kn)
{
	if (hash < kn->hash)
		return -1;
	if (hash > kn->hash)
		return 1;
	if (ns < kn->ns)
		return -1;
	if (ns > kn->ns)
		return 1;
	return strcmp(name, kn->name);
}

static int kernfs_sd_compare(const struct kernfs_node *left,
			     const struct kernfs_node *right)
{
	return kernfs_name_compare(left->hash, left->name, left->ns, right);
}

/*
 * kernfs_link_sibling - link a kernfs_node into its parent's children
 *	rbtree, sorted by (hash, ns, name)
 *
 * Locking: kernfs_mutex
 *
 * Returns 0 on success, -EEXIST if an entry with the given name already
 * exists.
 */
static int kernfs_link_sibling(struct kernfs_node *kn)
{
	struct rb_node **node = &kn->parent->dir.children.rb_node;
	struct rb_node *parent = NULL;

	while (*node) {
		struct kernfs_node *pos;
		int result;

		pos = rb_to_kn(*node);
		parent = *node;
		result = kernfs_sd_compare(kn, pos);
		if (result < 0)
			node = &pos->rb.rb_left;
		else if (result > 0)
			node = &pos->rb.rb_right;
		else
			return -EEXIST;
	}

	/* add new node and rebalance the tree */
	rb_link_node(&kn->rb, parent, node);
	rb_insert_color(&kn->rb, &kn->parent->dir.children);

	/* successfully added, account subdir number */
	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs++;

	return 0;
}

/*
 * kernfs_unlink_sibling - unlink a kernfs_node from its parent's children
 *	rbtree
 *
 * Locking: kernfs_mutex
 *
 * Returns true if @kn was actually unlinked, false if it was already gone.
 */
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
{
	if (RB_EMPTY_NODE(&kn->rb))
		return false;

	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs--;

	rb_erase(&kn->rb, &kn->parent->dir.children);
	RB_CLEAR_NODE(&kn->rb);
	return true;
}
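
/**
 * kernfs_get_active - get an active reference to kernfs_node
 * @kn: kernfs_node to get an active reference to
 *
 * Get an active reference of @kn.  This function is noop if @kn is NULL.
 * Returns a pointer to @kn on success, NULL on failure (e.g. when @kn has
 * been deactivated).
 */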
struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
{
	if (unlikely(!kn))
		return NULL;

	if (!atomic_inc_unless_negative(&kn->active))
		return NULL;

	if (kernfs_lockdep(kn))
		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
	return kn;
}

/*
 * kernfs_put_active - put an active reference to kernfs_node
 *
 * Put an active reference to @kn.  This function is noop if @kn is NULL.
 * Wakes up waiting removers once the last active reference is dropped.
 */
void kernfs_put_active(struct kernfs_node *kn)
{
	int v;

	if (unlikely(!kn))
		return;

	if (kernfs_lockdep(kn))
		rwsem_release(&kn->dep_map, _RET_IP_);
	v = atomic_dec_return(&kn->active);
	if (likely(v != KN_DEACTIVATED_BIAS))
		return;

	wake_up_all(&kernfs_root(kn)->deactivate_waitq);
}
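
/**
 * kernfs_drain - drain kernfs_node
 * @kn: kernfs_node to drain
 *
 * Drain existing usages and nuke all existing mmaps of @kn.  Multiple
 * removers may invoke this function concurrently on @kn and all will
 * return after draining is complete.
 */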
static void kernfs_drain(struct kernfs_node *kn)
	__releases(&kernfs_mutex) __acquires(&kernfs_mutex)
{
	struct kernfs_root *root = kernfs_root(kn);

	lockdep_assert_held(&kernfs_mutex);
	WARN_ON_ONCE(kernfs_active(kn));

	mutex_unlock(&kernfs_mutex);

	if (kernfs_lockdep(kn)) {
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
			lock_contended(&kn->dep_map, _RET_IP_);
	}

	/* but everyone should wait for draining to complete */
	wait_event(root->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);

	if (kernfs_lockdep(kn)) {
		lock_acquired(&kn->dep_map, _RET_IP_);
		rwsem_release(&kn->dep_map, _RET_IP_);
	}

	kernfs_drain_open_files(kn);

	mutex_lock(&kernfs_mutex);
}

/**
 * kernfs_get - get a reference count on a kernfs_node
 * @kn: the target kernfs_node
 */
void kernfs_get(struct kernfs_node *kn)
{
	if (kn) {
		WARN_ON(!atomic_read(&kn->count));
		atomic_inc(&kn->count);
	}
}
EXPORT_SYMBOL_GPL(kernfs_get);

/**
 * kernfs_put - put a reference count on a kernfs_node
 * @kn: the target kernfs_node
 *
 * Put a reference count of @kn and destroy it if it reached zero.
 */
void kernfs_put(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	struct kernfs_root *root;

	if (!kn || !atomic_dec_and_test(&kn->count))
		return;
	root = kernfs_root(kn);
 repeat:
	/*
	 * Moving/renaming is always done while holding reference.
	 * kn->parent won't change beneath us.
	 */
	parent = kn->parent;

	WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
		  "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
		  parent ? parent->name : "", kn->name, atomic_read(&kn->active));

	if (kernfs_type(kn) == KERNFS_LINK)
		kernfs_put(kn->symlink.target_kn);

	kfree_const(kn->name);

	if (kn->iattr) {
		simple_xattrs_free(&kn->iattr->xattrs);
		kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
	}
	spin_lock(&kernfs_idr_lock);
	idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
	spin_unlock(&kernfs_idr_lock);
	kmem_cache_free(kernfs_node_cache, kn);

	kn = parent;
	if (kn) {
		if (atomic_dec_and_test(&kn->count))
			goto repeat;
	} else {
		/* just released the root kn, free @root too */
		idr_destroy(&root->ino_idr);
		kfree(root);
	}
}
EXPORT_SYMBOL_GPL(kernfs_put);
550
551static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
552{
553 struct kernfs_node *kn;
554
555 if (flags & LOOKUP_RCU)
556 return -ECHILD;
557
558
559 if (d_really_is_negative(dentry))
560 goto out_bad_unlocked;
561
562 kn = kernfs_dentry_node(dentry);
563 mutex_lock(&kernfs_mutex);
564
565
566 if (!kernfs_active(kn))
567 goto out_bad;
568
569
570 if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
571 goto out_bad;
572
573
574 if (strcmp(dentry->d_name.name, kn->name) != 0)
575 goto out_bad;
576
577
578 if (kn->parent && kernfs_ns_enabled(kn->parent) &&
579 kernfs_info(dentry->d_sb)->ns != kn->ns)
580 goto out_bad;
581
582 mutex_unlock(&kernfs_mutex);
583 return 1;
584out_bad:
585 mutex_unlock(&kernfs_mutex);
586out_bad_unlocked:
587 return 0;
588}
589
590const struct dentry_operations kernfs_dops = {
591 .d_revalidate = kernfs_dop_revalidate,
592};
593
594
595
596
597
598
599
600
601
602
603
604
605struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
606{
607 if (dentry->d_sb->s_op == &kernfs_sops)
608 return kernfs_dentry_node(dentry);
609 return NULL;
610}
611
612static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
613 struct kernfs_node *parent,
614 const char *name, umode_t mode,
615 kuid_t uid, kgid_t gid,
616 unsigned flags)
617{
618 struct kernfs_node *kn;
619 u32 id_highbits;
620 int ret;
621
622 name = kstrdup_const(name, GFP_KERNEL);
623 if (!name)
624 return NULL;
625
626 kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
627 if (!kn)
628 goto err_out1;
629
630 idr_preload(GFP_KERNEL);
631 spin_lock(&kernfs_idr_lock);
632 ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
633 if (ret >= 0 && ret < root->last_id_lowbits)
634 root->id_highbits++;
635 id_highbits = root->id_highbits;
636 root->last_id_lowbits = ret;
637 spin_unlock(&kernfs_idr_lock);
638 idr_preload_end();
639 if (ret < 0)
640 goto err_out2;
641
642 kn->id = (u64)id_highbits << 32 | ret;
643
644 atomic_set(&kn->count, 1);
645 atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
646 RB_CLEAR_NODE(&kn->rb);
647
648 kn->name = name;
649 kn->mode = mode;
650 kn->flags = flags;
651
652 if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) {
653 struct iattr iattr = {
654 .ia_valid = ATTR_UID | ATTR_GID,
655 .ia_uid = uid,
656 .ia_gid = gid,
657 };
658
659 ret = __kernfs_setattr(kn, &iattr);
660 if (ret < 0)
661 goto err_out3;
662 }
663
664 if (parent) {
665 ret = security_kernfs_init_security(parent, kn);
666 if (ret)
667 goto err_out3;
668 }
669
670 return kn;
671
672 err_out3:
673 idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
674 err_out2:
675 kmem_cache_free(kernfs_node_cache, kn);
676 err_out1:
677 kfree_const(name);
678 return NULL;
679}
680
681struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
682 const char *name, umode_t mode,
683 kuid_t uid, kgid_t gid,
684 unsigned flags)
685{
686 struct kernfs_node *kn;
687
688 kn = __kernfs_new_node(kernfs_root(parent), parent,
689 name, mode, uid, gid, flags);
690 if (kn) {
691 kernfs_get(parent);
692 kn->parent = parent;
693 }
694 return kn;
695}
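
/*
 * kernfs_find_and_get_node_by_id - get kernfs_node from node id
 *
 * Looks up the node with @id (inode number plus generation) in @root's idr
 * and returns it with its reference count incremented.  Returns NULL if no
 * such node exists, the generation doesn't match, or the node has never
 * been activated.
 */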
708struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
709 u64 id)
710{
711 struct kernfs_node *kn;
712 ino_t ino = kernfs_id_ino(id);
713 u32 gen = kernfs_id_gen(id);
714
715 spin_lock(&kernfs_idr_lock);
716
717 kn = idr_find(&root->ino_idr, (u32)ino);
718 if (!kn)
719 goto err_unlock;
720
721 if (sizeof(ino_t) >= sizeof(u64)) {
722
723 if (kernfs_ino(kn) != ino)
724 goto err_unlock;
725 } else {
726
727 if (unlikely(gen && kernfs_gen(kn) != gen))
728 goto err_unlock;
729 }
730
731
732
733
734
735
736 if (unlikely(!(kn->flags & KERNFS_ACTIVATED) ||
737 !atomic_inc_not_zero(&kn->count)))
738 goto err_unlock;
739
740 spin_unlock(&kernfs_idr_lock);
741 return kn;
742err_unlock:
743 spin_unlock(&kernfs_idr_lock);
744 return NULL;
745}
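
/**
 * kernfs_add_one - add kernfs_node to parent without warning
 * @kn: kernfs_node to be added
 *
 * The caller must already have initialized @kn->parent.  Links @kn into its
 * parent's children rbtree and updates the parent's timestamps.  Returns 0
 * on success, -EEXIST if an entry with the same name and namespace already
 * exists, or -EINVAL/-ENOENT if the parent is not a suitable directory.
 */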
759int kernfs_add_one(struct kernfs_node *kn)
760{
761 struct kernfs_node *parent = kn->parent;
762 struct kernfs_iattrs *ps_iattr;
763 bool has_ns;
764 int ret;
765
766 mutex_lock(&kernfs_mutex);
767
768 ret = -EINVAL;
769 has_ns = kernfs_ns_enabled(parent);
770 if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
771 has_ns ? "required" : "invalid", parent->name, kn->name))
772 goto out_unlock;
773
774 if (kernfs_type(parent) != KERNFS_DIR)
775 goto out_unlock;
776
777 ret = -ENOENT;
778 if (parent->flags & KERNFS_EMPTY_DIR)
779 goto out_unlock;
780
781 if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
782 goto out_unlock;
783
784 kn->hash = kernfs_name_hash(kn->name, kn->ns);
785
786 ret = kernfs_link_sibling(kn);
787 if (ret)
788 goto out_unlock;
789
790
791 ps_iattr = parent->iattr;
792 if (ps_iattr) {
793 ktime_get_real_ts64(&ps_iattr->ia_ctime);
794 ps_iattr->ia_mtime = ps_iattr->ia_ctime;
795 }
796
797 mutex_unlock(&kernfs_mutex);
798
799
800
801
802
803
804
805
806 if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
807 kernfs_activate(kn);
808 return 0;
809
810out_unlock:
811 mutex_unlock(&kernfs_mutex);
812 return ret;
813}
814
815
816
817
818
819
820
821
822
823
824static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
825 const unsigned char *name,
826 const void *ns)
827{
828 struct rb_node *node = parent->dir.children.rb_node;
829 bool has_ns = kernfs_ns_enabled(parent);
830 unsigned int hash;
831
832 lockdep_assert_held(&kernfs_mutex);
833
834 if (has_ns != (bool)ns) {
835 WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
836 has_ns ? "required" : "invalid", parent->name, name);
837 return NULL;
838 }
839
840 hash = kernfs_name_hash(name, ns);
841 while (node) {
842 struct kernfs_node *kn;
843 int result;
844
845 kn = rb_to_kn(node);
846 result = kernfs_name_compare(hash, name, ns, kn);
847 if (result < 0)
848 node = node->rb_left;
849 else if (result > 0)
850 node = node->rb_right;
851 else
852 return kn;
853 }
854 return NULL;
855}
856
857static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
858 const unsigned char *path,
859 const void *ns)
860{
861 size_t len;
862 char *p, *name;
863
864 lockdep_assert_held(&kernfs_mutex);
865
866
867 spin_lock_irq(&kernfs_rename_lock);
868
869 len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
870
871 if (len >= sizeof(kernfs_pr_cont_buf)) {
872 spin_unlock_irq(&kernfs_rename_lock);
873 return NULL;
874 }
875
876 p = kernfs_pr_cont_buf;
877
878 while ((name = strsep(&p, "/")) && parent) {
879 if (*name == '\0')
880 continue;
881 parent = kernfs_find_ns(parent, name, ns);
882 }
883
884 spin_unlock_irq(&kernfs_rename_lock);
885
886 return parent;
887}
888
889
890
891
892
893
894
895
896
897
898
899struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
900 const char *name, const void *ns)
901{
902 struct kernfs_node *kn;
903
904 mutex_lock(&kernfs_mutex);
905 kn = kernfs_find_ns(parent, name, ns);
906 kernfs_get(kn);
907 mutex_unlock(&kernfs_mutex);
908
909 return kn;
910}
911EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
912
913
914
915
916
917
918
919
920
921
922
923struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
924 const char *path, const void *ns)
925{
926 struct kernfs_node *kn;
927
928 mutex_lock(&kernfs_mutex);
929 kn = kernfs_walk_ns(parent, path, ns);
930 kernfs_get(kn);
931 mutex_unlock(&kernfs_mutex);
932
933 return kn;
934}
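
/**
 * kernfs_create_root - create a new kernfs hierarchy
 * @scops: optional syscall operations for the hierarchy
 * @flags: KERNFS_ROOT_* flags
 * @priv: opaque data associated with the new directory
 *
 * Returns the root of the new hierarchy on success, ERR_PTR() value on
 * failure.
 */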
945struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
946 unsigned int flags, void *priv)
947{
948 struct kernfs_root *root;
949 struct kernfs_node *kn;
950
951 root = kzalloc(sizeof(*root), GFP_KERNEL);
952 if (!root)
953 return ERR_PTR(-ENOMEM);
954
955 idr_init(&root->ino_idr);
956 INIT_LIST_HEAD(&root->supers);
957
958
959
960
961
962
963
964 if (sizeof(ino_t) >= sizeof(u64))
965 root->id_highbits = 0;
966 else
967 root->id_highbits = 1;
968
969 kn = __kernfs_new_node(root, NULL, "", S_IFDIR | S_IRUGO | S_IXUGO,
970 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
971 KERNFS_DIR);
972 if (!kn) {
973 idr_destroy(&root->ino_idr);
974 kfree(root);
975 return ERR_PTR(-ENOMEM);
976 }
977
978 kn->priv = priv;
979 kn->dir.root = root;
980
981 root->syscall_ops = scops;
982 root->flags = flags;
983 root->kn = kn;
984 init_waitqueue_head(&root->deactivate_waitq);
985
986 if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
987 kernfs_activate(kn);
988
989 return root;
990}
991
992
993
994
995
996
997
998
999void kernfs_destroy_root(struct kernfs_root *root)
1000{
1001 kernfs_remove(root->kn);
1002}
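
/**
 * kernfs_create_dir_ns - create a directory
 * @parent: parent in which to create a new directory
 * @name: name of the new directory
 * @mode: mode of the new directory
 * @uid: uid of the new directory
 * @gid: gid of the new directory
 * @priv: opaque data associated with the new directory
 * @ns: optional namespace tag of the directory
 *
 * Returns the created node on success, ERR_PTR() value on failure.
 *
 * Illustrative use only (the names below are hypothetical, not part of
 * this file):
 *
 *	dir = kernfs_create_dir_ns(parent, "example", 0755,
 *				   GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
 *				   my_priv, NULL);
 *	if (IS_ERR(dir))
 *		return PTR_ERR(dir);
 */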
1016struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
1017 const char *name, umode_t mode,
1018 kuid_t uid, kgid_t gid,
1019 void *priv, const void *ns)
1020{
1021 struct kernfs_node *kn;
1022 int rc;
1023
1024
1025 kn = kernfs_new_node(parent, name, mode | S_IFDIR,
1026 uid, gid, KERNFS_DIR);
1027 if (!kn)
1028 return ERR_PTR(-ENOMEM);
1029
1030 kn->dir.root = parent->dir.root;
1031 kn->ns = ns;
1032 kn->priv = priv;
1033
1034
1035 rc = kernfs_add_one(kn);
1036 if (!rc)
1037 return kn;
1038
1039 kernfs_put(kn);
1040 return ERR_PTR(rc);
1041}
1042
1043
1044
1045
1046
1047
1048
1049
1050struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
1051 const char *name)
1052{
1053 struct kernfs_node *kn;
1054 int rc;
1055
1056
1057 kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR,
1058 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR);
1059 if (!kn)
1060 return ERR_PTR(-ENOMEM);
1061
1062 kn->flags |= KERNFS_EMPTY_DIR;
1063 kn->dir.root = parent->dir.root;
1064 kn->ns = NULL;
1065 kn->priv = NULL;
1066
1067
1068 rc = kernfs_add_one(kn);
1069 if (!rc)
1070 return kn;
1071
1072 kernfs_put(kn);
1073 return ERR_PTR(rc);
1074}
1075
1076static struct dentry *kernfs_iop_lookup(struct inode *dir,
1077 struct dentry *dentry,
1078 unsigned int flags)
1079{
1080 struct dentry *ret;
1081 struct kernfs_node *parent = dir->i_private;
1082 struct kernfs_node *kn;
1083 struct inode *inode;
1084 const void *ns = NULL;
1085
1086 mutex_lock(&kernfs_mutex);
1087
1088 if (kernfs_ns_enabled(parent))
1089 ns = kernfs_info(dir->i_sb)->ns;
1090
1091 kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
1092
1093
1094 if (!kn || !kernfs_active(kn)) {
1095 ret = NULL;
1096 goto out_unlock;
1097 }
1098
1099
1100 inode = kernfs_get_inode(dir->i_sb, kn);
1101 if (!inode) {
1102 ret = ERR_PTR(-ENOMEM);
1103 goto out_unlock;
1104 }
1105
1106
1107 ret = d_splice_alias(inode, dentry);
1108 out_unlock:
1109 mutex_unlock(&kernfs_mutex);
1110 return ret;
1111}
1112
1113static int kernfs_iop_mkdir(struct user_namespace *mnt_userns,
1114 struct inode *dir, struct dentry *dentry,
1115 umode_t mode)
1116{
1117 struct kernfs_node *parent = dir->i_private;
1118 struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
1119 int ret;
1120
1121 if (!scops || !scops->mkdir)
1122 return -EPERM;
1123
1124 if (!kernfs_get_active(parent))
1125 return -ENODEV;
1126
1127 ret = scops->mkdir(parent, dentry->d_name.name, mode);
1128
1129 kernfs_put_active(parent);
1130 return ret;
1131}
1132
1133static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
1134{
1135 struct kernfs_node *kn = kernfs_dentry_node(dentry);
1136 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
1137 int ret;
1138
1139 if (!scops || !scops->rmdir)
1140 return -EPERM;
1141
1142 if (!kernfs_get_active(kn))
1143 return -ENODEV;
1144
1145 ret = scops->rmdir(kn);
1146
1147 kernfs_put_active(kn);
1148 return ret;
1149}
1150
1151static int kernfs_iop_rename(struct user_namespace *mnt_userns,
1152 struct inode *old_dir, struct dentry *old_dentry,
1153 struct inode *new_dir, struct dentry *new_dentry,
1154 unsigned int flags)
1155{
1156 struct kernfs_node *kn = kernfs_dentry_node(old_dentry);
1157 struct kernfs_node *new_parent = new_dir->i_private;
1158 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
1159 int ret;
1160
1161 if (flags)
1162 return -EINVAL;
1163
1164 if (!scops || !scops->rename)
1165 return -EPERM;
1166
1167 if (!kernfs_get_active(kn))
1168 return -ENODEV;
1169
1170 if (!kernfs_get_active(new_parent)) {
1171 kernfs_put_active(kn);
1172 return -ENODEV;
1173 }
1174
1175 ret = scops->rename(kn, new_parent, new_dentry->d_name.name);
1176
1177 kernfs_put_active(new_parent);
1178 kernfs_put_active(kn);
1179 return ret;
1180}
1181
1182const struct inode_operations kernfs_dir_iops = {
1183 .lookup = kernfs_iop_lookup,
1184 .permission = kernfs_iop_permission,
1185 .setattr = kernfs_iop_setattr,
1186 .getattr = kernfs_iop_getattr,
1187 .listxattr = kernfs_iop_listxattr,
1188
1189 .mkdir = kernfs_iop_mkdir,
1190 .rmdir = kernfs_iop_rmdir,
1191 .rename = kernfs_iop_rename,
1192};
1193
1194static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
1195{
1196 struct kernfs_node *last;
1197
1198 while (true) {
1199 struct rb_node *rbn;
1200
1201 last = pos;
1202
1203 if (kernfs_type(pos) != KERNFS_DIR)
1204 break;
1205
1206 rbn = rb_first(&pos->dir.children);
1207 if (!rbn)
1208 break;
1209
1210 pos = rb_to_kn(rbn);
1211 }
1212
1213 return last;
1214}
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
1226 struct kernfs_node *root)
1227{
1228 struct rb_node *rbn;
1229
1230 lockdep_assert_held(&kernfs_mutex);
1231
1232
1233 if (!pos)
1234 return kernfs_leftmost_descendant(root);
1235
1236
1237 if (pos == root)
1238 return NULL;
1239
1240
1241 rbn = rb_next(&pos->rb);
1242 if (rbn)
1243 return kernfs_leftmost_descendant(rb_to_kn(rbn));
1244
1245
1246 return pos->parent;
1247}
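
/**
 * kernfs_activate - activate a node which started deactivated
 * @kn: kernfs_node whose subtree is to be activated
 *
 * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node
 * becomes visible only after activation.  Walks @kn's subtree and activates
 * every node which has been linked but not yet activated.
 */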
1262void kernfs_activate(struct kernfs_node *kn)
1263{
1264 struct kernfs_node *pos;
1265
1266 mutex_lock(&kernfs_mutex);
1267
1268 pos = NULL;
1269 while ((pos = kernfs_next_descendant_post(pos, kn))) {
1270 if (pos->flags & KERNFS_ACTIVATED)
1271 continue;
1272
1273 WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
1274 WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);
1275
1276 atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
1277 pos->flags |= KERNFS_ACTIVATED;
1278 }
1279
1280 mutex_unlock(&kernfs_mutex);
1281}
1282
1283static void __kernfs_remove(struct kernfs_node *kn)
1284{
1285 struct kernfs_node *pos;
1286
1287 lockdep_assert_held(&kernfs_mutex);
1288
1289
1290
1291
1292
1293
1294 if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
1295 return;
1296
1297 pr_debug("kernfs %s: removing\n", kn->name);
1298
1299
1300 pos = NULL;
1301 while ((pos = kernfs_next_descendant_post(pos, kn)))
1302 if (kernfs_active(pos))
1303 atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
1304
1305
1306 do {
1307 pos = kernfs_leftmost_descendant(kn);
1308
1309
1310
1311
1312
1313
1314
1315 kernfs_get(pos);
1316
1317
1318
1319
1320
1321
1322
1323 if (kn->flags & KERNFS_ACTIVATED)
1324 kernfs_drain(pos);
1325 else
1326 WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
1327
1328
1329
1330
1331
1332 if (!pos->parent || kernfs_unlink_sibling(pos)) {
1333 struct kernfs_iattrs *ps_iattr =
1334 pos->parent ? pos->parent->iattr : NULL;
1335
1336
1337 if (ps_iattr) {
1338 ktime_get_real_ts64(&ps_iattr->ia_ctime);
1339 ps_iattr->ia_mtime = ps_iattr->ia_ctime;
1340 }
1341
1342 kernfs_put(pos);
1343 }
1344
1345 kernfs_put(pos);
1346 } while (pos != kn);
1347}
1348
1349
1350
1351
1352
1353
1354
1355void kernfs_remove(struct kernfs_node *kn)
1356{
1357 mutex_lock(&kernfs_mutex);
1358 __kernfs_remove(kn);
1359 mutex_unlock(&kernfs_mutex);
1360}
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376void kernfs_break_active_protection(struct kernfs_node *kn)
1377{
1378
1379
1380
1381
1382 kernfs_put_active(kn);
1383}
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400void kernfs_unbreak_active_protection(struct kernfs_node *kn)
1401{
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411 atomic_inc(&kn->active);
1412 if (kernfs_lockdep(kn))
1413 rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
1414}
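
/**
 * kernfs_remove_self - remove a kernfs_node from its own method
 * @kn: the self kernfs_node to remove
 *
 * The caller is running inside one of @kn's methods.  Removing @kn directly
 * would deadlock on the active reference that method is holding, so this
 * function drops that protection first.  The first invocation performs the
 * removal and returns %true; any concurrent invocation waits until the
 * removal completes and returns %false.
 */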
1442bool kernfs_remove_self(struct kernfs_node *kn)
1443{
1444 bool ret;
1445
1446 mutex_lock(&kernfs_mutex);
1447 kernfs_break_active_protection(kn);
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458 if (!(kn->flags & KERNFS_SUICIDAL)) {
1459 kn->flags |= KERNFS_SUICIDAL;
1460 __kernfs_remove(kn);
1461 kn->flags |= KERNFS_SUICIDED;
1462 ret = true;
1463 } else {
1464 wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
1465 DEFINE_WAIT(wait);
1466
1467 while (true) {
1468 prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
1469
1470 if ((kn->flags & KERNFS_SUICIDED) &&
1471 atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
1472 break;
1473
1474 mutex_unlock(&kernfs_mutex);
1475 schedule();
1476 mutex_lock(&kernfs_mutex);
1477 }
1478 finish_wait(waitq, &wait);
1479 WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
1480 ret = false;
1481 }
1482
1483
1484
1485
1486
1487 kernfs_unbreak_active_protection(kn);
1488
1489 mutex_unlock(&kernfs_mutex);
1490 return ret;
1491}
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
1503 const void *ns)
1504{
1505 struct kernfs_node *kn;
1506
1507 if (!parent) {
1508 WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
1509 name);
1510 return -ENOENT;
1511 }
1512
1513 mutex_lock(&kernfs_mutex);
1514
1515 kn = kernfs_find_ns(parent, name, ns);
1516 if (kn)
1517 __kernfs_remove(kn);
1518
1519 mutex_unlock(&kernfs_mutex);
1520
1521 if (kn)
1522 return 0;
1523 else
1524 return -ENOENT;
1525}
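
/**
 * kernfs_rename_ns - move and rename a kernfs_node
 * @kn: target node
 * @new_parent: new parent to put @kn under
 * @new_name: new name
 * @new_ns: new namespace tag
 *
 * Returns 0 on success, -errno on failure (-EINVAL for the root node,
 * -ENOENT if either node is being removed, -EEXIST if @new_name already
 * exists under @new_parent).
 */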
1534int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
1535 const char *new_name, const void *new_ns)
1536{
1537 struct kernfs_node *old_parent;
1538 const char *old_name = NULL;
1539 int error;
1540
1541
1542 if (!kn->parent)
1543 return -EINVAL;
1544
1545 mutex_lock(&kernfs_mutex);
1546
1547 error = -ENOENT;
1548 if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
1549 (new_parent->flags & KERNFS_EMPTY_DIR))
1550 goto out;
1551
1552 error = 0;
1553 if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
1554 (strcmp(kn->name, new_name) == 0))
1555 goto out;
1556
1557 error = -EEXIST;
1558 if (kernfs_find_ns(new_parent, new_name, new_ns))
1559 goto out;
1560
1561
1562 if (strcmp(kn->name, new_name) != 0) {
1563 error = -ENOMEM;
1564 new_name = kstrdup_const(new_name, GFP_KERNEL);
1565 if (!new_name)
1566 goto out;
1567 } else {
1568 new_name = NULL;
1569 }
1570
1571
1572
1573
1574 kernfs_unlink_sibling(kn);
1575 kernfs_get(new_parent);
1576
1577
1578 spin_lock_irq(&kernfs_rename_lock);
1579
1580 old_parent = kn->parent;
1581 kn->parent = new_parent;
1582
1583 kn->ns = new_ns;
1584 if (new_name) {
1585 old_name = kn->name;
1586 kn->name = new_name;
1587 }
1588
1589 spin_unlock_irq(&kernfs_rename_lock);
1590
1591 kn->hash = kernfs_name_hash(kn->name, kn->ns);
1592 kernfs_link_sibling(kn);
1593
1594 kernfs_put(old_parent);
1595 kfree_const(old_name);
1596
1597 error = 0;
1598 out:
1599 mutex_unlock(&kernfs_mutex);
1600 return error;
1601}
1602
1603
1604static inline unsigned char dt_type(struct kernfs_node *kn)
1605{
1606 return (kn->mode >> 12) & 15;
1607}
1608
1609static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
1610{
1611 kernfs_put(filp->private_data);
1612 return 0;
1613}
1614
1615static struct kernfs_node *kernfs_dir_pos(const void *ns,
1616 struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
1617{
1618 if (pos) {
1619 int valid = kernfs_active(pos) &&
1620 pos->parent == parent && hash == pos->hash;
1621 kernfs_put(pos);
1622 if (!valid)
1623 pos = NULL;
1624 }
1625 if (!pos && (hash > 1) && (hash < INT_MAX)) {
1626 struct rb_node *node = parent->dir.children.rb_node;
1627 while (node) {
1628 pos = rb_to_kn(node);
1629
1630 if (hash < pos->hash)
1631 node = node->rb_left;
1632 else if (hash > pos->hash)
1633 node = node->rb_right;
1634 else
1635 break;
1636 }
1637 }
1638
1639 while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
1640 struct rb_node *node = rb_next(&pos->rb);
1641 if (!node)
1642 pos = NULL;
1643 else
1644 pos = rb_to_kn(node);
1645 }
1646 return pos;
1647}
1648
1649static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
1650 struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
1651{
1652 pos = kernfs_dir_pos(ns, parent, ino, pos);
1653 if (pos) {
1654 do {
1655 struct rb_node *node = rb_next(&pos->rb);
1656 if (!node)
1657 pos = NULL;
1658 else
1659 pos = rb_to_kn(node);
1660 } while (pos && (!kernfs_active(pos) || pos->ns != ns));
1661 }
1662 return pos;
1663}
1664
1665static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
1666{
1667 struct dentry *dentry = file->f_path.dentry;
1668 struct kernfs_node *parent = kernfs_dentry_node(dentry);
1669 struct kernfs_node *pos = file->private_data;
1670 const void *ns = NULL;
1671
1672 if (!dir_emit_dots(file, ctx))
1673 return 0;
1674 mutex_lock(&kernfs_mutex);
1675
1676 if (kernfs_ns_enabled(parent))
1677 ns = kernfs_info(dentry->d_sb)->ns;
1678
1679 for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
1680 pos;
1681 pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
1682 const char *name = pos->name;
1683 unsigned int type = dt_type(pos);
1684 int len = strlen(name);
1685 ino_t ino = kernfs_ino(pos);
1686
1687 ctx->pos = pos->hash;
1688 file->private_data = pos;
1689 kernfs_get(pos);
1690
1691 mutex_unlock(&kernfs_mutex);
1692 if (!dir_emit(ctx, name, len, ino, type))
1693 return 0;
1694 mutex_lock(&kernfs_mutex);
1695 }
1696 mutex_unlock(&kernfs_mutex);
1697 file->private_data = NULL;
1698 ctx->pos = INT_MAX;
1699 return 0;
1700}
1701
1702const struct file_operations kernfs_dir_fops = {
1703 .read = generic_read_dir,
1704 .iterate_shared = kernfs_fop_readdir,
1705 .release = kernfs_dir_fop_release,
1706 .llseek = generic_file_llseek,
1707};
1708