1
2
3
4
5
6
7
8
9
10#include <linux/sched.h>
11#include <linux/fs.h>
12#include <linux/namei.h>
13#include <linux/idr.h>
14#include <linux/slab.h>
15#include <linux/security.h>
16#include <linux/hash.h>
17
18#include "kernfs-internal.h"
19
/* Serializes all modifications of and lookups in the kernfs hierarchy. */
DEFINE_MUTEX(kernfs_mutex);
static DEFINE_SPINLOCK(kernfs_rename_lock);	/* protects ->parent and ->name */
/* Scratch buffer for name/path printing; protected by kernfs_rename_lock. */
static char kernfs_pr_cont_buf[PATH_MAX];
static DEFINE_SPINLOCK(kernfs_idr_lock);	/* protects root->ino_idr */

/* Map an rb_node embedded in a kernfs_node back to the containing node. */
#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
26
27static bool kernfs_active(struct kernfs_node *kn)
28{
29 lockdep_assert_held(&kernfs_mutex);
30 return atomic_read(&kn->active) >= 0;
31}
32
/*
 * Should lockdep active-ref annotations be applied to @kn?  Only nodes
 * flagged KERNFS_LOCKDEP participate, and only when the kernel is built
 * with CONFIG_DEBUG_LOCK_ALLOC.
 */
static bool kernfs_lockdep(struct kernfs_node *kn)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	return kn->flags & KERNFS_LOCKDEP;
#else
	return false;
#endif
}
41
42static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
43{
44 if (!kn)
45 return strlcpy(buf, "(null)", buflen);
46
47 return strlcpy(buf, kn->parent ? kn->name : "/", buflen);
48}
49
50
51static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
52{
53 size_t depth = 0;
54
55 while (to->parent && to != from) {
56 depth++;
57 to = to->parent;
58 }
59 return depth;
60}
61
62static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
63 struct kernfs_node *b)
64{
65 size_t da, db;
66 struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);
67
68 if (ra != rb)
69 return NULL;
70
71 da = kernfs_depth(ra->kn, a);
72 db = kernfs_depth(rb->kn, b);
73
74 while (da > db) {
75 a = a->parent;
76 da--;
77 }
78 while (db > da) {
79 b = b->parent;
80 db--;
81 }
82
83
84 while (b != a) {
85 b = b->parent;
86 a = a->parent;
87 }
88
89 return a;
90}
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
/**
 * kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to,
 * where kn_from is treated as root of the path.
 * @kn_to: kernfs node to which path is needed
 * @kn_from: kernfs node which should be treated as root for the path
 * @buf: buffer to copy the path into
 * @buflen: size of @buf
 *
 * Builds "../.." style relative components for each level @kn_from sits
 * below the common ancestor, then appends "/<name>" for each level down
 * to @kn_to.  The returned length is the full (untruncated) path length,
 * which may exceed @buflen; callers must check for truncation.  A NULL
 * @kn_from means "path from the hierarchy root".
 *
 * Caller must hold kernfs_rename_lock (names and parents are stable
 * only under it).
 */
static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
					struct kernfs_node *kn_from,
					char *buf, size_t buflen)
{
	struct kernfs_node *kn, *common;
	const char parent_str[] = "/..";
	size_t depth_from, depth_to, len = 0;
	int i, j;

	if (!kn_to)
		return strlcpy(buf, "(null)", buflen);

	if (!kn_from)
		kn_from = kernfs_root(kn_to)->kn;

	if (kn_from == kn_to)
		return strlcpy(buf, "/", buflen);

	if (!buf)
		return -EINVAL;

	common = kernfs_common_ancestor(kn_from, kn_to);
	if (WARN_ON(!common))
		return -EINVAL;

	depth_to = kernfs_depth(common, kn_to);
	depth_from = kernfs_depth(common, kn_from);

	buf[0] = '\0';

	/* One "/.." per level @kn_from lies below the common ancestor. */
	for (i = 0; i < depth_from; i++)
		len += strlcpy(buf + len, parent_str,
			       len < buflen ? buflen - len : 0);

	/* Descend from the ancestor to @kn_to, emitting "/<name>" each level. */
	for (i = depth_to - 1; i >= 0; i--) {
		/* Walk up from @kn_to to the component at depth @i. */
		for (kn = kn_to, j = 0; j < i; j++)
			kn = kn->parent;
		len += strlcpy(buf + len, "/",
			       len < buflen ? buflen - len : 0);
		len += strlcpy(buf + len, kn->name,
			       len < buflen ? buflen - len : 0);
	}

	return len;
}
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
/**
 * kernfs_name - obtain the name of a given node
 * @kn: kernfs_node of interest (may be NULL)
 * @buf: buffer to copy @kn's name into
 * @buflen: size of @buf
 *
 * Copies the name of @kn into @buf of @buflen bytes.  The behavior is
 * similar to strlcpy().  The copy is done under kernfs_rename_lock so
 * a concurrent rename cannot tear the name.  This function can be
 * called from any context.
 */
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	ret = kernfs_name_locked(kn, buf, buflen);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
	return ret;
}
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
/**
 * kernfs_path_from_node - build path of node @to relative to @from.
 * @to: kernfs node to which path is needed
 * @from: kernfs node which should be treated as root for the path
 * @buf: buffer to copy the path into
 * @buflen: size of @buf
 *
 * Wrapper around kernfs_path_from_node_locked() that takes
 * kernfs_rename_lock; can be called from any context.  Returns the
 * length of the full path (which may exceed @buflen, indicating
 * truncation) or a negative errno.
 */
int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
			  char *buf, size_t buflen)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	ret = kernfs_path_from_node_locked(to, from, buf, buflen);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kernfs_path_from_node);
222
223
224
225
226
227
228
/**
 * pr_cont_kernfs_name - pr_cont name of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.  The shared
 * kernfs_pr_cont_buf is protected by kernfs_rename_lock, so the
 * pr_cont() must happen while the lock is still held.
 */
void pr_cont_kernfs_name(struct kernfs_node *kn)
{
	unsigned long flags;

	spin_lock_irqsave(&kernfs_rename_lock, flags);

	kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
	pr_cont("%s", kernfs_pr_cont_buf);

	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
}
240
241
242
243
244
245
246
/**
 * pr_cont_kernfs_path - pr_cont path of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.  The shared
 * kernfs_pr_cont_buf is protected by kernfs_rename_lock, so the
 * pr_cont() must happen while the lock is still held.
 */
void pr_cont_kernfs_path(struct kernfs_node *kn)
{
	unsigned long flags;
	int sz;

	spin_lock_irqsave(&kernfs_rename_lock, flags);

	sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf,
					  sizeof(kernfs_pr_cont_buf));
	if (sz < 0) {
		pr_cont("(error)");
		goto out;
	}

	/* sz is the untruncated length; >= buffer size means truncation. */
	if (sz >= sizeof(kernfs_pr_cont_buf)) {
		pr_cont("(name too long)");
		goto out;
	}

	pr_cont("%s", kernfs_pr_cont_buf);

out:
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
}
271
272
273
274
275
276
277
278
/**
 * kernfs_get_parent - determine parent node and pin it
 * @kn: kernfs_node of interest
 *
 * Determines @kn's parent, pins and returns it.  This function can be
 * called from any context.  ->parent is only stable under
 * kernfs_rename_lock, so the reference must be taken before the lock
 * is dropped.
 */
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	unsigned long flags;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	parent = kn->parent;
	kernfs_get(parent);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);

	return parent;
}
291
292
293
294
295
296
297
298
299static unsigned int kernfs_name_hash(const char *name, const void *ns)
300{
301 unsigned long hash = init_name_hash(ns);
302 unsigned int len = strlen(name);
303 while (len--)
304 hash = partial_name_hash(*name++, hash);
305 hash = end_name_hash(hash);
306 hash &= 0x7fffffffU;
307
308 if (hash < 2)
309 hash += 2;
310 if (hash >= INT_MAX)
311 hash = INT_MAX - 1;
312 return hash;
313}
314
315static int kernfs_name_compare(unsigned int hash, const char *name,
316 const void *ns, const struct kernfs_node *kn)
317{
318 if (hash < kn->hash)
319 return -1;
320 if (hash > kn->hash)
321 return 1;
322 if (ns < kn->ns)
323 return -1;
324 if (ns > kn->ns)
325 return 1;
326 return strcmp(name, kn->name);
327}
328
/*
 * rbtree ordering helper: compare sibling @left against @right using
 * @left's precomputed hash, name and namespace tag.
 */
static int kernfs_sd_compare(const struct kernfs_node *left,
			     const struct kernfs_node *right)
{
	return kernfs_name_compare(left->hash, left->name, left->ns, right);
}
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348static int kernfs_link_sibling(struct kernfs_node *kn)
349{
350 struct rb_node **node = &kn->parent->dir.children.rb_node;
351 struct rb_node *parent = NULL;
352
353 while (*node) {
354 struct kernfs_node *pos;
355 int result;
356
357 pos = rb_to_kn(*node);
358 parent = *node;
359 result = kernfs_sd_compare(kn, pos);
360 if (result < 0)
361 node = &pos->rb.rb_left;
362 else if (result > 0)
363 node = &pos->rb.rb_right;
364 else
365 return -EEXIST;
366 }
367
368
369 rb_link_node(&kn->rb, parent, node);
370 rb_insert_color(&kn->rb, &kn->parent->dir.children);
371
372
373 if (kernfs_type(kn) == KERNFS_DIR)
374 kn->parent->dir.subdirs++;
375
376 return 0;
377}
378
379
380
381
382
383
384
385
386
387
388
389
390static bool kernfs_unlink_sibling(struct kernfs_node *kn)
391{
392 if (RB_EMPTY_NODE(&kn->rb))
393 return false;
394
395 if (kernfs_type(kn) == KERNFS_DIR)
396 kn->parent->dir.subdirs--;
397
398 rb_erase(&kn->rb, &kn->parent->dir.children);
399 RB_CLEAR_NODE(&kn->rb);
400 return true;
401}
402
403
404
405
406
407
408
409
410
411
412
413struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
414{
415 if (unlikely(!kn))
416 return NULL;
417
418 if (!atomic_inc_unless_negative(&kn->active))
419 return NULL;
420
421 if (kernfs_lockdep(kn))
422 rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
423 return kn;
424}
425
426
427
428
429
430
431
432
433void kernfs_put_active(struct kernfs_node *kn)
434{
435 int v;
436
437 if (unlikely(!kn))
438 return;
439
440 if (kernfs_lockdep(kn))
441 rwsem_release(&kn->dep_map, _RET_IP_);
442 v = atomic_dec_return(&kn->active);
443 if (likely(v != KN_DEACTIVATED_BIAS))
444 return;
445
446 wake_up_all(&kernfs_root(kn)->deactivate_waitq);
447}
448
449
450
451
452
453
454
455
456
/**
 * kernfs_drain - drain kernfs_node
 * @kn: kernfs_node to drain
 *
 * Drain existing usages of @kn.  Multiple removers may invoke this
 * function concurrently and all will return after draining is complete.
 * @kn must already be deactivated (active count biased negative).
 *
 * Temporarily drops kernfs_mutex while sleeping for active refs to be
 * put, then reacquires it before returning.
 */
static void kernfs_drain(struct kernfs_node *kn)
	__releases(&kernfs_mutex) __acquires(&kernfs_mutex)
{
	struct kernfs_root *root = kernfs_root(kn);

	lockdep_assert_held(&kernfs_mutex);
	WARN_ON_ONCE(kernfs_active(kn));

	mutex_unlock(&kernfs_mutex);

	if (kernfs_lockdep(kn)) {
		/* Annotate the wait as acquiring @kn's active-ref "rwsem". */
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
			lock_contended(&kn->dep_map, _RET_IP_);
	}

	/* Sleep until every active reference has been put. */
	wait_event(root->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);

	if (kernfs_lockdep(kn)) {
		lock_acquired(&kn->dep_map, _RET_IP_);
		rwsem_release(&kn->dep_map, _RET_IP_);
	}

	kernfs_drain_open_files(kn);

	mutex_lock(&kernfs_mutex);
}
486
487
488
489
490
491void kernfs_get(struct kernfs_node *kn)
492{
493 if (kn) {
494 WARN_ON(!atomic_read(&kn->count));
495 atomic_inc(&kn->count);
496 }
497}
498EXPORT_SYMBOL_GPL(kernfs_get);
499
500
501
502
503
504
505
/**
 * kernfs_put - put a reference count on a kernfs_node
 * @kn: the target kernfs_node (may be NULL)
 *
 * Put a reference count of @kn and destroy it if it reached zero.
 * Destruction walks up the parent chain iteratively (the "repeat" loop)
 * instead of recursing, so releasing a deep subtree can't overflow the
 * stack.  When the hierarchy root itself is released, the kernfs_root
 * is freed too.
 */
void kernfs_put(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	struct kernfs_root *root;

	if (!kn || !atomic_dec_and_test(&kn->count))
		return;
	root = kernfs_root(kn);
 repeat:
	/*
	 * Moving/renaming is always done while holding a reference, so
	 * kn->parent won't change beneath us here.
	 */
	parent = kn->parent;

	WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
		  "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
		  parent ? parent->name : "", kn->name, atomic_read(&kn->active));

	/* A symlink pins its target; drop that reference now. */
	if (kernfs_type(kn) == KERNFS_LINK)
		kernfs_put(kn->symlink.target_kn);

	kfree_const(kn->name);

	if (kn->iattr) {
		simple_xattrs_free(&kn->iattr->xattrs);
		kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
	}
	/* Release the node's inode number back to the root's IDR. */
	spin_lock(&kernfs_idr_lock);
	idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
	spin_unlock(&kernfs_idr_lock);
	kmem_cache_free(kernfs_node_cache, kn);

	/* Drop the parent's ref; keep unwinding while counts hit zero. */
	kn = parent;
	if (kn) {
		if (atomic_dec_and_test(&kn->count))
			goto repeat;
	} else {
		/* Just released the root node — free @root too. */
		idr_destroy(&root->ino_idr);
		kfree(root);
	}
}
EXPORT_SYMBOL_GPL(kernfs_put);
550
551
552
553
554
555
556
557
558
559
560
561
562struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
563{
564 if (dentry->d_sb->s_op == &kernfs_sops)
565 return kernfs_dentry_node(dentry);
566 return NULL;
567}
568
569static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
570 struct kernfs_node *parent,
571 const char *name, umode_t mode,
572 kuid_t uid, kgid_t gid,
573 unsigned flags)
574{
575 struct kernfs_node *kn;
576 u32 id_highbits;
577 int ret;
578
579 name = kstrdup_const(name, GFP_KERNEL);
580 if (!name)
581 return NULL;
582
583 kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
584 if (!kn)
585 goto err_out1;
586
587 idr_preload(GFP_KERNEL);
588 spin_lock(&kernfs_idr_lock);
589 ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
590 if (ret >= 0 && ret < root->last_id_lowbits)
591 root->id_highbits++;
592 id_highbits = root->id_highbits;
593 root->last_id_lowbits = ret;
594 spin_unlock(&kernfs_idr_lock);
595 idr_preload_end();
596 if (ret < 0)
597 goto err_out2;
598
599 kn->id = (u64)id_highbits << 32 | ret;
600
601 atomic_set(&kn->count, 1);
602 atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
603 RB_CLEAR_NODE(&kn->rb);
604
605 kn->name = name;
606 kn->mode = mode;
607 kn->flags = flags;
608
609 if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) {
610 struct iattr iattr = {
611 .ia_valid = ATTR_UID | ATTR_GID,
612 .ia_uid = uid,
613 .ia_gid = gid,
614 };
615
616 ret = __kernfs_setattr(kn, &iattr);
617 if (ret < 0)
618 goto err_out3;
619 }
620
621 if (parent) {
622 ret = security_kernfs_init_security(parent, kn);
623 if (ret)
624 goto err_out3;
625 }
626
627 return kn;
628
629 err_out3:
630 idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
631 err_out2:
632 kmem_cache_free(kernfs_node_cache, kn);
633 err_out1:
634 kfree_const(name);
635 return NULL;
636}
637
638struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
639 const char *name, umode_t mode,
640 kuid_t uid, kgid_t gid,
641 unsigned flags)
642{
643 struct kernfs_node *kn;
644
645 kn = __kernfs_new_node(kernfs_root(parent), parent,
646 name, mode, uid, gid, flags);
647 if (kn) {
648 kernfs_get(parent);
649 kn->parent = parent;
650 }
651 return kn;
652}
653
654
655
656
657
658
659
660
661
662
663
664
/*
 * kernfs_find_and_get_node_by_id - get kernfs_node from node id
 * @root: the kernfs root
 * @id: the target node id: lower 32 bits encode the ino, upper 32 bits
 *      the generation; a zero generation matches any generation
 *
 * RETURNS: NULL on failure.  On success, a kernfs node with its base
 * reference count incremented.
 */
struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
						   u64 id)
{
	struct kernfs_node *kn;
	ino_t ino = kernfs_id_ino(id);
	u32 gen = kernfs_id_gen(id);

	spin_lock(&kernfs_idr_lock);

	kn = idr_find(&root->ino_idr, (u32)ino);
	if (!kn)
		goto err_unlock;

	if (sizeof(ino_t) >= sizeof(u64)) {
		/* 64-bit ino: the whole id is the ino, gen is unused. */
		if (kernfs_ino(kn) != ino)
			goto err_unlock;
	} else {
		/* 32-bit ino: disambiguate IDR reuse via the generation. */
		if (unlikely(gen && kernfs_gen(kn) != gen))
			goto err_unlock;
	}

	/*
	 * ACTIVATED is protected with kernfs_mutex but it was clear when
	 * @kn was added to idr and we just wanna see it set.  No need to
	 * grab kernfs_mutex.  Also reject nodes whose base ref already
	 * dropped to zero (being destroyed).
	 */
	if (unlikely(!(kn->flags & KERNFS_ACTIVATED) ||
		     !atomic_inc_not_zero(&kn->count)))
		goto err_unlock;

	spin_unlock(&kernfs_idr_lock);
	return kn;
err_unlock:
	spin_unlock(&kernfs_idr_lock);
	return NULL;
}
703
704
705
706
707
708
709
710
711
712
713
714
715
/**
 * kernfs_add_one - add kernfs_node to parent
 * @kn: kernfs_node to be added, with @kn->parent already initialized
 *
 * Links @kn into its parent's sibling rbtree and, unless the root was
 * created with KERNFS_ROOT_CREATE_DEACTIVATED, activates it.
 *
 * RETURNS: 0 on success; -EINVAL on namespace mismatch or non-directory
 * parent; -ENOENT if the parent is an empty dir or being removed;
 * -EEXIST if a sibling with the same name already exists.
 */
int kernfs_add_one(struct kernfs_node *kn)
{
	struct kernfs_node *parent = kn->parent;
	struct kernfs_iattrs *ps_iattr;
	bool has_ns;
	int ret;

	mutex_lock(&kernfs_mutex);

	ret = -EINVAL;
	/* @kn->ns must be set iff the parent has namespaces enabled. */
	has_ns = kernfs_ns_enabled(parent);
	if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
		 has_ns ? "required" : "invalid", parent->name, kn->name))
		goto out_unlock;

	if (kernfs_type(parent) != KERNFS_DIR)
		goto out_unlock;

	ret = -ENOENT;
	if (parent->flags & KERNFS_EMPTY_DIR)
		goto out_unlock;

	/* Refuse to add under a parent that has been deactivated. */
	if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
		goto out_unlock;

	kn->hash = kernfs_name_hash(kn->name, kn->ns);

	ret = kernfs_link_sibling(kn);
	if (ret)
		goto out_unlock;

	/* Update parent directory timestamps. */
	ps_iattr = parent->iattr;
	if (ps_iattr) {
		ktime_get_real_ts64(&ps_iattr->ia_ctime);
		ps_iattr->ia_mtime = ps_iattr->ia_ctime;
	}

	mutex_unlock(&kernfs_mutex);

	/*
	 * Activate if so requested.  The request can be performed
	 * outside kernfs_mutex as activation only needs the node to be
	 * linked, which it already is.
	 */
	if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);
	return 0;

out_unlock:
	mutex_unlock(&kernfs_mutex);
	return ret;
}
771
772
773
774
775
776
777
778
779
780
781static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
782 const unsigned char *name,
783 const void *ns)
784{
785 struct rb_node *node = parent->dir.children.rb_node;
786 bool has_ns = kernfs_ns_enabled(parent);
787 unsigned int hash;
788
789 lockdep_assert_held(&kernfs_mutex);
790
791 if (has_ns != (bool)ns) {
792 WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
793 has_ns ? "required" : "invalid", parent->name, name);
794 return NULL;
795 }
796
797 hash = kernfs_name_hash(name, ns);
798 while (node) {
799 struct kernfs_node *kn;
800 int result;
801
802 kn = rb_to_kn(node);
803 result = kernfs_name_compare(hash, name, ns, kn);
804 if (result < 0)
805 node = node->rb_left;
806 else if (result > 0)
807 node = node->rb_right;
808 else
809 return kn;
810 }
811 return NULL;
812}
813
/*
 * Walk the "/"-separated @path starting at @parent, resolving each
 * component with kernfs_find_ns().  Caller must hold kernfs_mutex.
 * Returns the final node, or NULL if the path is too long or any
 * component is missing.
 */
static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
					  const unsigned char *path,
					  const void *ns)
{
	size_t len;
	char *p, *name;

	lockdep_assert_held(&kernfs_mutex);

	/*
	 * strsep() below modifies the string, so copy @path into the
	 * shared scratch buffer; kernfs_rename_lock protects it.
	 */
	spin_lock_irq(&kernfs_rename_lock);

	len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));

	/* strlcpy() returns the untruncated length; reject overflow. */
	if (len >= sizeof(kernfs_pr_cont_buf)) {
		spin_unlock_irq(&kernfs_rename_lock);
		return NULL;
	}

	p = kernfs_pr_cont_buf;

	/* Empty components (leading / doubled slashes) are skipped. */
	while ((name = strsep(&p, "/")) && parent) {
		if (*name == '\0')
			continue;
		parent = kernfs_find_ns(parent, name, ns);
	}

	spin_unlock_irq(&kernfs_rename_lock);

	return parent;
}
845
846
847
848
849
850
851
852
853
854
855
/**
 * kernfs_find_and_get_ns - find and get kernfs_node with the given name
 * @parent: kernfs_node to search under
 * @name: name to look for
 * @ns: the namespace tag to use
 *
 * Look for a node named @name under @parent and get a reference if
 * found.  The reference must be taken while still holding kernfs_mutex
 * so the node can't be removed in between.
 *
 * RETURNS: pointer to the found kernfs_node, or NULL.
 */
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
					   const char *name, const void *ns)
{
	struct kernfs_node *kn;

	mutex_lock(&kernfs_mutex);
	kn = kernfs_find_ns(parent, name, ns);
	kernfs_get(kn);
	mutex_unlock(&kernfs_mutex);

	return kn;
}
EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
869
870
871
872
873
874
875
876
877
878
879
/**
 * kernfs_walk_and_get_ns - find and get kernfs_node with the given path
 * @parent: kernfs_node to search under
 * @path: "/"-separated path to look for
 * @ns: the namespace tag to use
 *
 * Resolve @path under @parent and get a reference if found; the
 * reference is taken while still holding kernfs_mutex so the node
 * can't be removed in between.
 *
 * RETURNS: pointer to the found kernfs_node, or NULL.
 */
struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
					   const char *path, const void *ns)
{
	struct kernfs_node *kn;

	mutex_lock(&kernfs_mutex);
	kn = kernfs_walk_ns(parent, path, ns);
	kernfs_get(kn);
	mutex_unlock(&kernfs_mutex);

	return kn;
}
893
894
895
896
897
898
899
900
901
/**
 * kernfs_create_root - create a new kernfs hierarchy
 * @scops: optional syscall operations for the hierarchy
 * @flags: KERNFS_ROOT_* flags
 * @priv: opaque data associated with the new directory
 *
 * Returns the root of the new hierarchy on success, ERR_PTR() value on
 * failure.
 */
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
				       unsigned int flags, void *priv)
{
	struct kernfs_root *root;
	struct kernfs_node *kn;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	idr_init(&root->ino_idr);
	INIT_LIST_HEAD(&root->supers);

	/*
	 * On 64bit ino setups, id is ino.  On 32bit, low 32bits are ino.
	 * High bits generation.  The starting value for both ino and
	 * genenration is 1.  Initialize upper 32bit allocation
	 * accordingly.
	 */
	if (sizeof(ino_t) >= sizeof(u64))
		root->id_highbits = 0;
	else
		root->id_highbits = 1;

	/* The root directory node itself; it has no parent. */
	kn = __kernfs_new_node(root, NULL, "", S_IFDIR | S_IRUGO | S_IXUGO,
			       GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
			       KERNFS_DIR);
	if (!kn) {
		idr_destroy(&root->ino_idr);
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}

	kn->priv = priv;
	kn->dir.root = root;

	root->syscall_ops = scops;
	root->flags = flags;
	root->kn = kn;
	init_waitqueue_head(&root->deactivate_waitq);

	if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);

	return root;
}
948
949
950
951
952
953
954
955
/**
 * kernfs_destroy_root - destroy a kernfs hierarchy
 * @root: root of the hierarchy to destroy
 *
 * Destroy the hierarchy anchored at @root by removing all existing
 * directories and destroying @root.  Freeing of @root itself happens
 * in kernfs_put() when the root node's last reference is dropped.
 */
void kernfs_destroy_root(struct kernfs_root *root)
{
	kernfs_remove(root->kn);	/* will also free @root */
}
960
961
962
963
964
965
966
967
968
969
970
971
972
973struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
974 const char *name, umode_t mode,
975 kuid_t uid, kgid_t gid,
976 void *priv, const void *ns)
977{
978 struct kernfs_node *kn;
979 int rc;
980
981
982 kn = kernfs_new_node(parent, name, mode | S_IFDIR,
983 uid, gid, KERNFS_DIR);
984 if (!kn)
985 return ERR_PTR(-ENOMEM);
986
987 kn->dir.root = parent->dir.root;
988 kn->ns = ns;
989 kn->priv = priv;
990
991
992 rc = kernfs_add_one(kn);
993 if (!rc)
994 return kn;
995
996 kernfs_put(kn);
997 return ERR_PTR(rc);
998}
999
1000
1001
1002
1003
1004
1005
1006
1007struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
1008 const char *name)
1009{
1010 struct kernfs_node *kn;
1011 int rc;
1012
1013
1014 kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR,
1015 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR);
1016 if (!kn)
1017 return ERR_PTR(-ENOMEM);
1018
1019 kn->flags |= KERNFS_EMPTY_DIR;
1020 kn->dir.root = parent->dir.root;
1021 kn->ns = NULL;
1022 kn->priv = NULL;
1023
1024
1025 rc = kernfs_add_one(kn);
1026 if (!rc)
1027 return kn;
1028
1029 kernfs_put(kn);
1030 return ERR_PTR(rc);
1031}
1032
/*
 * Dentry revalidation for kernfs: a dentry stays valid only while the
 * backing node is active and still has the same parent, name and
 * namespace.  Returns 1 for valid, 0 for invalid, -ECHILD for RCU-walk
 * (we always need to take kernfs_mutex, so ref-walk is required).
 */
static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct kernfs_node *kn;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	/* Negative hashed dentry?  Kernfs never leaves these valid. */
	if (d_really_is_negative(dentry))
		goto out_bad_unlocked;

	kn = kernfs_dentry_node(dentry);
	mutex_lock(&kernfs_mutex);

	/* The kernfs node has been deactivated */
	if (!kernfs_active(kn))
		goto out_bad;

	/* The kernfs node has been moved? */
	if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
		goto out_bad;

	/* The kernfs node has been renamed */
	if (strcmp(dentry->d_name.name, kn->name) != 0)
		goto out_bad;

	/* The kernfs node has been moved to a different namespace */
	if (kn->parent && kernfs_ns_enabled(kn->parent) &&
	    kernfs_info(dentry->d_sb)->ns != kn->ns)
		goto out_bad;

	mutex_unlock(&kernfs_mutex);
	return 1;
out_bad:
	mutex_unlock(&kernfs_mutex);
out_bad_unlocked:
	return 0;
}
1071
/* Dentry operations shared by all kernfs-backed superblocks. */
const struct dentry_operations kernfs_dops = {
	.d_revalidate	= kernfs_dop_revalidate,
};
1075
/*
 * Inode lookup: resolve @dentry's name under the kernfs directory
 * behind @dir, honoring the superblock's namespace tag when the parent
 * has namespaces enabled.  Inactive nodes are treated as non-existent.
 * Returns the result of d_splice_alias() (NULL or a dentry), NULL for
 * a negative lookup, or ERR_PTR(-ENOMEM) if the inode can't be set up.
 */
static struct dentry *kernfs_iop_lookup(struct inode *dir,
					struct dentry *dentry,
					unsigned int flags)
{
	struct dentry *ret;
	struct kernfs_node *parent = dir->i_private;
	struct kernfs_node *kn;
	struct inode *inode;
	const void *ns = NULL;

	mutex_lock(&kernfs_mutex);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dir->i_sb)->ns;

	kn = kernfs_find_ns(parent, dentry->d_name.name, ns);

	/* no such entry */
	if (!kn || !kernfs_active(kn)) {
		ret = NULL;
		goto out_unlock;
	}

	/* attach dentry and inode */
	inode = kernfs_get_inode(dir->i_sb, kn);
	if (!inode) {
		ret = ERR_PTR(-ENOMEM);
		goto out_unlock;
	}

	/* instantiate and hash dentry */
	ret = d_splice_alias(inode, dentry);
 out_unlock:
	mutex_unlock(&kernfs_mutex);
	return ret;
}
1112
1113static int kernfs_iop_mkdir(struct user_namespace *mnt_userns,
1114 struct inode *dir, struct dentry *dentry,
1115 umode_t mode)
1116{
1117 struct kernfs_node *parent = dir->i_private;
1118 struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
1119 int ret;
1120
1121 if (!scops || !scops->mkdir)
1122 return -EPERM;
1123
1124 if (!kernfs_get_active(parent))
1125 return -ENODEV;
1126
1127 ret = scops->mkdir(parent, dentry->d_name.name, mode);
1128
1129 kernfs_put_active(parent);
1130 return ret;
1131}
1132
1133static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
1134{
1135 struct kernfs_node *kn = kernfs_dentry_node(dentry);
1136 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
1137 int ret;
1138
1139 if (!scops || !scops->rmdir)
1140 return -EPERM;
1141
1142 if (!kernfs_get_active(kn))
1143 return -ENODEV;
1144
1145 ret = scops->rmdir(kn);
1146
1147 kernfs_put_active(kn);
1148 return ret;
1149}
1150
1151static int kernfs_iop_rename(struct user_namespace *mnt_userns,
1152 struct inode *old_dir, struct dentry *old_dentry,
1153 struct inode *new_dir, struct dentry *new_dentry,
1154 unsigned int flags)
1155{
1156 struct kernfs_node *kn = kernfs_dentry_node(old_dentry);
1157 struct kernfs_node *new_parent = new_dir->i_private;
1158 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
1159 int ret;
1160
1161 if (flags)
1162 return -EINVAL;
1163
1164 if (!scops || !scops->rename)
1165 return -EPERM;
1166
1167 if (!kernfs_get_active(kn))
1168 return -ENODEV;
1169
1170 if (!kernfs_get_active(new_parent)) {
1171 kernfs_put_active(kn);
1172 return -ENODEV;
1173 }
1174
1175 ret = scops->rename(kn, new_parent, new_dentry->d_name.name);
1176
1177 kernfs_put_active(new_parent);
1178 kernfs_put_active(kn);
1179 return ret;
1180}
1181
/* Inode operations for kernfs directories. */
const struct inode_operations kernfs_dir_iops = {
	.lookup		= kernfs_iop_lookup,
	.permission	= kernfs_iop_permission,
	.setattr	= kernfs_iop_setattr,
	.getattr	= kernfs_iop_getattr,
	.listxattr	= kernfs_iop_listxattr,

	.mkdir		= kernfs_iop_mkdir,
	.rmdir		= kernfs_iop_rmdir,
	.rename		= kernfs_iop_rename,
};
1193
1194static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
1195{
1196 struct kernfs_node *last;
1197
1198 while (true) {
1199 struct rb_node *rbn;
1200
1201 last = pos;
1202
1203 if (kernfs_type(pos) != KERNFS_DIR)
1204 break;
1205
1206 rbn = rb_first(&pos->dir.children);
1207 if (!rbn)
1208 break;
1209
1210 pos = rb_to_kn(rbn);
1211 }
1212
1213 return last;
1214}
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
1226 struct kernfs_node *root)
1227{
1228 struct rb_node *rbn;
1229
1230 lockdep_assert_held(&kernfs_mutex);
1231
1232
1233 if (!pos)
1234 return kernfs_leftmost_descendant(root);
1235
1236
1237 if (pos == root)
1238 return NULL;
1239
1240
1241 rbn = rb_next(&pos->rb);
1242 if (rbn)
1243 return kernfs_leftmost_descendant(rb_to_kn(rbn));
1244
1245
1246 return pos->parent;
1247}
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
/**
 * kernfs_activate - activate a node which started deactivated
 * @kn: kernfs_node whose subtree is to be activated
 *
 * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created
 * node gets activated only after this function is called.  Walks the
 * whole subtree in post-order and removes the deactivation bias from
 * every node that hasn't been activated yet; nodes that are already
 * activated are skipped, so the function is idempotent.
 */
void kernfs_activate(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	mutex_lock(&kernfs_mutex);

	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn))) {
		if (pos->flags & KERNFS_ACTIVATED)
			continue;

		/* Non-root nodes must be linked; active count must be biased. */
		WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
		WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);

		atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
		pos->flags |= KERNFS_ACTIVATED;
	}

	mutex_unlock(&kernfs_mutex);
}
1282
/*
 * Remove @kn and its entire subtree: deactivate every node, drain
 * existing users, unlink from the sibling tree and drop the base refs.
 * Caller must hold kernfs_mutex; kernfs_drain() may drop and reacquire
 * it, so the leftmost-descendant loop re-evaluates after each drain.
 */
static void __kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	lockdep_assert_held(&kernfs_mutex);

	/*
	 * Short-circuit if non-root @kn has already finished removal.
	 * This is for kernfs_remove_self() which plays with active ref
	 * after removal.
	 */
	if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
		return;

	pr_debug("kernfs %s: removing\n", kn->name);

	/* prevent any new usage under @kn by deactivating all nodes */
	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn)))
		if (kernfs_active(pos))
			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);

	/* deactivate and unlink the subtree node-by-node */
	do {
		pos = kernfs_leftmost_descendant(kn);

		/*
		 * kernfs_drain() drops kernfs_mutex temporarily and
		 * @pos's base ref could have been put by someone else
		 * by the time the function returns.  Make sure it
		 * doesn't go away underneath us.
		 */
		kernfs_get(pos);

		/*
		 * Drain iff @kn was activated (checking @kn, not @pos,
		 * is deliberate).  This avoids draining and its lockdep
		 * annotations for nodes which have never been activated
		 * and allows embedding kernfs_remove() in create error
		 * paths without worrying about draining.
		 */
		if (kn->flags & KERNFS_ACTIVATED)
			kernfs_drain(pos);
		else
			WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);

		/*
		 * kernfs_unlink_sibling() succeeds once per node.  Use
		 * it to decide who's responsible for cleanups.
		 */
		if (!pos->parent || kernfs_unlink_sibling(pos)) {
			struct kernfs_iattrs *ps_iattr =
				pos->parent ? pos->parent->iattr : NULL;

			/* update timestamps on the parent */
			if (ps_iattr) {
				ktime_get_real_ts64(&ps_iattr->ia_ctime);
				ps_iattr->ia_mtime = ps_iattr->ia_ctime;
			}

			kernfs_put(pos);
		}

		kernfs_put(pos);
	} while (pos != kn);
}
1348
1349
1350
1351
1352
1353
1354
/**
 * kernfs_remove - remove a kernfs_node recursively
 * @kn: the kernfs_node to remove
 *
 * Remove @kn along with all its subdirectories and files.
 */
void kernfs_remove(struct kernfs_node *kn)
{
	mutex_lock(&kernfs_mutex);
	__kernfs_remove(kn);
	mutex_unlock(&kernfs_mutex);
}
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
/**
 * kernfs_break_active_protection - break out of active protection
 * @kn: the self kernfs_node
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  Each invocation
 * must be matched with kernfs_unbreak_active_protection().
 */
void kernfs_break_active_protection(struct kernfs_node *kn)
{
	/*
	 * Take out ourself out of the active ref dependency chain.  If
	 * we're called without an active ref, lockdep will complain.
	 */
	kernfs_put_active(kn);
}
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
/**
 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
 * @kn: the self kernfs_node
 *
 * If kernfs_break_active_protection() was called, this function must be
 * invoked before finishing the kernfs operation.  Note that while this
 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already or be in the process
 * of being removed.  Once this function is called, @kn may be removed
 * at any point and the caller is solely responsible for ensuring that
 * the objects it dereferences are accessible.
 */
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
	/*
	 * @kn->active could be in any state; however, the increment we do
	 * here will be undone as soon as the caller finishes the kernfs
	 * operation and the matching kernfs_put_active() is called, so it
	 * doesn't matter.  Just increment it back and let the put deal
	 * with it.
	 */
	atomic_inc(&kn->active);
	if (kernfs_lockdep(kn))
		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
/**
 * kernfs_remove_self - remove a kernfs_node from its own method
 * @kn: the self kernfs_node to remove
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  This can be used
 * to implement a file operation which deletes itself.
 *
 * For safety, all concurrent self-removal attempts are synchronized:
 * the first invocation performs the removal, the rest wait for it to
 * complete.
 *
 * RETURNS: %true if @kn is removed by this call, otherwise %false.
 */
bool kernfs_remove_self(struct kernfs_node *kn)
{
	bool ret;

	mutex_lock(&kernfs_mutex);
	/* drop our own active ref so removal can drain */
	kernfs_break_active_protection(kn);

	/*
	 * SUICIDAL is used to arbitrate among competing invocations.
	 * Only the first one will actually perform the removal.  When
	 * the removal is complete, SUICIDED is set and the active ref is
	 * restored while kernfs_mutex is held.  The ones which lost the
	 * arbitration wait for the removal to complete before returning.
	 */
	if (!(kn->flags & KERNFS_SUICIDAL)) {
		kn->flags |= KERNFS_SUICIDAL;
		__kernfs_remove(kn);
		kn->flags |= KERNFS_SUICIDED;
		ret = true;
	} else {
		wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
		DEFINE_WAIT(wait);

		/* another invocation won: wait until its removal finishes */
		while (true) {
			prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);

			if ((kn->flags & KERNFS_SUICIDED) &&
			    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
				break;

			mutex_unlock(&kernfs_mutex);
			schedule();
			mutex_lock(&kernfs_mutex);
		}
		finish_wait(waitq, &wait);
		WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
		ret = false;
	}

	/*
	 * This must be done while kernfs_mutex is held; otherwise, waiting
	 * for SUICIDED && deactivated could finish prematurely.
	 */
	kernfs_unbreak_active_protection(kn);

	mutex_unlock(&kernfs_mutex);
	return ret;
}
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
1503 const void *ns)
1504{
1505 struct kernfs_node *kn;
1506
1507 if (!parent) {
1508 WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
1509 name);
1510 return -ENOENT;
1511 }
1512
1513 mutex_lock(&kernfs_mutex);
1514
1515 kn = kernfs_find_ns(parent, name, ns);
1516 if (kn)
1517 __kernfs_remove(kn);
1518
1519 mutex_unlock(&kernfs_mutex);
1520
1521 if (kn)
1522 return 0;
1523 else
1524 return -ENOENT;
1525}
1526
1527
1528
1529
1530
1531
1532
1533
/**
 * kernfs_rename_ns - move and rename a kernfs_node
 * @kn: target node
 * @new_parent: new parent to put @kn under
 * @new_name: new name
 * @new_ns: new namespace tag
 *
 * Moves @kn under @new_parent with name @new_name and tag @new_ns.  The
 * pointer fields themselves are swapped under kernfs_rename_lock so
 * lockless readers (kernfs_name()/kernfs_path_from_node()) always see a
 * consistent parent/name pair.
 *
 * RETURNS: 0 on success; -EINVAL when @kn is the hierarchy root;
 * -ENOENT when either node is inactive or @new_parent is an empty dir;
 * -EEXIST when the destination name is taken; -ENOMEM on allocation
 * failure.
 */
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
		     const char *new_name, const void *new_ns)
{
	struct kernfs_node *old_parent;
	const char *old_name = NULL;
	int error;

	/* can't move or rename root */
	if (!kn->parent)
		return -EINVAL;

	mutex_lock(&kernfs_mutex);

	error = -ENOENT;
	if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
	    (new_parent->flags & KERNFS_EMPTY_DIR))
		goto out;

	/* nothing to do when nothing changes */
	error = 0;
	if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
	    (strcmp(kn->name, new_name) == 0))
		goto out;	/* nothing to rename */

	error = -EEXIST;
	if (kernfs_find_ns(new_parent, new_name, new_ns))
		goto out;

	/* rename kernfs_node: dup the new name up front if it changes */
	if (strcmp(kn->name, new_name) != 0) {
		error = -ENOMEM;
		new_name = kstrdup_const(new_name, GFP_KERNEL);
		if (!new_name)
			goto out;
	} else {
		new_name = NULL;
	}

	/*
	 * Move to the appropriate place in the appropriate directories
	 * rbtree: unlink first, then swap parent/name under rename_lock.
	 */
	kernfs_unlink_sibling(kn);
	kernfs_get(new_parent);

	/* rename_lock protects ->parent and ->name accessors */
	spin_lock_irq(&kernfs_rename_lock);

	old_parent = kn->parent;
	kn->parent = new_parent;

	kn->ns = new_ns;
	if (new_name) {
		old_name = kn->name;
		kn->name = new_name;
	}

	spin_unlock_irq(&kernfs_rename_lock);

	/* re-key and re-insert into the new parent's sibling tree */
	kn->hash = kernfs_name_hash(kn->name, kn->ns);
	kernfs_link_sibling(kn);

	kernfs_put(old_parent);
	kfree_const(old_name);

	error = 0;
 out:
	mutex_unlock(&kernfs_mutex);
	return error;
}
1602
1603
1604static inline unsigned char dt_type(struct kernfs_node *kn)
1605{
1606 return (kn->mode >> 12) & 15;
1607}
1608
/*
 * Release the readdir cursor node that kernfs_fop_readdir() pinned in
 * file->private_data (may be NULL, which kernfs_put() tolerates).
 */
static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
{
	kernfs_put(filp->private_data);
	return 0;
}
1614
/*
 * Find the node to resume a readdir at.  @hash is the f_pos cookie
 * (the node's name hash), @pos is the cached cursor from the previous
 * call (base ref owned by the file; dropped here).  If the cached
 * cursor is stale, re-find the position by hash in the sibling rbtree,
 * then skip forward over inactive nodes and foreign-namespace entries.
 * Caller must hold kernfs_mutex.
 */
static struct kernfs_node *kernfs_dir_pos(const void *ns,
	struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
{
	if (pos) {
		/* cached cursor is valid only if still active, same parent
		 * and same hash cookie */
		int valid = kernfs_active(pos) &&
			pos->parent == parent && hash == pos->hash;
		kernfs_put(pos);
		if (!valid)
			pos = NULL;
	}
	/* hash 0/1 are "." and ".."; INT_MAX is EOF — nothing to find */
	if (!pos && (hash > 1) && (hash < INT_MAX)) {
		struct rb_node *node = parent->dir.children.rb_node;
		while (node) {
			pos = rb_to_kn(node);

			if (hash < pos->hash)
				node = node->rb_left;
			else if (hash > pos->hash)
				node = node->rb_right;
			else
				break;
		}
	}

	/* Skip over entries which are dying/dead or in the wrong namespace */
	while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
		struct rb_node *node = rb_next(&pos->rb);
		if (!node)
			pos = NULL;
		else
			pos = rb_to_kn(node);
	}
	return pos;
}
1648
1649static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
1650 struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
1651{
1652 pos = kernfs_dir_pos(ns, parent, ino, pos);
1653 if (pos) {
1654 do {
1655 struct rb_node *node = rb_next(&pos->rb);
1656 if (!node)
1657 pos = NULL;
1658 else
1659 pos = rb_to_kn(node);
1660 } while (pos && (!kernfs_active(pos) || pos->ns != ns));
1661 }
1662 return pos;
1663}
1664
/*
 * readdir for kernfs directories.  Uses the entry's name hash as the
 * f_pos cookie and caches the current node (with a base ref) in
 * file->private_data so iteration can resume after dropping
 * kernfs_mutex around each dir_emit() (which may fault on the user
 * buffer and must not be called under the mutex).
 */
static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct kernfs_node *parent = kernfs_dentry_node(dentry);
	struct kernfs_node *pos = file->private_data;
	const void *ns = NULL;

	if (!dir_emit_dots(file, ctx))
		return 0;
	mutex_lock(&kernfs_mutex);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dentry->d_sb)->ns;

	for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
	     pos;
	     pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
		const char *name = pos->name;
		unsigned int type = dt_type(pos);
		int len = strlen(name);
		ino_t ino = kernfs_ino(pos);

		ctx->pos = pos->hash;
		file->private_data = pos;
		/* pin @pos so it survives while the mutex is dropped */
		kernfs_get(pos);

		mutex_unlock(&kernfs_mutex);
		if (!dir_emit(ctx, name, len, ino, type))
			return 0;	/* @pos stays pinned for resume */
		mutex_lock(&kernfs_mutex);
	}
	mutex_unlock(&kernfs_mutex);
	file->private_data = NULL;
	ctx->pos = INT_MAX;	/* EOF cookie */
	return 0;
}
1701
/* File operations for kernfs directories. */
const struct file_operations kernfs_dir_fops = {
	.read		= generic_read_dir,
	.iterate_shared	= kernfs_fop_readdir,
	.release	= kernfs_dir_fop_release,
	.llseek		= generic_file_llseek,
};
1708