1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/syscalls.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/fsnotify.h>
22#include <linux/slab.h>
23#include <linux/init.h>
24#include <linux/hash.h>
25#include <linux/cache.h>
26#include <linux/export.h>
27#include <linux/mount.h>
28#include <linux/file.h>
29#include <linux/uaccess.h>
30#include <linux/security.h>
31#include <linux/seqlock.h>
32#include <linux/swap.h>
33#include <linux/bootmem.h>
34#include <linux/fs_struct.h>
35#include <linux/hardirq.h>
36#include <linux/bit_spinlock.h>
37#include <linux/rculist_bl.h>
38#include <linux/prefetch.h>
39#include <linux/ratelimit.h>
40#include <linux/list_lru.h>
41#include <linux/kasan.h>
42
43#include "internal.h"
44#include "mount.h"
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
/*
 * sysctl knob: relative weight of dcache/icache reclaim vs. page cache
 * reclaim (100 == equal pressure).
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

/*
 * Global seqlock used to keep d_parent/d_name chains consistent across
 * renames; lockless walkers retry (see d_walk()'s rename_retry path).
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

/* Slab cache from which all struct dentry objects are allocated. */
static struct kmem_cache *dentry_cache __read_mostly;

/* Shared canonical names (used e.g. for nameless/root dentries). */
const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
97
98
99
100
101
102
103
104
105
106
static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

/* Global dentry hash table; sized at boot (see d_hash_shift). */
static struct hlist_bl_head *dentry_hashtable __read_mostly;

/*
 * Map a 32-bit name hash to its bucket: the top d_hash_shift bits of
 * the hash index the table.
 */
static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> (32 - d_hash_shift));
}
116
#define IN_LOOKUP_SHIFT 10
/* Fixed-size hash of dentries currently being looked up (d_in_lookup()). */
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

/*
 * Bucket for an in-flight lookup: mix the parent pointer (scaled down
 * by L1_CACHE_BYTES, since dentries are cacheline-ish aligned) into the
 * name hash so equal names under different parents spread out.
 */
static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
126
127
128
/* Statistics gathering, reported through the fs sysctl interface. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

/* Per-cpu counters; summed lazily by get_nr_dentry*() below. */
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
135
136#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
137
138
139
140
141
142
143
144
145
146
147
148
149
/*
 * Sum the per-cpu dentry counters.  Because the per-cpu updates are
 * unsynchronized, the total can be transiently negative; clamp to 0.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}
158
/* Same as get_nr_dentry(), but for the LRU ("unused") counter. */
static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}
167
/*
 * sysctl handler for the dentry-state table: refresh the summed
 * counters in dentry_stat, then report via the generic ulong handler.
 */
int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
175#endif
176
177
178
179
180
181#ifdef CONFIG_DCACHE_WORD_ACCESS
182
183#include <asm/word-at-a-time.h>
184
185
186
187
188
189
190
191
192
/*
 * Compare two names word-at-a-time.  @cs is the dentry's own name and
 * may be dereferenced in full unsigned long chunks (dentry name storage
 * is padded for this - see the kasan_unpoison in __d_alloc); @ct is the
 * caller's name and may be arbitrarily aligned/terminated, hence
 * load_unaligned_zeropad().  The final (possibly partial) word is
 * compared under a byte mask covering only the remaining @tcount bytes.
 *
 * Returns 0 on match, 1 on mismatch.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;		/* last, partial word: mask below */
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
213
214#else
215
/*
 * Portable fallback: compare @tcount bytes of the two names one byte at
 * a time.  Returns 0 if they match, 1 at the first difference.  Callers
 * never pass tcount == 0 (note the do/while).
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs++ != *ct++)
			return 1;
	} while (--tcount);
	return 0;
}
227
228#endif
229
/*
 * Compare @dentry's name against @ct/@tcount.
 *
 * The name pointer is fetched with lockless_dereference() so this is
 * usable under RCU: a concurrent rename may make us compare against a
 * stale or inconsistent name, but never walk off freed memory.
 * NOTE(review): callers doing lockless lookups are presumed to
 * re-validate the result under d_seq - confirm at the call sites.
 */
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs = lockless_dereference(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}
252
/*
 * Out-of-line storage for names too long for the inline d_iname buffer.
 * Reference-counted (snapshots share it); the refcount and the RCU head
 * overlay each other - the rcu_head is only needed once the last
 * reference is gone and the name is being freed.
 */
struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];	/* flexible array holding the NUL-terminated name */
};

/* Recover the external_name container from dentry->d_name.name. */
static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}
265
/* RCU callback: free a dentry whose name is stored inline. */
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

/* RCU callback: free a dentry together with its external name. */
static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}
279
/* True if the dentry's name lives outside the inline d_iname buffer. */
static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}
284
/*
 * Capture a stable reference to @dentry's current name in @name.
 * External names are shared by bumping their refcount; inline names are
 * copied - the whole DNAME_INLINE_LEN buffer, so even if we race with a
 * rename after dropping d_lock we hold a complete, NUL-terminated copy.
 * Pair with release_dentry_name_snapshot().
 */
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		atomic_inc(&p->u.count);
		spin_unlock(&dentry->d_lock);
		name->name = p->name;
	} else {
		memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
		spin_unlock(&dentry->d_lock);
		name->name = name->inline_name;
	}
}
EXPORT_SYMBOL(take_dentry_name_snapshot);
300
/*
 * Drop a snapshot taken by take_dentry_name_snapshot().  For shared
 * external names, drop our reference and RCU-free on last put; inline
 * copies need no cleanup.
 */
void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name != name->inline_name)) {
		struct external_name *p;
		p = container_of(name->name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->u.count)))
			kfree_rcu(p, u.head);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
311
/*
 * Attach @inode to @dentry and set its type bits in one go.  The flags
 * update uses READ_ONCE/WRITE_ONCE so lockless readers see a coherent
 * d_flags value; only the type/fallthru bits are replaced.
 */
static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}

/*
 * Inverse of the above: clear the type bits, then detach the inode.
 * Flags are cleared before d_inode so lockless readers never see a
 * "positive-typed" dentry with a NULL inode.
 */
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
}
333
/*
 * Free a fully dead dentry.  If it holds the last reference to an
 * external name, both are freed together via RCU.  Dentries never
 * exposed to lockless lookup (no DCACHE_RCUACCESS) may be freed
 * immediately; anything else must wait for a grace period.
 */
static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
350
351
352
353
354
/*
 * Detach the inode from @dentry and release it.  Called with both
 * dentry->d_lock and inode->i_lock held; drops both.  For hashed
 * dentries the change is bracketed by a d_seq write so RCU-walk
 * lookups retry.  The inode reference is released through ->d_iput()
 * if the filesystem provides one, otherwise plain iput().
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	bool hashed = !d_unhashed(dentry);

	if (hashed)
		raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	if (hashed)
		raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
/*
 * LRU / shrink-list state helpers.  A dentry is on at most one of:
 * its superblock's LRU (DCACHE_LRU_LIST alone) or a private shrink
 * list (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST).  All of these expect
 * the dentry's d_lock to be held.  D_FLAG_VERIFY asserts the expected
 * combination on entry.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
/* Put an unreferenced dentry on its superblock's LRU. */
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

/* Remove a dentry from the superblock LRU (it is in use again, or dying). */
static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

/* Take a dentry off a private shrink list, clearing both list flags. */
static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

/* Put a dentry (currently on neither list) onto a private shrink list. */
static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}
424
425
426
427
428
429
430
/*
 * Variants used from within a list_lru walk callback, where the LRU
 * list lock is already held and list_lru_isolate*() must be used
 * instead of list_lru_add/del.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_lru_isolate(lru, &dentry->d_lru);
}

/* Move a dentry from the LRU to a private shrink list (walk callback). */
static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}
446
447
448
449
/*
 * Note recent use of a dentry: add it to the LRU if absent, otherwise
 * just mark it referenced so the shrinker gives it a second chance.
 * Caller holds d_lock.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
		dentry->d_flags |= DCACHE_REFERENCED;
}
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
/**
 * __d_drop - unhash a dentry without taking d_lock
 * @dentry: dentry to unhash (caller must hold dentry->d_lock)
 *
 * Removes the dentry from its hash chain so future lookups will not
 * find it, and invalidates d_seq so in-progress RCU-walk lookups
 * retry.  Disconnected root dentries live on the superblock's s_anon
 * list rather than the global hash.  Idempotent: already-unhashed
 * dentries are left alone.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/*
		 * IS_ROOT() dentries (e.g. NFS disconnected aliases) are
		 * anchored in s_anon, everything else in the name hash.
		 */
		if (unlikely(IS_ROOT(dentry)))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
		/* force lockless lookers holding a d_seq sample to retry */
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);

/* Locked wrapper around __d_drop(). */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
504
/*
 * Remove a dying dentry from its parent's d_subdirs list.  Caller
 * holds both the dentry's and the parent's d_lock.
 *
 * The list_head is deliberately NOT reinitialised (__list_del_entry,
 * not list_del_init): directory-cursor dentries may still point at us,
 * and they skip forward over DCACHE_DENTRY_KILLED entries.  To keep
 * that skipping O(1) we also splice our ->next past any cursor
 * dentries immediately following us, so chains of dead entries don't
 * build up between a cursor and the next live child.
 */
static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/* mark dead first, so cursors know to skip us */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/* hop our forward pointer over any trailing cursor dentries */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}
542
/*
 * Actually tear down a dentry: mark its lockref dead, call ->d_prune,
 * take it off the LRU, unhash it, unlink it from its parent and inode,
 * run ->d_release, and free it.
 *
 * Entered with dentry->d_lock, the parent's d_lock (if not root) and
 * the inode's i_lock (if any) held; all are released on the way out.
 * If the dentry is still on somebody's shrink list, freeing is
 * deferred to the shrink walker via DCACHE_MAY_FREE.
 */
static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/* from now on, nobody can get a new reference via the lockref */
	lockref_mark_dead(&dentry->d_lockref);

	/* give the fs a last look at the dentry before it goes away */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		/* shrink-list entries stay put; their owner frees them */
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash, then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);	/* drops d_lock and i_lock */
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* recheck under the lock: a shrink walker may still reference us */
	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
}
588
589
590
591
592
593
594
/*
 * Finish off a zero-refcount dentry, if possible without blocking.
 * Tries to take the inode's i_lock and the parent's d_lock with
 * trylocks (we hold only this dentry's d_lock, so blocking would
 * invert the usual lock order).  On success, kills the dentry and
 * returns the parent, whose reference the caller must now drop.
 * On trylock failure, drops d_lock and returns the dentry itself so
 * the caller can retry from scratch.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto failed;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			if (inode)
				spin_unlock(&inode->i_lock);
			goto failed;
		}
	}

	__dentry_kill(dentry);
	return parent;

failed:
	spin_unlock(&dentry->d_lock);
	return dentry;
}
620
/*
 * Lock the parent of a dentry whose d_lock we hold, respecting the
 * parent-before-child lock order.  Fast path: trylock.  Slow path:
 * drop our d_lock (keeping the dentry pinned via RCU so d_parent stays
 * dereferenceable), lock the parent, re-check that it is still our
 * parent (rename may have moved us), then re-take our d_lock nested.
 *
 * Returns the locked parent, or NULL if the dentry is root, already
 * being torn down (negative refcount), or turned out to be its own
 * parent after the retry loop.  NOTE(review): on the slow path the
 * dentry's state may have changed while d_lock was dropped - callers
 * re-check refcount/flags afterwards.
 */
static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (unlikely(dentry->d_lockref.count < 0))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = ACCESS_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/* raced with a rename?  then chase the new parent */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}
654
655
656
657
658
659
660
661
662
/*
 * Lockless fast path for dput().  Tries to drop a reference without
 * ever taking d_lock.  Returns true if the reference was dropped and
 * no further work is needed; returns false with d_lock HELD and the
 * refcount reset to 1 when the slow path (dput proper) must run.
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * With ->d_delete() in play we must take d_lock whenever the
	 * count would hit zero, so just use the put-or-lock primitive.
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/* otherwise, try the cmpxchg-based lockless decrement */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * Negative return: the lockref couldn't do it locklessly
	 * (contention, or arch without lockref support).  Fall back to
	 * taking the lock; if we weren't the last reference we can
	 * still decrement and be done.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return 1;
		}
		return 0;	/* last ref: slow path, lock held */
	}

	/* nonzero remaining count: we weren't the last reference */
	if (ret)
		return 1;

	/*
	 * Count went to zero locklessly.  If the dentry looks fully
	 * "retained" - hashed, on the LRU and marked referenced - a
	 * zero-count cached dentry is a perfectly normal state and we
	 * can simply leave it there.  The smp_rmb() orders the flag
	 * reads against the preceding count update (pairs with the
	 * ordering in the code that sets those flags).
	 */
	smp_rmb();
	d_flags = ACCESS_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

	/* nothing to do if it's properly cached and still hashed */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return 1;

	/*
	 * Not retained: we must take the lock and go through the slow
	 * path (which may kill the dentry).
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Somebody else may have grabbed a reference while we were
	 * waiting for the lock; if so our zero-count moment is history
	 * and there is nothing left for us to do.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return 1;
	}

	/*
	 * Re-inflate the count to 1 for the slow path: dput() proper
	 * does the real "drop last reference" handling.
	 */
	dentry->d_lockref.count = 1;
	return 0;
}
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
/**
 * dput - release a dentry reference
 * @dentry: dentry to release (NULL is allowed and is a no-op)
 *
 * Drops one reference.  If this was the last one, the dentry is either
 * retained on the LRU (normal cached case) or killed: unhashed
 * dentries, disconnected ones, and those whose ->d_delete() asks for
 * it are torn down immediately.  Killing a dentry may cascade up to
 * parents whose last reference we held, hence the repeat loop.
 * May sleep (cond_resched between kills), so no spinlocks/RCU held by
 * the caller.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	might_sleep();

	/* lockless fast path; on success we're completely done */
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}

	/* slow path: fast_dput() returned with d_lock held, count == 1 */
	rcu_read_unlock();

	WARN_ON(d_in_lookup(dentry));

	/* unhashed dentries can never be found again - kill them now */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		goto kill_it;

	/* let the filesystem veto caching */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	/* keep it cached: park on the LRU and drop our reference */
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry);	/* returns parent (or self on retry) */
	if (dentry) {
		cond_resched();
		goto repeat;
	}
}
EXPORT_SYMBOL(dput);
830
831
832
/* Grab a reference; caller already holds the dentry's d_lock. */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

/* Grab a reference without holding d_lock (lockref does the locking). */
static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
842
/*
 * Return a referenced copy of @dentry's parent.
 *
 * Fast path: under RCU, sample d_parent and try a get-unless-zero on
 * it, then confirm it is still the parent (a rename could have moved
 * us meanwhile); if the confirmation fails the speculative reference
 * is dropped again.  Slow path: lock the candidate parent and re-check
 * it is still our parent before bumping its count - the parent cannot
 * change under us while we hold its d_lock.
 */
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	rcu_read_lock();
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);	/* raced with rename; retry the hard way */
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct
	 * under the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
/*
 * Find a referenced alias (dentry) for @inode, with inode->i_lock held.
 *
 * Prefers a connected, hashed alias; a disconnected IS_ROOT alias is
 * remembered as a fallback and only returned if nothing better turns
 * up.  For directories, unhashed aliases qualify too (a directory has
 * at most one alias).  If the remembered disconnected alias was
 * concurrently unhashed/connected, the scan restarts.  Returns NULL if
 * the inode has no usable alias.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;	/* fallback candidate */
			} else {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		/* re-validate: state may have changed since we saw it */
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}
930
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * Locked wrapper around __d_find_alias(): returns a referenced alias
 * dentry of @inode, or NULL if there is none.  The unlocked emptiness
 * check is only an optimisation to avoid taking i_lock needlessly.
 */
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
943
944
945
946
947
/*
 * Try to kill every currently-unused (zero refcount) alias of @inode.
 * Each successful kill drops i_lock indirectly, so the whole scan
 * restarts from the top after every __dentry_kill().  The refcount is
 * re-checked after lock_parent(), which may have dropped and retaken
 * our d_lock.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
970
/*
 * Kill every dentry on a private shrink list (built by d_shrink_add /
 * d_lru_shrink_move).  Entries that regained a reference, were already
 * killed elsewhere, or whose inode lock can't be taken without
 * blocking are skipped or re-queued.  After killing a dentry we also
 * walk up the ancestry, killing each parent for which ours was the
 * last reference.
 */
static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry, *parent;

	while (!list_empty(list)) {
		struct inode *inode;
		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		parent = lock_parent(dentry);

		/*
		 * Take it off the shrink list first; if we end up not
		 * killing it, it simply stays off all lists.
		 */
		d_shrink_del(dentry);

		/*
		 * Raced with a reference grab (lock_parent() may have
		 * dropped our d_lock): leave the dentry alone.
		 */
		if (dentry->d_lockref.count > 0) {
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		/* already killed by someone else; free if we now may */
		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}

		inode = dentry->d_inode;
		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
			/* can't take i_lock now: requeue and come back */
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		__dentry_kill(dentry);

		/*
		 * We killed a child of @parent and held a reference to
		 * the parent; drop it, and keep climbing while each
		 * parent's count drops to zero under our put.
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
			parent = lock_parent(dentry);
			if (dentry->d_lockref.count != 1) {
				dentry->d_lockref.count--;
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				break;
			}
			inode = dentry->d_inode;	/* can't be NULL: it was a parent */
			if (unlikely(!spin_trylock(&inode->i_lock))) {
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				cpu_relax();
				continue;	/* retry the same dentry */
			}
			__dentry_kill(dentry);
			dentry = parent;
		}
	}
}
1050
/*
 * list_lru walk callback used by prune_dcache_sb().  Decides, per
 * dentry, whether to reclaim it (move to the freeable list), give it
 * another round on the LRU (referenced), or drop it from the LRU
 * entirely (it is in use again).
 */
static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * We hold the LRU lock, so d_lock must be trylocked to avoid
	 * inverting the usual d_lock -> LRU-lock ordering; skip on
	 * contention and let a later walk retry this entry.
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries (count != 0) shouldn't be on the LRU at
	 * all; just detach them rather than reclaim.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);
		/*
		 * Recently used: clear the bit and rotate to the LRU
		 * tail, giving the dentry one more pass before it
		 * becomes reclaimable.
		 */
		return LRU_ROTATE;
	}

	/* cold and unused: hand it to the shrink list for killing */
	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
/**
 * prune_dcache_sb - shrink the dcache for a superblock
 * @sb: superblock
 * @sc: shrink control, holding the nr_to_scan / memcg context
 *
 * Walks @sb's dentry LRU, collecting reclaimable dentries onto a
 * private dispose list, then kills them.  Returns the number of
 * dentries removed from the LRU.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}
1131
/*
 * Unconditional variant of dentry_lru_isolate() used when emptying a
 * superblock's whole LRU (shrink_dcache_sb): every entry is moved to
 * the freeable list, regardless of REFERENCED state or refcount.
 */
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/* trylock for the same lock-ordering reason as dentry_lru_isolate() */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1151
1152
1153
1154
1155
1156
1157
1158
1159
/**
 * shrink_dcache_sb - shrink the dcache for a superblock
 * @sb: superblock
 *
 * Repeatedly drains the superblock's dentry LRU (in batches of 1024)
 * until it is empty, killing everything collected.  Used when the
 * filesystem goes away or needs its cache fully dropped.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, 1024);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
		cond_resched();
	} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
1176
1177
1178
1179
1180
1181
1182
1183
/*
 * Return codes for d_walk() callbacks:
 * D_WALK_CONTINUE - descend/continue as normal
 * D_WALK_QUIT     - abort the whole walk
 * D_WALK_NORETRY  - continue, but don't restart on rename races
 * D_WALK_SKIP     - skip this dentry's subtree
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering each dentry
 * @finish:	callback when the walk completes without aborting
 *
 * Depth-first walk of @parent's subtree, calling @enter() on every
 * dentry with that dentry's d_lock held.  Renames are detected via
 * rename_lock: if the seqcount changes mid-walk, the whole walk
 * restarts, the second time with the lock held exclusively (unless a
 * callback returned D_WALK_NORETRY).  Cursor dentries are skipped;
 * ascending back to a parent is done under RCU so a concurrently
 * killed child's d_parent remains safe to follow.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		/* readdir cursors are placeholders, not real children */
		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			/*
			 * Descend: hand the "parent" lock role to the
			 * child (lockdep bookkeeping via spin_release /
			 * spin_acquire, no actual unlock of the child).
			 */
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);	/* must not already hold rename_lock exclusively */
	if (!retry)
		return;
	seq = 1;		/* retake the walk with rename_lock held */
	goto again;
}
1308
/* d_walk() cookie for path_has_submounts(). */
struct check_mount {
	struct vfsmount *mnt;		/* mount we're searching under */
	unsigned int mounted;		/* set to 1 when a mountpoint is found */
};

/*
 * d_walk() callback: stop the walk as soon as some dentry in the
 * subtree is a mountpoint within info->mnt.
 */
static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
	struct check_mount *info = data;
	struct path path = { .mnt = info->mnt, .dentry = dentry };

	if (likely(!d_mountpoint(dentry)))
		return D_WALK_CONTINUE;
	if (__path_is_mountpoint(&path)) {
		info->mounted = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}
1327
1328
1329
1330
1331
1332
1333
1334
1335
/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Returns nonzero if any dentry in @parent's subtree is a mountpoint
 * for @parent->mnt.  mount_lock is held across the walk so the mount
 * table can't change underneath us.
 */
int path_has_submounts(const struct path *parent)
{
	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

	read_seqlock_excl(&mount_lock);
	d_walk(parent->dentry, &data, path_check_mount, NULL);
	read_sequnlock_excl(&mount_lock);

	return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);
1347
1348
1349
1350
1351
1352
1353
1354
1355
/*
 * Mark @dentry as a mountpoint (DCACHE_MOUNTED).
 *
 * Holding rename_lock keeps the ancestry stable while we verify that
 * no ancestor has been unhashed (mounting under a dead directory is
 * not allowed).  Returns 0 on success, -ENOENT if the dentry or an
 * ancestor is unlinked, -EBUSY if it is already a mountpoint.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
/* d_walk() cookie for collecting shrinkable dentries in a subtree. */
struct select_data {
	struct dentry *start;		/* root of the walk (never collected) */
	struct list_head dispose;	/* collected dentries, ready to kill */
	int found;			/* shrinkable dentries seen this pass */
};

/*
 * d_walk() callback: move every unused (zero-refcount) dentry onto
 * data->dispose; count ones already on someone's shrink list so the
 * caller knows another pass is needed.  Quits early when rescheduling
 * is due, since killing what we have also unpins parents.
 */
static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
1433
1434
1435
1436
1437
1438
1439
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 * Repeats collect-and-kill passes until a walk finds nothing
 * shrinkable left in the subtree.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
1458
/*
 * d_walk() callback for unmount: after shrink_dcache_parent() every
 * leaf should be unreferenced (except the root, where the caller holds
 * the single remaining reference).  Anything else is a leaked dentry -
 * report it loudly but keep walking so all leaks are listed.
 */
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendents; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}
1481
/*
 * Tear down one dentry tree at unmount: shrink all unused children,
 * warn about anything still in use, then drop the root itself.
 * Consumes the caller's reference to @dentry.
 */
static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check, NULL);
	d_drop(dentry);
	dput(dentry);
}
1489
1490
1491
1492
/*
 * destruction for unmount: dispose of the root tree, then every
 * remaining anonymous (disconnected) tree anchored in s_anon.
 * The superblock must no longer be reachable (s_umount held by the
 * unmount path - the trylock WARN is a sanity check on that).
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}
1508
/* d_walk() cookie for d_invalidate(): collect + find a mountpoint. */
struct detach_data {
	struct select_data select;	/* shrinkable-dentry collection */
	struct dentry *mountpoint;	/* first mountpoint found, referenced */
};
/*
 * d_walk() callback: abort the walk at the first mountpoint in the
 * subtree (taking a reference so the caller can detach its mounts);
 * otherwise collect shrinkable dentries like select_collect().
 */
static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
{
	struct detach_data *data = _data;

	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);	/* d_lock held by d_walk */
		data->mountpoint = dentry;
		return D_WALK_QUIT;
	}

	return select_collect(&data->select, dentry);
}

/*
 * d_walk() finish callback: only once a full walk found neither
 * mountpoints nor anything left to shrink is it safe to finally
 * unhash the starting dentry.
 */
static void check_and_drop(void *_data)
{
	struct detach_data *data = _data;

	if (!data->mountpoint && list_empty(&data->select.dispose))
		__d_drop(data->select.start);
}
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * Makes the dentry unreachable: negative dentries are simply dropped;
 * positive ones have their whole subtree shrunk and any mounts in it
 * detached, looping until a clean walk lets check_and_drop() unhash
 * the dentry.  No-op if the dentry is already unhashed.
 */
void d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		return;
	}

	for (;;) {
		struct detach_data data;

		data.mountpoint = NULL;
		INIT_LIST_HEAD(&data.select.dispose);
		data.select.start = dentry;
		data.select.found = 0;

		d_walk(dentry, &data, detach_and_collect, check_and_drop);

		if (!list_empty(&data.select.dispose))
			shrink_dentry_list(&data.select.dispose);
		else if (!data.mountpoint)
			return;		/* clean walk: check_and_drop dropped it */

		if (data.mountpoint) {
			detach_mounts(data.mountpoint);
			dput(data.mountpoint);
		}
		cond_resched();
	}
}
EXPORT_SYMBOL(d_invalidate);
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name (NULL means an anonymous "/" placeholder)
 *
 * Allocates a dentry: the returned dentry is not hashed, has no parent
 * set (d_parent points to itself) and holds one reference.  Long names
 * go into a refcounted external_name allocation; short ones into the
 * inline d_iname buffer.  Returns NULL on allocation (or ->d_init())
 * failure.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * Keep the last byte of d_iname zeroed: dentry_string_cmp()
	 * may read the name a word at a time and must never run off
	 * into uninitialised memory.
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		name = &slash_name;
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len,
						  GFP_KERNEL_ACCOUNT);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);
		dname = p->name;
		/* word-at-a-time compares may over-read up to a word */
		if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
			kasan_unpoison_shadow(dname,
				round_up(name->len + 1,	sizeof(unsigned long)));
	} else  {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;	/* root of its own (unattached) tree */
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	dentry->d_flags |= DCACHE_RCUACCESS;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
1700EXPORT_SYMBOL(d_alloc);
1701
/*
 * d_alloc_cursor - allocate an anonymous "/" dentry used as a readdir
 * cursor for @parent.  It is never hashed and pins the parent via dget().
 */
struct dentry *d_alloc_cursor(struct dentry * parent)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
	if (dentry) {
		dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
		dentry->d_parent = dget(parent);
	}
	return dentry;
}
1711
1712
1713
1714
1715
1716
1717
1718
1719
/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
1724EXPORT_SYMBOL(d_alloc_pseudo);
1725
/*
 * d_alloc_name - d_alloc() convenience wrapper taking a NUL-terminated
 * string; computes the qstr hash_len with the default string hash.
 */
struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.hash_len = hashlen_string(parent, name);
	return d_alloc(parent, &q);
}
1734EXPORT_SYMBOL(d_alloc_name);
1735
/*
 * d_set_d_op - install dentry operations on @dentry and cache which ops
 * are present as DCACHE_OP_* bits in d_flags, so hot paths can test a
 * flag instead of dereferencing d_op.  Must only be called once, before
 * the dentry becomes visible (the WARN_ON_ONCEs catch re-setup).
 */
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE	|
				DCACHE_OP_REAL));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
	if (op->d_real)
		dentry->d_flags |= DCACHE_OP_REAL;

}
1764EXPORT_SYMBOL(d_set_d_op);
1765
1766
1767
1768
1769
1770
1771
1772
1773
/**
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry: The dentry to mark
 *
 * Mark a dentry as falling through to the lower layer (as set with
 * d_pin_lower()).  This flag may be recorded on the medium.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
1780EXPORT_SYMBOL(d_set_fallthru);
1781
/*
 * d_flags_for_inode - compute the DCACHE_*_TYPE flags a dentry should
 * carry for @inode (NULL means a negative dentry).  Also caches the
 * "has ->lookup" / "has ->get_link" answers in inode->i_opflags so
 * subsequent calls can skip the i_op dereferences.
 */
static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			/* Directories without ->lookup are "automount points". */
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}
1816
/*
 * __d_instantiate - attach @inode to @dentry.  Caller must hold
 * inode->i_lock (see d_instantiate()).  The d_seq write section lets
 * RCU-walk lookups detect the inode/type change.
 */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
		__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	}
}
1856EXPORT_SYMBOL(d_instantiate);
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
/**
 * d_instantiate_no_diralias - instantiate a non-aliased dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.  If a directory alias is found,
 * then return an error (and drop inode).  This guarantees that a directory
 * inode may never have more than one alias.
 */
int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));

	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
		spin_unlock(&inode->i_lock);
		iput(inode);
		return -EBUSY;
	}
	__d_instantiate(entry, inode);
	spin_unlock(&inode->i_lock);

	return 0;
}
1883EXPORT_SYMBOL(d_instantiate_no_diralias);
1884
1885struct dentry *d_make_root(struct inode *root_inode)
1886{
1887 struct dentry *res = NULL;
1888
1889 if (root_inode) {
1890 res = __d_alloc(root_inode->i_sb, NULL);
1891 if (res)
1892 d_instantiate(res, root_inode);
1893 else
1894 iput(root_inode);
1895 }
1896 return res;
1897}
1898EXPORT_SYMBOL(d_make_root);
1899
/*
 * __d_find_any_alias - grab a reference to the first alias of @inode.
 * Caller must hold inode->i_lock.  Returns NULL if there are none.
 */
static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}
1910
1911
1912
1913
1914
1915
1916
1917
/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them.  If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
1927EXPORT_SYMBOL(d_find_any_alias);
1928
/*
 * __d_obtain_alias - find or create a dentry for @inode, used by the
 * export/open-by-handle paths.  Consumes the inode reference on every
 * path except the one returning the freshly created dentry.
 * @disconnected selects whether the new dentry gets
 * DCACHE_DISCONNECTED (d_obtain_alias) or not (d_obtain_root).
 */
static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
{
	struct dentry *tmp;
	struct dentry *res;
	unsigned add_flags;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, NULL);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	security_d_instantiate(tmp, inode);
	spin_lock(&inode->i_lock);
	/* Re-check under i_lock: someone may have raced us to add an alias. */
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&tmp->d_lock);
	__d_set_inode_and_type(tmp, inode, add_flags);
	hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
	/* Anonymous dentries are hashed on the superblock's s_anon list. */
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);

	return tmp;

 out_iput:
	iput(inode);
	return res;
}
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations.  The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  A %NULL or IS_ERR inode may be passed in and will be the
 * error will be propagate to the return value, with a %NULL @inode
 * replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, 1);
}
2003EXPORT_SYMBOL(d_obtain_alias);
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.  Unlike
 * d_obtain_alias(), the dentry is not flagged DCACHE_DISCONNECTED.
 * On successful return the inode reference is transferred to the
 * dentry; on error it is released.
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, 0);
}
2024EXPORT_SYMBOL(d_obtain_root);
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode:  the inode case-insensitive lookup has found
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.  Consumes the inode reference on all
 * paths that do not splice it into a dentry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		found = d_alloc_parallel(dentry->d_parent, name,
					dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	res = d_splice_alias(inode, found);
	if (res) {
		dput(found);
		return res;
	}
	return found;
}
2077EXPORT_SYMBOL(d_add_ci);
2078
2079
2080static inline bool d_same_name(const struct dentry *dentry,
2081 const struct dentry *parent,
2082 const struct qstr *name)
2083{
2084 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2085 if (dentry->d_name.len != name->len)
2086 return false;
2087 return dentry_cmp(dentry, name->name, name->len) == 0;
2088 }
2089 return parent->d_op->d_compare(dentry,
2090 dentry->d_name.len, dentry->d_name.name,
2091 name) == 0;
2092}
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not intended to be called outside of core kernel code.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held.  The returned dentry must not be stored
 * into without taking d_lock and checking d_seq sequence count against
 * @seqp returned here.
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result.  d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			int tlen;
			const char *tname;
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			tlen = dentry->d_name.len;
			tname = dentry->d_name.name;
			/* we want a consistent (name,len) pair */
			if (read_seqcount_retry(&dentry->d_seq, seq)) {
				cpu_relax();
				goto seqretry;
			}
			if (parent->d_op->d_compare(dentry,
						    tlen, tname, name) != 0)
				continue;
		} else {
			if (dentry->d_name.hash_len != hashlen)
				continue;
			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
				continue;
		}
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question.  If the dentry is found its reference count is incremented and
 * the dentry is returned.  The caller must use dput to free the entry when
 * it has finished using it.  %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	/* Retry under rename_lock to avoid false negatives from renames. */
	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
2231EXPORT_SYMBOL(d_lookup);
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = d_hash(hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * important to prevent any regressions here.
	 *
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result.  d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		if (!d_same_name(dentry, parent, name))
			goto next;

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
 	}
 	rcu_read_unlock();

 	return found;
}
2303
2304
2305
2306
2307
2308
2309
2310
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(dir, name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
2326EXPORT_SYMBOL(d_hash_and_lookup);
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode;
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
again:
	spin_lock(&dentry->d_lock);
	inode = dentry->d_inode;
	isdir = S_ISDIR(inode->i_mode);
	if (dentry->d_lockref.count == 1) {
		/* trylock + retry avoids inverting the i_lock/d_lock order */
		if (!spin_trylock(&inode->i_lock)) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();
			goto again;
		}
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);

	fsnotify_nameremove(dentry, isdir);
}
2379EXPORT_SYMBOL(d_delete);
2380
/*
 * __d_rehash - add @entry to the dentry hash table.  Caller must hold
 * entry->d_lock; the entry must currently be unhashed.
 */
static void __d_rehash(struct dentry *entry)
{
	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
	BUG_ON(!d_unhashed(entry));
	hlist_bl_lock(b);
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}
2389
2390
2391
2392
2393
2394
2395
2396
/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	__d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
2403EXPORT_SYMBOL(d_rehash);
2404
/*
 * start_dir_add - open a "directory modification" window on @dir by
 * making i_dir_seq odd (cmpxchg loop).  Parallel-lookup readers that
 * sampled an even value will notice and retry; paired with
 * end_dir_add().
 */
static inline unsigned start_dir_add(struct inode *dir)
{

	for (;;) {
		unsigned n = dir->i_dir_seq;
		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
			return n;
		cpu_relax();
	}
}
2415
/*
 * end_dir_add - close the window opened by start_dir_add(); the release
 * store publishes the new (even) sequence after all dentry updates.
 */
static inline void end_dir_add(struct inode *dir, unsigned n)
{
	smp_store_release(&dir->i_dir_seq, n + 2);
}
2420
/*
 * d_wait_lookup - sleep until an in-lookup dentry leaves the in-lookup
 * state.  Called and returns with dentry->d_lock held; the lock is
 * dropped around each schedule().
 */
static void d_wait_lookup(struct dentry *dentry)
{
	if (d_in_lookup(dentry)) {
		DECLARE_WAITQUEUE(wait, current);
		add_wait_queue(dentry->d_wait, &wait);
		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&dentry->d_lock);
			schedule();
			spin_lock(&dentry->d_lock);
		} while (d_in_lookup(dentry));
	}
}
2434
/*
 * d_alloc_parallel - allocate a dentry for a parallel (shared-locked)
 * directory lookup.  Either returns a new in-lookup dentry (caller must
 * finish with d_lookup_done()), an existing positive/negative dentry
 * that won the race, or an ERR_PTR.  @wq is the wait queue other
 * lookups of the same name will sleep on.
 */
struct dentry *d_alloc_parallel(struct dentry *parent,
				const struct qstr *name,
				wait_queue_head_t *wq)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *new = d_alloc(parent, name);
	struct dentry *dentry;
	unsigned seq, r_seq, d_seq;

	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

retry:
	rcu_read_lock();
	/* sample i_dir_seq (must be even) before looking in the dcache */
	seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
	r_seq = read_seqbegin(&rename_lock);
	dentry = __d_lookup_rcu(parent, name, &d_seq);
	if (unlikely(dentry)) {
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}
		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
			rcu_read_unlock();
			dput(dentry);
			goto retry;
		}
		rcu_read_unlock();
		dput(new);
		return dentry;
	}
	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
		rcu_read_unlock();
		goto retry;
	}
	hlist_bl_lock(b);
	/* a dir_add happened since we sampled i_dir_seq - start over */
	if (unlikely(parent->d_inode->i_dir_seq != seq)) {
		hlist_bl_unlock(b);
		rcu_read_unlock();
		goto retry;
	}

	/*
	 * No changes for the parent since the beginning of d_lookup().
	 * Since all removals from the chain happen with hlist_bl_lock(),
	 * any potential in-lookup matches are going to stay here until
	 * we unlock the chain.  All fields are stable in everything
	 * we encounter.
	 */
	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (!d_same_name(dentry, parent, name))
			continue;
		hlist_bl_unlock(b);
		/* now we can try to grab a reference */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}

		rcu_read_unlock();
		/*
		 * somebody is likely to be still doing lookup for it;
		 * wait for them to finish
		 */
		spin_lock(&dentry->d_lock);
		d_wait_lookup(dentry);
		/*
		 * it's not in-lookup anymore; in principle we should repeat
		 * everything from dcache lookup, but it's likely to be what
		 * d_lookup() would've found anyway.  If it is, just return it;
		 * otherwise we really have to repeat the whole thing.
		 */
		if (unlikely(dentry->d_name.hash != hash))
			goto mismatch;
		if (unlikely(dentry->d_parent != parent))
			goto mismatch;
		if (unlikely(d_unhashed(dentry)))
			goto mismatch;
		if (unlikely(!d_same_name(dentry, parent, name)))
			goto mismatch;
		/* OK, it *is* a hashed match; return it */
		spin_unlock(&dentry->d_lock);
		dput(new);
		return dentry;
	}
	rcu_read_unlock();
	/* we can't take ->d_lock here; it's OK, though. */
	new->d_flags |= DCACHE_PAR_LOOKUP;
	new->d_wait = wq;
	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
	hlist_bl_unlock(b);
	return new;
mismatch:
	spin_unlock(&dentry->d_lock);
	dput(dentry);
	goto retry;
}
2537EXPORT_SYMBOL(d_alloc_parallel);
2538
/*
 * __d_lookup_done - take @dentry out of the in-lookup hash and wake up
 * everyone waiting on its lookup.  Caller holds dentry->d_lock.
 */
void __d_lookup_done(struct dentry *dentry)
{
	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
						 dentry->d_name.hash);
	hlist_bl_lock(b);
	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
	wake_up_all(dentry->d_wait);
	dentry->d_wait = NULL;
	hlist_bl_unlock(b);
	/* d_u is reused for aliasing/LRU once the lookup is done */
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_lru);
}
2552EXPORT_SYMBOL(__d_lookup_done);
2553
2554
2555
/*
 * __d_add - attach @inode (may be NULL for a negative entry) to @dentry
 * and hash it.  If @inode is non-NULL the caller holds inode->i_lock,
 * which is dropped here.  An in-lookup dentry is also taken out of the
 * in-lookup hash, bracketed by the parent's i_dir_seq window.
 */
static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = NULL;
	unsigned n;
	spin_lock(&dentry->d_lock);
	if (unlikely(d_in_lookup(dentry))) {
		dir = dentry->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(dentry);
	}
	if (inode) {
		unsigned add_flags = d_flags_for_inode(inode);
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
		raw_write_seqcount_begin(&dentry->d_seq);
		__d_set_inode_and_type(dentry, inode, add_flags);
		raw_write_seqcount_end(&dentry->d_seq);
		fsnotify_update_flags(dentry);
	}
	__d_rehash(dentry);
	if (dir)
		end_dir_add(dir, n);
	spin_unlock(&dentry->d_lock);
	if (inode)
		spin_unlock(&inode->i_lock);
}
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 */
void d_add(struct dentry *entry, struct inode *inode)
{
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
	}
	__d_add(entry, inode);
}
2599EXPORT_SYMBOL(d_add);
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
/**
 * d_exact_alias - find and hash an exact unhashed alias
 * @entry: dentry to add
 * @inode: The inode to go with this dentry
 *
 * If an unhashed dentry with the same name/parent and desired
 * inode already exists, hash and return it.  Otherwise, return
 * NULL.
 *
 * Parent directory should be locked.
 */
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
	struct dentry *alias;
	unsigned int hash = entry->d_name.hash;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
			continue;
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			spin_unlock(&alias->d_lock);
			alias = NULL;
		} else {
			__dget_dlock(alias);
			__d_rehash(alias);
			spin_unlock(&alias->d_lock);
		}
		spin_unlock(&inode->i_lock);
		return alias;
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}
2645EXPORT_SYMBOL(d_exact_alias);
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name. Old and new
 * name lengths must match (ie. no d_compare which allows mismatched name
 * lengths).
 *
 * Parent inode i_mutex must be held over d_lookup and into this call (to
 * keep renames and concurrent inserts, and readdir(2) away).
 */
void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
{
	BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
	BUG_ON(dentry->d_name.len != name->len);

	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
2672EXPORT_SYMBOL(dentry_update_name_case);
2673
/*
 * swap_names - exchange the names of @dentry and @target, handling all
 * four combinations of inline vs external name storage.  Used by the
 * exchange case of __d_move(); both dentries are locked by the caller.
 */
static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
			kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
2718
/*
 * copy_name - give @dentry a copy of @target's name (non-exchange
 * rename).  External names are shared by reference count; the old
 * external name, if any, is dropped (RCU-freed when the last user
 * goes away so lockless readers stay safe).
 */
static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;
	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		kfree_rcu(old_name, u.head);
}
2736
/*
 * dentry_lock_for_move - take the parent and dentry locks needed for
 * __d_move() in a deadlock-free order: ancestor parent before
 * descendant parent, then the two dentries by address order with
 * nesting levels 2 and 3.
 */
static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
		spin_lock(&target->d_parent->d_lock);
	else {
		if (d_ancestor(dentry->d_parent, target->d_parent)) {
			spin_lock(&dentry->d_parent->d_lock);
			spin_lock_nested(&target->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&target->d_parent->d_lock);
			spin_lock_nested(&dentry->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		}
	}
	if (target < dentry) {
		spin_lock_nested(&target->d_lock, 2);
		spin_lock_nested(&dentry->d_lock, 3);
	} else {
		spin_lock_nested(&dentry->d_lock, 2);
		spin_lock_nested(&target->d_lock, 3);
	}
}
2763
/*
 * dentry_unlock_for_move - release the locks taken by
 * dentry_lock_for_move(), accounting for parents that may have been
 * swapped or may coincide with the dentries themselves.
 */
static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
{
	if (target->d_parent != dentry->d_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (target->d_parent != target)
		spin_unlock(&target->d_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries
 *
 * Update the dcache to reflect the move of a file name.  Negative
 * dcache entries should not be moved in this way.  Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ.
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	struct inode *dir = NULL;
	unsigned n;
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	BUG_ON(d_ancestor(dentry, target));
	BUG_ON(d_ancestor(target, dentry));

	dentry_lock_for_move(dentry, target);
	if (unlikely(d_in_lookup(target))) {
		dir = target->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(target);
	}

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* unhash both */
	__d_drop(dentry);
	__d_drop(target);

	/* Switch the names.. */
	if (exchange)
		swap_names(dentry, target);
	else
		copy_name(dentry, target);

	/* rehash in new place(s) */
	__d_rehash(dentry);
	if (exchange)
		__d_rehash(target);

	/* ... and switch them in the tree */
	if (IS_ROOT(dentry)) {
		/* splicing a tree */
		dentry->d_flags |= DCACHE_RCUACCESS;
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		list_del_init(&target->d_child);
		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	} else {
		/* swapping two dentries */
		swap(dentry->d_parent, target->d_parent);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
		if (exchange)
			fsnotify_update_flags(target);
		fsnotify_update_flags(dentry);
	}

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	if (dir)
		end_dir_add(dir, n);
	dentry_unlock_for_move(dentry, target);
}
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name.  Negative
 * dcache entries should not be moved in this way.  See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
2877EXPORT_SYMBOL(d_move);
2878
2879
2880
2881
2882
2883
/*
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 *
 * Both dentries must be positive and neither may be a filesystem root
 * (the WARN_ONs below); used by cross-renames (RENAME_EXCHANGE).
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2907{
2908 struct dentry *p;
2909
2910 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2911 if (p->d_parent == p1)
2912 return p;
2913 }
2914 return NULL;
2915}
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL;
	struct rw_semaphore *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!inode_trylock_shared(alias->d_parent->d_inode))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	if (m2)
		up_read(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has an IS_ROOT alias, then d_move that in
 * place of the given dentry and return it, else simply d_add the inode
 * to the dentry and return NULL.
 *
 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
 * we should error out: directories can't have multiple aliases.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * Cluster filesystems may call this function with a negative, hashed dentry.
 * In that case, we know that the inode will be a regular file, and also this
 * will only occur during atomic_open.  So we need to check for the dentry
 * being already hashed only in the final case.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode)
		goto out;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			/* The reference to new ensures it remains an alias */
			spin_unlock(&inode->i_lock);
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				write_sequnlock(&rename_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
			} else {
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
			}
			iput(inode);
			return new;
		}
	}
out:
	__d_add(dentry, inode);
	return NULL;
}
3025EXPORT_SYMBOL(d_splice_alias);
3026
/*
 * prepend - copy @namelen bytes of @str immediately before *@buffer,
 * moving *@buffer back and shrinking *@buflen accordingly.
 *
 * Returns 0 on success, or -ENAMETOOLONG if there is not enough room
 * (in which case *@buflen is still decremented, as callers expect).
 */
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	char *dst;

	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	dst = *buffer - namelen;
	memcpy(dst, str, namelen);
	*buffer = dst;
	return 0;
}
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
/**
 * prepend_name - prepend a pathname in front of current buffer pointer
 * @buffer: buffer pointer
 * @buflen: allocated length of the buffer
 * @name:   name string and length qstr structure
 *
 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
 * make sure that either the old or the new name pointer and length are
 * fetched. However, there may be mismatch between length and pointer.
 * The length cannot be trusted, we need to copy it byte-by-byte until
 * the length is reached or a NUL character is found by us instead.
 * In the latter case, there is no guarantee that the string terminates
 * at that position, so we bail out.
 */
static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
{
	const char *dname = ACCESS_ONCE(name->name);
	u32 dlen = ACCESS_ONCE(name->len);
	char *p;

	/* pair with the store ordering in __d_alloc()'s smp_wmb() */
	smp_read_barrier_depends();

	*buflen -= dlen + 1;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	p = *buffer -= dlen + 1;
	*p++ = '/';
	while (dlen--) {
		char c = *dname++;
		if (!c)
			break;
		*p++ = c;
	}
	return 0;
}
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * The function will first try to write out the pathname without taking any
 * lock other than the RCU read lock to make sure that dentries won't go away.
 * It only checks the sequence number of the global rename_lock as any change
 * in the dentry's d_seq will be preceded by changes in the rename_lock
 * sequence number. If the sequence number had been changed, it will restart
 * the whole pathname back-tracing sequence again by taking the rename_lock.
 * In this case, there is no need to take the RCU read lock as the recursive
 * parent pointer references will keep the dentry chain alive as long as no
 * rename operation is performed.
 */
static int prepend_path(const struct path *path,
			const struct path *root,
			char **buffer, int *buflen)
{
	struct dentry *dentry;
	struct vfsmount *vfsmnt;
	struct mount *mnt;
	int error = 0;
	unsigned seq, m_seq = 0;
	char *bptr;
	int blen;

	rcu_read_lock();
restart_mnt:
	read_seqbegin_or_lock(&mount_lock, &m_seq);
	seq = 0;
	rcu_read_lock();
restart:
	bptr = *buffer;
	blen = *buflen;
	error = 0;
	dentry = path->dentry;
	vfsmnt = path->mnt;
	mnt = real_mount(vfsmnt);
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (dentry != root->dentry || vfsmnt != root->mnt) {
		struct dentry * parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
			/* Escaped? */
			if (dentry != vfsmnt->mnt_root) {
				bptr = *buffer;
				blen = *buflen;
				error = 3;
				break;
			}
			/* Global root? */
			if (mnt != parent) {
				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
				mnt = parent;
				vfsmnt = &mnt->mnt;
				continue;
			}
			if (!error)
				error = is_mounted(vfsmnt) ? 1 : 2;
			break;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		error = prepend_name(&bptr, &blen, &dentry->d_name);
		if (error)
			break;

		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);

	if (!(m_seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&mount_lock, m_seq)) {
		m_seq = 1;
		goto restart_mnt;
	}
	done_seqretry(&mount_lock, m_seq);

	/* Nothing was prepended: the path is just the root - emit "/". */
	if (error >= 0 && bptr == *buffer) {
		if (--blen < 0)
			error = -ENAMETOOLONG;
		else
			*--bptr = '/';
	}
	*buffer = bptr;
	*buflen = blen;
	return error;
}
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.
 *
 * "buflen" should be positive.
 *
 * If the path is not reachable from the supplied root, return %NULL.
 */
char *__d_path(const struct path *path,
	       const struct path *root,
	       char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, root, &res, &buflen);

	if (error < 0)
		return ERR_PTR(error);
	if (error > 0)
		return NULL;
	return res;
}
3209
/*
 * d_absolute_path - like __d_path() but relative to the filesystem's
 * actual root (empty @root), so it works even if the path is not
 * reachable from the caller's root.  Returns a pointer into @buf or an
 * ERR_PTR (-EINVAL if the path is not mounted anywhere).
 */
char *d_absolute_path(const struct path *path,
	       char *buf, int buflen)
{
	struct path root = {};
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, &root, &res, &buflen);

	if (error > 1)
		error = -EINVAL;
	if (error < 0)
		return ERR_PTR(error);
	return res;
}
3226
3227
3228
3229
/*
 * path_with_deleted - prepend_path() wrapper that NUL-terminates the
 * buffer and appends " (deleted)" when the dentry has been unlinked.
 */
static int path_with_deleted(const struct path *path,
			     const struct path *root,
			     char **buf, int *buflen)
{
	prepend(buf, buflen, "\0", 1);
	if (d_unlinked(path->dentry)) {
		int error = prepend(buf, buflen, " (deleted)", 10);
		if (error)
			return error;
	}

	return prepend_path(path, root, buf, buflen);
}
3243
/* Prefix used for paths that are not reachable from the current root. */
static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}
3248
/*
 * get_fs_root_rcu - lockless snapshot of @fs->root, retried via the
 * fs_struct seqcount until a consistent copy is read.
 */
static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
	} while (read_seqcount_retry(&fs->seq, seq));
}
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Builds the pathname of @path (relative to the caller's fs root) into
 * the tail of @buf, appending " (deleted)" for unlinked dentries via
 * path_with_deleted().  Returns a pointer into @buf, or an ERR_PTR on
 * failure.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	int error;

	/*
	 * Filesystems may supply ->d_dname() to synthesize a name on
	 * demand instead of walking the dentry tree.
	 *
	 * Skip that shortcut when the dentry is the root of a mounted
	 * instance (IS_ROOT() and equal to mnt->mnt_root).
	 * NOTE(review): presumably because ->d_dname() may sleep while
	 * the fallback below runs under rcu_read_lock() -- confirm.
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	rcu_read_lock();
	get_fs_root_rcu(current->fs, &root);
	error = path_with_deleted(path, &root, &res, &buflen);
	rcu_read_unlock();

	if (error < 0)
		res = ERR_PTR(error);
	return res;
}
EXPORT_SYMBOL(d_path);
3306
3307
3308
3309
3310char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3311 const char *fmt, ...)
3312{
3313 va_list args;
3314 char temp[64];
3315 int sz;
3316
3317 va_start(args, fmt);
3318 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3319 va_end(args);
3320
3321 if (sz > sizeof(temp) || sz > buflen)
3322 return ERR_PTR(-ENAMETOOLONG);
3323
3324 buffer += buflen - sz;
3325 return memcpy(buffer, temp, sz);
3326}
3327
3328char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3329{
3330 char *end = buffer + buflen;
3331
3332 if (prepend(&end, &buflen, " (deleted)", 11) ||
3333 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3334 prepend(&end, &buflen, "/", 1))
3335 end = ERR_PTR(-ENAMETOOLONG);
3336 return end;
3337}
3338EXPORT_SYMBOL(simple_dname);
3339
3340
3341
3342
/*
 * Write the path of @d up to its IS_ROOT() ancestor (mount points are
 * not crossed) into the tail of @buf.  Returns a pointer into @buf, or
 * ERR_PTR(-ENAMETOOLONG) if it does not fit.
 */
static char *__dentry_path(struct dentry *d, char *buf, int buflen)
{
	struct dentry *dentry;
	char *end, *retval;
	int len, seq = 0;
	int error = 0;

	/* Need room for at least "/" plus the NUL terminator. */
	if (buflen < 2)
		goto Elong;

	rcu_read_lock();
restart:
	dentry = d;
	end = buf + buflen;
	len = buflen;
	prepend(&end, &len, "\0", 1);
	/* Start from "/" so a root dentry yields "/". */
	retval = end-1;
	*retval = '/';
	/*
	 * First pass (seq even) runs locklessly; if a concurrent rename
	 * invalidates it, we retry with seq = 1, which makes
	 * read_seqbegin_or_lock() take rename_lock for a stable walk.
	 */
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		error = prepend_name(&end, &len, &dentry->d_name);
		if (error)
			break;

		retval = end;
		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);
	if (error)
		goto Elong;
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
3387
char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
	/* Like dentry_path(), but without the "//deleted" suffix. */
	char *path = __dentry_path(dentry, buf, buflen);

	return path;
}
EXPORT_SYMBOL(dentry_path_raw);
3393
/*
 * Like dentry_path_raw(), but unlinked dentries get a "//deleted"
 * suffix.  Returns a pointer into @buf or ERR_PTR(-ENAMETOOLONG).
 */
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	if (d_unlinked(dentry)) {
		p = buf + buflen;
		/* Reserve "//deleted" (with its NUL) at the very end. */
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		/*
		 * Give one byte back: __dentry_path()'s terminating NUL
		 * will land exactly on *p, which we patch to '/' below
		 * so the result reads "<path>//deleted".
		 */
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	if (!IS_ERR(retval) && p)
		*p = '/';
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
3412
/*
 * Coherent lockless snapshot of fs->root AND fs->pwd: retry until
 * fs->seq is stable, so both paths come from the same update.
 * NOTE(review): callers (see getcwd) hold rcu_read_lock().
 */
static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
				    struct path *pwd)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
		*pwd = fs->pwd;
	} while (read_seqcount_retry(&fs->seq, seq));
}
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
/*
 * getcwd(2): copy the current working directory's path into the
 * user buffer.  Returns the length copied (including the NUL),
 * -ERANGE if @size is too small, -ENOENT if the cwd is unlinked,
 * -ENOMEM/-EFAULT on allocation or copy failure.
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = __getname();	/* PATH_MAX scratch buffer */

	if (!page)
		return -ENOMEM;

	rcu_read_lock();
	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);

	error = -ENOENT;
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		/* Build the path backwards from the end of the page. */
		char *cwd = page + PATH_MAX;
		int buflen = PATH_MAX;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &root, &cwd, &buflen);
		rcu_read_unlock();

		if (error < 0)
			goto out;

		/* cwd not reachable from this task's root. */
		if (error > 0) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PATH_MAX + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		rcu_read_unlock();
	}

out:
	__putname(page);
	return error;
}
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3508{
3509 bool result;
3510 unsigned seq;
3511
3512 if (new_dentry == old_dentry)
3513 return true;
3514
3515 do {
3516
3517 seq = read_seqbegin(&rename_lock);
3518
3519
3520
3521
3522 rcu_read_lock();
3523 if (d_ancestor(old_dentry, new_dentry))
3524 result = true;
3525 else
3526 result = false;
3527 rcu_read_unlock();
3528 } while (read_seqretry(&rename_lock, seq));
3529
3530 return result;
3531}
3532
3533static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3534{
3535 struct dentry *root = data;
3536 if (dentry != root) {
3537 if (d_unhashed(dentry) || !dentry->d_inode)
3538 return D_WALK_SKIP;
3539
3540 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3541 dentry->d_flags |= DCACHE_GENOCIDE;
3542 dentry->d_lockref.count--;
3543 }
3544 }
3545 return D_WALK_CONTINUE;
3546}
3547
/* Walk @parent's subtree, dropping one reference from each dentry. */
void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill, NULL);
}
3552
/*
 * d_tmpfile - bind a tmpfile inode to an anonymous dentry, giving it a
 * synthetic "#<ino>" name and instantiating it.
 */
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	/* NOTE(review): presumably undoes an elevated link count from
	 * tmpfile creation -- confirm against ->tmpfile() callers. */
	inode_dec_link_count(inode);
	/* Must be a fresh, unhashed, negative dentry with inline name. */
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	/* Lock order: parent first, then the dentry (nested class). */
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	/* Synthesize the "#<ino>" name in the inline buffer. */
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
3568
static __initdata unsigned long dhash_entries;	/* "dhash_entries=" boot param */
static int __init set_dhash_entries(char *str)
{
	/* Parse the "dhash_entries=" kernel command-line parameter. */
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
3578
static void __init dcache_init_early(void)
{
	/*
	 * With hashdist set, defer the hash allocation to dcache_init()
	 * (which allocates without HASH_EARLY).
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY | HASH_ZERO,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);
}
3598
static void __init dcache_init(void)
{
	/*
	 * Create the dentry slab cache.  SLAB_PANIC: the dcache is not
	 * optional, so boot fails if this cannot be set up.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);

	/* Hash table already set up by dcache_init_early()? */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_ZERO,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);
}
3624
3625
/* Slab cache of PATH_MAX-sized pathname buffers (see __getname()). */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);
3630
3631void __init vfs_caches_init_early(void)
3632{
3633 int i;
3634
3635 for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3636 INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3637
3638 dcache_init_early();
3639 inode_init_early();
3640}
3641
void __init vfs_caches_init(void)
{
	/* Buffers handed out by __getname() are PATH_MAX bytes each. */
	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Boot-time initialization of the core VFS caches/subsystems. */
	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}
3655