// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/memblock.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>
#include "internal.h"
#include "mount.h"

/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_roots bl list spinlock protects:
 *   - the s_roots list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - childrens' d_child and d_parent
 *   - d_u.d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_roots lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * arbitrary, since it's serialized on rename_lock
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);

/*
 * This is the single most critical data structure when it comes to the
 * dcache: the hashtable for lookups. Somebody should try to make this
 * free for multiple CPU accesses (hash it on a per-CPU basis?).
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> d_hash_shift);
}

#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
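
/*
 * Illustrative note (editorial): in_lookup_hash() folds the parent
 * pointer into the name hash, scaled by L1_CACHE_BYTES so that the
 * always-zero low pointer bits do not wash out, and then hash_32()
 * selects one of the 1 << IN_LOOKUP_SHIFT (1024) in-lookup buckets.
 */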

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to harvest
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_negative(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_negative, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	dentry_stat.nr_negative = get_nr_dentry_negative();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>

/*
 * NOTE! 'cs' comes from a dentry, so the load is always naturally
 * aligned and cannot cross a page boundary; 'ct' is a caller-supplied
 * string with no such guarantee, which is why it is fetched with
 * load_unaligned_zeropad() while 'cs' uses read_word_at_a_time().
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		a = read_word_at_a_time(cs);
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
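
/*
 * Worked example (illustrative only; little-endian, 8-byte words):
 * comparing a 3-byte tail loads a full word from each string, and
 * bytemask_from_count(3) yields 0xffffff, so only the low three bytes
 * of (a ^ b) can make the final test non-zero - padding bytes beyond
 * the terminating NUL never affect the result.
 */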

#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif

static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	/*
	 * Be careful about RCU walk racing with rename:
	 * use 'READ_ONCE' to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}

struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}
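
/*
 * Editorial note: short names live in the fixed d_iname array inside
 * the dentry (DNAME_INLINE_LEN bytes, arch-dependent - e.g. 32 on
 * 64-bit), while longer names get a refcounted external_name
 * allocation; dname_external() distinguishes the two simply by
 * comparing the d_name.name pointer against d_iname.
 */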

void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	name->name = dentry->d_name;
	if (unlikely(dname_external(dentry))) {
		atomic_inc(&external_name(dentry)->u.count);
	} else {
		memcpy(name->inline_name, dentry->d_iname,
		       dentry->d_name.len + 1);
		name->name.name = name->inline_name;
	}
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(take_dentry_name_snapshot);

void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name.name != name->inline_name)) {
		struct external_name *p;
		p = container_of(name->name.name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->u.count)))
			kfree_rcu(p, u.head);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);

static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}

static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
	if (dentry->d_flags & DCACHE_LRU_LIST)
		this_cpu_inc(nr_dentry_negative);
}
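
/*
 * Editorial note: once its inode is dropped the dentry counts as
 * negative, so if it also sits on the LRU the nr_dentry_negative
 * counter is bumped here; this keeps the accounting symmetric with
 * __d_instantiate() and the d_lru_add()/d_lru_del() helpers below.
 */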

static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (dentry->d_flags & DCACHE_NORCU)
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;

	raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * The per-cpu "nr_dentry_negative" counters are only updated
 * when deleted from or added to the per-superblock LRU list, not
 * from/to the shrink list. That is to avoid an unneeded dec/inc
 * pair when moving from LRU to shrink list in select_collect().
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_inc(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate(lru, &dentry->d_lru);
}

static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock
 *
 * ___d_drop doesn't mark dentry as "unhashed"
 * (dentry->d_hash.pprev will be NULL)
 */
static void ___d_drop(struct dentry *dentry)
{
	struct hlist_bl_head *b;
	/*
	 * Hashed dentries are normally on the dentry hashtable,
	 * with the exception of those newly allocated by
	 * d_obtain_root, which are always IS_ROOT:
	 */
	if (unlikely(IS_ROOT(dentry)))
		b = &dentry->d_sb->s_roots;
	else
		b = d_hash(dentry->d_name.hash);

	hlist_bl_lock(b);
	__hlist_bl_del(&dentry->d_hash);
	hlist_bl_unlock(b);
}

void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		___d_drop(dentry);
		dentry->d_hash.pprev = NULL;
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/*
	 * Inform d_walk() and shrink_dentry_list() that we are no longer
	 * attached to the dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * Cursors can move around the list of children.  While we'd been
	 * a normal list member, it didn't matter - ->d_child.next would've
	 * been updated.  However, from now on it won't be and for the
	 * things like d_walk() it might end up with a nasty surprise.
	 * Normally d_walk() doesn't care about cursors moving around -
	 * ->d_lock on parent prevents that and since a cursor has no children
	 * of its own, we get through it without ever unlocking the parent.
	 * There is one exception, though - if we ascend from a child that
	 * gets killed as soon as we unlock it, the next sibling is found
	 * using the value left in its ->d_child.next.  And if _that_
	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
	 * before d_walk() regains parent->d_lock, we'll end up skipping
	 * everything the cursor had been moved past.
	 *
	 * Solution: make sure that the pointer left behind in ->d_child.next
	 * points to something that won't be moving around.  I.e. skip the
	 * cursors.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}

static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
	cond_resched();
}

static struct dentry *__lock_parent(struct dentry *dentry)
{
	struct dentry *parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = READ_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	return __lock_parent(dentry);
}

static inline bool retain_dentry(struct dentry *dentry)
{
	WARN_ON(d_in_lookup(dentry));

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			return false;
	}
	/* retain; LRU fodder */
	dentry->d_lockref.count--;
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
		dentry->d_flags |= DCACHE_REFERENCED;
	return true;
}

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto slow_positive;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			parent = __lock_parent(dentry);
			if (likely(inode || !dentry->d_inode))
				goto got_locks;
			/* negative that became positive */
			if (parent)
				spin_unlock(&parent->d_lock);
			inode = dentry->d_inode;
			goto slow_positive;
		}
	}
	__dentry_kill(dentry);
	return parent;

slow_positive:
	spin_unlock(&dentry->d_lock);
	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	parent = lock_parent(dentry);
got_locks:
	if (unlikely(dentry->d_lockref.count != 1)) {
		dentry->d_lockref.count--;
	} else if (likely(!retain_dentry(dentry))) {
		__dentry_kill(dentry);
		return parent;
	}
	/* we are keeping it, after all */
	if (inode)
		spin_unlock(&inode->i_lock);
	if (parent)
		spin_unlock(&parent->d_lock);
	spin_unlock(&dentry->d_lock);
	return NULL;
}
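
/*
 * Editorial note on the trylock dance above: ->i_lock and the parent's
 * ->d_lock rank above our ->d_lock in the locking order, so they are
 * tried opportunistically first; on failure everything is dropped and
 * re-taken in the proper order, after which d_lockref.count must be
 * re-checked, since the dentry may have been grabbed again while it
 * was unlocked.
 */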

/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we sould not
	 * let the dentry count go to zero, so use "put_or_lock".
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return true;
		}
		return false;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return true;

	/*
	 * Careful, careful. The reference count went down
	 * to zero, but we don't hold the dentry lock, so
	 * somebody else could get it again, and do another
	 * dput(), and we need to not race with that.
	 *
	 * However, there is a very special and common case
	 * where we don't care, because there is nothing to
	 * do: the dentry is still hashed, it does not have
	 * a 'delete' op, and it's referenced and already on
	 * the LRU list.
	 *
	 * NOTE! Since we aren't locked, these values are
	 * not "stable". However, it is sufficient that at
	 * some point after we dropped the reference the
	 * dentry was hashed and the flags had the proper
	 * value. Other dentry users may have re-gotten
	 * a reference to the dentry and change that, but
	 * our work is done - we can leave the dentry
	 * around with a zero refcount.
	 */
	smp_rmb();
	d_flags = READ_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return true;

	/*
	 * Not the fast normal case? Get the spinlock. We've already
	 * decremented the refcount, but we'll need to re-check the
	 * situation after getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all? Alternatively, somebody
	 * else could have killed it and marked it dead. Either way, we
	 * don't need to do anything else.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return true;
	}

	/*
	 * Re-get the reference we optimistically dropped. We hold the
	 * lock, and we just tested that it was zero, so we can just
	 * set it to 1.
	 */
	dentry->d_lockref.count = 1;
	return false;
}
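
/*
 * Minimal usage sketch (editorial; this mirrors dput() below):
 *
 *	rcu_read_lock();
 *	if (fast_dput(dentry)) {
 *		rcu_read_unlock();	// reference dropped locklessly
 *		return;
 *	}
 *	rcu_read_unlock();		// d_lock is held, count is 1
 */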

/* 
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release 
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	while (dentry) {
		might_sleep();

		rcu_read_lock();
		if (likely(fast_dput(dentry))) {
			rcu_read_unlock();
			return;
		}

		/* Slow case: now with the dentry lock held */
		rcu_read_unlock();

		if (likely(retain_dentry(dentry))) {
			spin_unlock(&dentry->d_lock);
			return;
		}

		dentry = dentry_kill(dentry);
	}
}
EXPORT_SYMBOL(dput);

static void __dput_to_list(struct dentry *dentry, struct list_head *list)
__must_hold(&dentry->d_lock)
{
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		/* let the owner of the list it's on deal with it */
		--dentry->d_lockref.count;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!--dentry->d_lockref.count)
			d_shrink_add(dentry, list);
	}
}

void dput_to_list(struct dentry *dentry, struct list_head *list)
{
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	if (!retain_dentry(dentry))
		__dput_to_list(dentry, list);
	spin_unlock(&dentry->d_lock);
}

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	ret = READ_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == READ_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);

static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them.  If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory, return it.  Otherwise
 * return NULL.  Hashed dentries are returned with a reference.
 *
 * Directories have at most one alias, so any alias will do; for other
 * inodes we prefer a hashed alias, so that the result is still
 * reachable through a lookup.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias;

	if (S_ISDIR(inode->i_mode))
		return __d_find_any_alias(inode);

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

/*
 * Lock a dentry from shrink list.
 * Called under rcu_read_lock() and dentry->d_lock; the former
 * guarantees that nothing we access will be freed under us.
 * Note that dentry is *not* protected from concurrent dentry_kill(),
 * d_delete(), etc.
 *
 * Return false if dentry has been disrupted or grabbed, leaving
 * the caller to kick it off-list.  Otherwise, return true and have
 * that dentry's inode and parent both locked.
 */
static bool shrink_lock_dentry(struct dentry *dentry)
{
	struct inode *inode;
	struct dentry *parent;

	if (dentry->d_lockref.count)
		return false;

	inode = dentry->d_inode;
	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
		spin_unlock(&dentry->d_lock);
		spin_lock(&inode->i_lock);
		spin_lock(&dentry->d_lock);
		if (unlikely(dentry->d_lockref.count))
			goto out;
		/* changed inode means that somebody had grabbed it */
		if (unlikely(inode != dentry->d_inode))
			goto out;
	}

	parent = dentry->d_parent;
	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
		return true;

	spin_unlock(&dentry->d_lock);
	spin_lock(&parent->d_lock);
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		spin_lock(&dentry->d_lock);
		goto out;
	}
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	if (likely(!dentry->d_lockref.count))
		return true;
	spin_unlock(&parent->d_lock);
out:
	if (inode)
		spin_unlock(&inode->i_lock);
	return false;
}

void shrink_dentry_list(struct list_head *list)
{
	while (!list_empty(list)) {
		struct dentry *dentry, *parent;

		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		rcu_read_lock();
		if (!shrink_lock_dentry(dentry)) {
			bool can_free = false;
			rcu_read_unlock();
			d_shrink_del(dentry);
			if (dentry->d_lockref.count < 0)
				can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}
		rcu_read_unlock();
		d_shrink_del(dentry);
		parent = dentry->d_parent;
		if (parent != dentry)
			__dput_to_list(parent, list);
		__dentry_kill(dentry);
	}
}
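
/*
 * Editorial note: a dentry for which shrink_lock_dentry() fails here
 * has either been grabbed again or is being killed concurrently
 * (lockref marked dead, count < 0).  In the latter case the killer
 * set DCACHE_MAY_FREE in __dentry_kill(), and we complete the free
 * on its behalf.
 */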

static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occur through this functions or through callbacks
		 * like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
		 * operating only with stack provided lists after they are
		 * properly isolated from the main list.  It is thus, always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
 * is done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}

static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	do {
		LIST_HEAD(dispose);

		list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, 1024);
		shrink_dentry_list(&dispose);
	} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};

/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter()
 * @enter:	callback when first entering the dentry
 *
 * The @enter() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	seq = 1;
	goto again;
}
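
/*
 * Editorial note: the walk first runs locklessly under the rename_lock
 * seqcount; if a rename races with it, need_seqretry() fires and the
 * retry pass (seq = 1) takes rename_lock exclusively via
 * read_seqbegin_or_lock(), so the second walk cannot be disturbed.
 */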

struct check_mount {
	struct vfsmount *mnt;
	unsigned int mounted;
};

static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
	struct check_mount *info = data;
	struct path path = { .mnt = info->mnt, .dentry = dentry };

	if (likely(!d_mountpoint(dentry)))
		return D_WALK_CONTINUE;
	if (__path_is_mountpoint(&path)) {
		info->mounted = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point in the current namespace.
 */
int path_has_submounts(const struct path *parent)
{
	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

	read_seqlock_excl(&mount_lock);
	d_walk(parent->dentry, &data, path_check_mount);
	read_sequnlock_excl(&mount_lock);

	return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);

/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}

/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise  it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
struct select_data {
	struct dentry *start;
	union {
		long found;
		struct dentry *victim;
	};
	struct list_head dispose;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}

static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		if (!dentry->d_lockref.count) {
			rcu_read_lock();
			data->victim = dentry;
			return D_WALK_QUIT;
		}
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count)
			d_shrink_add(dentry, &data->dispose);
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data = {.start = parent};

		INIT_LIST_HEAD(&data.dispose);
		d_walk(parent, &data, select_collect);

		if (!list_empty(&data.dispose)) {
			shrink_dentry_list(&data.dispose);
			continue;
		}

		cond_resched();
		if (!data.found)
			break;
		data.victim = NULL;
		d_walk(parent, &data, select_collect2);
		if (data.victim) {
			struct dentry *parent;
			spin_lock(&data.victim->d_lock);
			if (!shrink_lock_dentry(data.victim)) {
				spin_unlock(&data.victim->d_lock);
				rcu_read_unlock();
			} else {
				rcu_read_unlock();
				parent = data.victim->d_parent;
				if (parent != data.victim)
					__dput_to_list(parent, &data.dispose);
				__dentry_kill(data.victim);
			}
		}
		if (!list_empty(&data.dispose))
			shrink_dentry_list(&data.dispose);
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);

static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendents; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}

static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check);
	d_drop(dentry);
	dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_roots)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}

static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
{
	struct dentry **victim = _data;
	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		*victim = dentry;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 */
void d_invalidate(struct dentry *dentry)
{
	bool had_submounts = false;
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode)
		return;

	shrink_dcache_parent(dentry);
	for (;;) {
		struct dentry *victim = NULL;
		d_walk(dentry, &victim, find_submount);
		if (!victim) {
			if (had_submounts)
				shrink_dcache_parent(dentry);
			return;
		}
		had_submounts = true;
		detach_mounts(victim);
		dput(victim);
	}
}
EXPORT_SYMBOL(d_invalidate);

/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		name = &slash_name;
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len,
						  GFP_KERNEL_ACCOUNT |
						  __GFP_RECLAIMABLE);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);
		dname = p->name;
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_store_release(&dentry->d_name.name, dname);

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}

/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_anon(struct super_block *sb)
{
	return __d_alloc(sb, NULL);
}
EXPORT_SYMBOL(d_alloc_anon);

struct dentry *d_alloc_cursor(struct dentry * parent)
{
	struct dentry *dentry = d_alloc_anon(parent->d_sb);
	if (dentry) {
		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
		dentry->d_parent = dget(parent);
	}
	return dentry;
}
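
/*
 * Editorial note: cursor dentries are negative, unhashed children used
 * by readdir on dcache-backed directories (see fs/libfs.c); the
 * DCACHE_DENTRY_CURSOR flag is what dentry_unlist() and d_walk()
 * above test in order to skip them.
 */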

/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 * This is used for pipes, sockets et.al. - the stuff that should
 * never be anyone's children or parents.  Unlike all other
 * dentries, these will not have RCU delay between dropping the
 * last reference and freeing them.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(sb, name);
	if (likely(dentry))
		dentry->d_flags |= DCACHE_NORCU;
	return dentry;
}

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.hash_len = hashlen_string(parent, name);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE	|
				DCACHE_OP_REAL));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
	if (op->d_real)
		dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);

/**
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry: The dentry to mark
 *
 * Mark a dentry as falling through to the lower layer (as set with
 * d_pin_lower()).  This flag may be recorded on the medium.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);

static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	/*
	 * Decrement negative dentry count if it was in the LRU list.
	 */
	if (dentry->d_flags & DCACHE_LRU_LIST)
		this_cpu_dec(nr_dentry_negative);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
		__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_instantiate);

/*
 * This should be equivalent to d_instantiate() + unlock_new_inode(),
 * with lockdep-related part of unlock_new_inode() done before
 * anything else.  Use that instead of open-coding d_instantiate()/
 * unlock_new_inode() combinations.
 */
void d_instantiate_new(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	BUG_ON(!inode);
	lockdep_annotate_inode_mutex_key(inode);
	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);

struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		res = d_alloc_anon(root_inode->i_sb);
		if (res)
			d_instantiate(res, root_inode);
		else
			iput(root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);

static struct dentry *__d_instantiate_anon(struct dentry *dentry,
					   struct inode *inode,
					   bool disconnected)
{
	struct dentry *res;
	unsigned add_flags;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(dentry);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&dentry->d_lock);
	__d_set_inode_and_type(dentry, inode, add_flags);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	if (!disconnected) {
		hlist_bl_lock(&dentry->d_sb->s_roots);
		hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
		hlist_bl_unlock(&dentry->d_sb->s_roots);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);

	return dentry;

 out_iput:
	iput(inode);
	return res;
}

struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
{
	return __d_instantiate_anon(dentry, inode, true);
}
EXPORT_SYMBOL(d_instantiate_anon);

static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
{
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = d_alloc_anon(inode->i_sb);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	return __d_instantiate_anon(tmp, inode, disconnected);

out_iput:
	iput(inode);
	return res;
}

/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations.  The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry.  If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, true);
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry.  If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, false);
}
EXPORT_SYMBOL(d_obtain_root);

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		found = d_alloc_parallel(dentry->d_parent, name,
					dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	res = d_splice_alias(inode, found);
	if (res) {
		dput(found);
		return res;
	}
	return found;
}
EXPORT_SYMBOL(d_add_ci);


static inline bool d_same_name(const struct dentry *dentry,
			       const struct dentry *parent,
			       const struct qstr *name)
{
	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
		if (dentry->d_name.len != name->len)
			return false;
		return dentry_cmp(dentry, name->name, name->len) == 0;
	}
	return parent->d_op->d_compare(dentry,
				       dentry->d_name.len, dentry->d_name.name,
				       name) == 0;
}
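
/*
 * Editorial note: when the parent has DCACHE_OP_COMPARE, the
 * filesystem's ->d_compare() decides what "same name" means (e.g.
 * case-insensitive matches); otherwise a length check plus a
 * dentry_cmp() byte comparison is used.
 */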

/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * protects the memory of the directory entry.
 *
 * NOTE! The caller *has* to check the resulting dentry against the sequence
 * number we've returned before using any of the resulting dentry state!
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a change - a mid-change value would
		 * make the comparisons below fail anyway, and the retry
		 * happens at the caller's level.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			int tlen;
			const char *tname;
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			tlen = dentry->d_name.len;
			tname = dentry->d_name.name;
			/* we want a consistent (name,len) pair */
			if (read_seqcount_retry(&dentry->d_seq, seq)) {
				cpu_relax();
				goto seqretry;
			}
			if (parent->d_op->d_compare(dentry,
						    tlen, tname, name) != 0)
				continue;
		} else {
			if (dentry->d_name.hash_len != hashlen)
				continue;
			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
				continue;
		}
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
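
/*
 * Minimal caller sketch (editorial; mirrors the use in
 * d_alloc_parallel() below - rcu_read_lock() must already be held):
 *
 *	unsigned d_seq;
 *	struct dentry *de = __d_lookup_rcu(parent, name, &d_seq);
 *	if (de && !lockref_get_not_dead(&de->d_lockref))
 *		de = NULL;			// dentry died under us
 *	else if (de && read_seqcount_retry(&de->d_seq, d_seq)) {
 *		dput(de);			// raced with a rename
 *		de = NULL;
 *	}
 */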

/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);

/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = d_hash(hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		if (!d_same_name(dentry, parent, name))
			goto next;

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}

/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(dir, name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);

/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode = dentry->d_inode;

	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	/*
	 * Are we the only user?
	 */
	if (dentry->d_lockref.count == 1) {
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
	} else {
		__d_drop(dentry);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_delete);

static void __d_rehash(struct dentry *entry)
{
	struct hlist_bl_head *b = d_hash(entry->d_name.hash);

	hlist_bl_lock(b);
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}

/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	__d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);

static inline unsigned start_dir_add(struct inode *dir)
{

	for (;;) {
		unsigned n = dir->i_dir_seq;
		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
			return n;
		cpu_relax();
	}
}

static inline void end_dir_add(struct inode *dir, unsigned n)
{
	smp_store_release(&dir->i_dir_seq, n + 2);
}
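
/*
 * Editorial note: i_dir_seq is a directory-local sequence count.
 * start_dir_add() spins until it can move it to an odd value
 * (insertion in progress) and end_dir_add() releases it back to even;
 * d_alloc_parallel() below samples it to detect concurrent insertions.
 */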

static void d_wait_lookup(struct dentry *dentry)
{
	if (d_in_lookup(dentry)) {
		DECLARE_WAITQUEUE(wait, current);
		add_wait_queue(dentry->d_wait, &wait);
		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&dentry->d_lock);
			schedule();
			spin_lock(&dentry->d_lock);
		} while (d_in_lookup(dentry));
	}
}

struct dentry *d_alloc_parallel(struct dentry *parent,
				const struct qstr *name,
				wait_queue_head_t *wq)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *new = d_alloc(parent, name);
	struct dentry *dentry;
	unsigned seq, r_seq, d_seq;

	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

retry:
	rcu_read_lock();
	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
	r_seq = read_seqbegin(&rename_lock);
	dentry = __d_lookup_rcu(parent, name, &d_seq);
	if (unlikely(dentry)) {
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}
		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
			rcu_read_unlock();
			dput(dentry);
			goto retry;
		}
		rcu_read_unlock();
		dput(new);
		return dentry;
	}
	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
		rcu_read_unlock();
		goto retry;
	}

	if (unlikely(seq & 1)) {
		rcu_read_unlock();
		goto retry;
	}

	hlist_bl_lock(b);
	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
		hlist_bl_unlock(b);
		rcu_read_unlock();
		goto retry;
	}
	/*
	 * No changes for the parent since the beginning of d_lookup().
	 * Since all removals from the chain happen with hlist_bl_lock(),
	 * any potential in-lookup matches are going to stay here until
	 * we unlock the chain.  All fields are stable in everything
	 * we encounter.
	 */
	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (!d_same_name(dentry, parent, name))
			continue;
		hlist_bl_unlock(b);
		/* now we can try to grab a reference */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}

		rcu_read_unlock();
		/*
		 * somebody is likely to be still doing lookup for it;
		 * wait for them to finish
		 */
		spin_lock(&dentry->d_lock);
		d_wait_lookup(dentry);
		/*
		 * it's not in-lookup anymore; in principle we should repeat
		 * everything from dcache lookup, but it's likely to be what
		 * d_lookup() would've found anyway.  If they really want to,
		 * they'd repeat the whole thing.
		 */
		if (unlikely(dentry->d_name.hash != hash))
			goto mismatch;
		if (unlikely(dentry->d_parent != parent))
			goto mismatch;
		if (unlikely(d_unhashed(dentry)))
			goto mismatch;
		if (unlikely(!d_same_name(dentry, parent, name)))
			goto mismatch;
		/* OK, it *is* a hashed match; return it */
		spin_unlock(&dentry->d_lock);
		dput(new);
		return dentry;
	}
	rcu_read_unlock();
	/* we can't take ->d_lock here; it's OK, though. */
	new->d_flags |= DCACHE_PAR_LOOKUP;
	new->d_wait = wq;
	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
	hlist_bl_unlock(b);
	return new;
mismatch:
	spin_unlock(&dentry->d_lock);
	dput(dentry);
	goto retry;
}
EXPORT_SYMBOL(d_alloc_parallel);

void __d_lookup_done(struct dentry *dentry)
{
	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
						 dentry->d_name.hash);
	hlist_bl_lock(b);
	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
	wake_up_all(dentry->d_wait);
	dentry->d_wait = NULL;
	hlist_bl_unlock(b);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_lru);
}
EXPORT_SYMBOL(__d_lookup_done);

/* inode->i_lock held if inode is non-NULL */

static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = NULL;
	unsigned n;
	spin_lock(&dentry->d_lock);
	if (unlikely(d_in_lookup(dentry))) {
		dir = dentry->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(dentry);
	}
	if (inode) {
		unsigned add_flags = d_flags_for_inode(inode);
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
		raw_write_seqcount_begin(&dentry->d_seq);
		__d_set_inode_and_type(dentry, inode, add_flags);
		raw_write_seqcount_end(&dentry->d_seq);
		fsnotify_update_flags(dentry);
	}
	__d_rehash(dentry);
	if (dir)
		end_dir_add(dir, n);
	spin_unlock(&dentry->d_lock);
	if (inode)
		spin_unlock(&inode->i_lock);
}

/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 */
void d_add(struct dentry *entry, struct inode *inode)
{
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
	}
	__d_add(entry, inode);
}
EXPORT_SYMBOL(d_add);

/**
 * d_exact_alias - find and hash an exact unhashed alias
 * @entry: dentry to add
 * @inode: The inode to go with this dentry
 *
 * If an unhashed dentry with the same name/parent and desired
 * inode already exists, hash and return it.  Otherwise, return
 * NULL.
 *
 * Parent directory should be locked.
 */
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
	struct dentry *alias;
	unsigned int hash = entry->d_name.hash;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
			continue;
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			spin_unlock(&alias->d_lock);
			alias = NULL;
		} else {
			__dget_dlock(alias);
			__d_rehash(alias);
			spin_unlock(&alias->d_lock);
		}
		spin_unlock(&inode->i_lock);
		return alias;
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}
EXPORT_SYMBOL(d_exact_alias);

static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}

static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;
	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		kfree_rcu(old_name, u.head);
}
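
/*
 * Editorial note: external names are refcounted, so copy_name() can
 * share the target's allocation by bumping u.count instead of copying
 * bytes, and the displaced old name is freed through kfree_rcu() only
 * when its last user goes away.
 */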

/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	struct dentry *old_parent, *p;
	struct inode *dir = NULL;
	unsigned n;

	WARN_ON(!dentry->d_inode);
	if (WARN_ON(dentry == target))
		return;

	BUG_ON(d_ancestor(target, dentry));
	old_parent = dentry->d_parent;
	p = d_ancestor(old_parent, target);
	if (IS_ROOT(dentry)) {
		BUG_ON(p);
		spin_lock(&target->d_parent->d_lock);
	} else if (!p) {
		/* target is not a descendent of dentry->d_parent */
		spin_lock(&target->d_parent->d_lock);
		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		BUG_ON(p == dentry);
		spin_lock(&old_parent->d_lock);
		if (p != target)
			spin_lock_nested(&target->d_parent->d_lock,
					DENTRY_D_LOCK_NESTED);
	}
	spin_lock_nested(&dentry->d_lock, 2);
	spin_lock_nested(&target->d_lock, 3);

	if (unlikely(d_in_lookup(target))) {
		dir = target->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(target);
	}

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* unhash both */
	if (!d_unhashed(dentry))
		___d_drop(dentry);
	if (!d_unhashed(target))
		___d_drop(target);

	/* ... and switch them in the tree */
	dentry->d_parent = target->d_parent;
	if (!exchange) {
		copy_name(dentry, target);
		target->d_hash.pprev = NULL;
		dentry->d_parent->d_lockref.count++;
		if (dentry != old_parent) /* wasn't IS_ROOT */
			WARN_ON(!--old_parent->d_lockref.count);
	} else {
		target->d_parent = old_parent;
		swap_names(dentry, target);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		__d_rehash(target);
		fsnotify_update_flags(target);
	}
	list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	__d_rehash(dentry);
	fsnotify_update_flags(dentry);
	fscrypt_handle_d_move(dentry);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	if (dir)
		end_dir_add(dir, n);

	if (dentry->d_parent != old_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (dentry != old_parent)
		spin_unlock(&old_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
2881
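/*
 * Added note on the lock order in __d_move(): the ancestor parent's
 * d_lock is taken first (decided via d_ancestor()), the other parent
 * nests at DENTRY_D_LOCK_NESTED, and the two moved dentries use
 * lockdep subclasses 2 and 3, so up to four d_lock instances can be
 * held at once without tripping lockdep.
 */
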
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);

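/*
 * Added usage sketch (condensed, not verbatim): the main caller is the
 * VFS rename path.  Unless the filesystem sets FS_RENAME_DOES_D_MOVE
 * and moves the dentries itself, vfs_rename() finishes roughly with:
 *
 *	if (!(flags & RENAME_EXCHANGE))
 *		d_move(old_dentry, new_dentry);
 *	else
 *		d_exchange(old_dentry, new_dentry);
 */
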
/*
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}

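/*
 * Added note: unlike d_move(), both ends of an exchange must be
 * positive, non-root dentries -- hence the four WARN_ONs above.  The
 * actual swapping of parents, names and hash chains is the
 * exchange == true branch of __d_move().
 */
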
/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}

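/*
 * Worked example (added): for a fully hashed path /a/b/c,
 * d_ancestor(a, c) returns b (a's child on the way down to c),
 * d_ancestor(c, a) returns NULL, and d_ancestor(a, a) is also NULL --
 * a dentry does not count as its own ancestor.
 */
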
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL;
	struct rw_semaphore *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!inode_trylock_shared(alias->d_parent->d_inode))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	if (m2)
		up_read(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}

/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has an IS_ROOT alias, then d_move that in
 * place of the given dentry and return it, else simply d_add the inode
 * to the dentry and return NULL.
 *
 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
 * we should error out: directories can't have multiple aliases.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise we
 * return NULL.
 *
 * Cluster filesystems may call this function with a negative, hashed dentry.
 * In that case, we know that the inode will be a regular file, and also this
 * will only occur during atomic_open. So we need to check for the dentry
 * being already hashed only in the final case.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode)
		goto out;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			/* The reference to new ensures it remains an alias */
			spin_unlock(&inode->i_lock);
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				write_sequnlock(&rename_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				struct dentry *old_parent = dget(new->d_parent);
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
				dput(old_parent);
			} else {
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
			}
			iput(inode);
			return new;
		}
	}
out:
	__d_add(dentry, inode);
	return NULL;
}
EXPORT_SYMBOL(d_splice_alias);

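/*
 * Added usage sketch: the canonical caller is a filesystem ->lookup()
 * method.  The function below is hypothetical, but the shape matches
 * what exportable filesystems such as ext4 do -- hand whatever came
 * back from the on-disk lookup (an inode, NULL, or an ERR_PTR) straight
 * to d_splice_alias() and return its result:
 *
 *	static struct dentry *example_lookup(struct inode *dir,
 *			struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode;
 *
 *		inode = example_find_inode(dir, &dentry->d_name); // sketch
 *		return d_splice_alias(inode, dentry);
 *	}
 */
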
/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
 * Returns false otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */
bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	bool result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return true;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_read_lock to protect against the d_parent trashing
		 * due to d_move
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = true;
		else
			result = false;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
EXPORT_SYMBOL(is_subdir);

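/*
 * Added note: is_subdir() shows the rename_lock read pattern in
 * miniature -- sample the seqlock, walk d_parent under RCU, and retry
 * the whole walk if a concurrent rename bumped the sequence.  A caller
 * might use it like this (sketch; names are hypothetical):
 *
 *	if (!is_subdir(dentry, root))
 *		return -EXDEV;	// dentry escaped the expected subtree
 */
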
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}

void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill);
}

EXPORT_SYMBOL(d_genocide);

void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);

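/*
 * Added usage sketch: d_tmpfile() is meant for ->tmpfile() (O_TMPFILE)
 * implementations, called once the inode exists with a provisional
 * link count of 1.  The example_* names are hypothetical:
 *
 *	static int example_tmpfile(struct inode *dir, struct dentry *dentry,
 *				   umode_t mode)
 *	{
 *		struct inode *inode = example_new_inode(dir, mode);
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		d_tmpfile(dentry, inode); // nlink 1 -> 0, name becomes "#<ino>"
 *		return 0;
 *	}
 */
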
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);

static void __init dcache_init_early(void)
{
	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY | HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	d_hash_shift = 32 - d_hash_shift;
}

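/*
 * Worked example (added): alloc_large_system_hash() hands back the
 * table size as log2(buckets) in d_hash_shift -- 13 for 2^13 buckets.
 * The "32 - d_hash_shift" flip above turns that into 19, so d_hash()
 * can compute the bucket as (hash >> d_hash_shift) and index by the
 * top 13 bits of the 32-bit name hash instead of the bottom ones.
 */
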
static void __init dcache_init(void)
{
	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
		d_iname);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	d_hash_shift = 32 - d_hash_shift;
}

/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

void __init vfs_caches_init_early(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);

	dcache_init_early();
	inode_init_early();
}

void __init vfs_caches_init(void)
{
	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);

	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}
