1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/ratelimit.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/fsnotify.h>
22#include <linux/slab.h>
23#include <linux/init.h>
24#include <linux/hash.h>
25#include <linux/cache.h>
26#include <linux/export.h>
27#include <linux/security.h>
28#include <linux/seqlock.h>
29#include <linux/bootmem.h>
30#include <linux/bit_spinlock.h>
31#include <linux/rculist_bl.h>
32#include <linux/list_lru.h>
33#include "internal.h"
34#include "mount.h"
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
/* Tunable: relative pressure the VM applies when reclaiming dentries/inodes. */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

/*
 * Global seqlock protecting d_parent/d_name consistency across renames;
 * readers retry path walks when a rename races with them.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

/* Slab cache from which all struct dentry objects are allocated. */
static struct kmem_cache *dentry_cache __read_mostly;

/* Canonical shared qstrs for the empty name and "/". */
const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
85
86
87
88
89
90
91
92
93
94
/* Shift applied to a full 32-bit name hash to index dentry_hashtable. */
static unsigned int d_hash_shift __read_mostly;

/* Global dentry hash table; buckets are bit-spinlocked hlist_bl heads. */
static struct hlist_bl_head *dentry_hashtable __read_mostly;

/* Map a precomputed name hash to its bucket in the global hash table. */
static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> d_hash_shift);
}

/* Separate, fixed-size table for in-lookup (not yet hashed) dentries. */
#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

/*
 * Bucket for an in-flight lookup: mixes the parent pointer into the name
 * hash so identical names under different parents spread across buckets.
 */
static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
113
114
115
/* Statistics exported via /proc; nr_dentry/nr_unused filled on demand. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

/* Per-cpu counters; summed lazily in get_nr_dentry{,_unused}(). */
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
122
123#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
124
125
126
127
128
129
130
131
132
133
134
135
136
137static long get_nr_dentry(void)
138{
139 int i;
140 long sum = 0;
141 for_each_possible_cpu(i)
142 sum += per_cpu(nr_dentry, i);
143 return sum < 0 ? 0 : sum;
144}
145
146static long get_nr_dentry_unused(void)
147{
148 int i;
149 long sum = 0;
150 for_each_possible_cpu(i)
151 sum += per_cpu(nr_dentry_unused, i);
152 return sum < 0 ? 0 : sum;
153}
154
/*
 * sysctl handler for fs.dentry-state: refresh the aggregate counters
 * before handing the table to the generic proc helper.
 */
int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
162#endif
163
164
165
166
167
168#ifdef CONFIG_DCACHE_WORD_ACCESS
169
170#include <asm/word-at-a-time.h>
171
172
173
174
175
176
177
178
179
/*
 * Word-at-a-time name comparison.  Returns 0 on match, 1 on mismatch.
 *
 * 'cs' is the dentry name: kept word-aligned and NUL-padded by
 * __d_alloc(), so a plain word load is safe.  'ct' is the caller's
 * candidate name and may be unaligned and unpadded, hence
 * load_unaligned_zeropad().  Note the loads happen before the tail
 * check so the final (partial) word is already fetched when we break
 * out to apply the byte mask.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = read_word_at_a_time(cs);
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;	/* partial word left: compare under mask below */
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	/* Mask off the bytes beyond the remaining length before comparing. */
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
200
201#else
202
/*
 * Byte-by-byte fallback name comparison (no word-at-a-time support).
 * Returns 0 when the first @tcount bytes match, 1 otherwise.
 * Callers always pass tcount >= 1.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs++ != *ct++)
			return 1;
	} while (--tcount);
	return 0;
}
214
215#endif
216
/*
 * Compare a dentry's name against (ct, tcount).  Returns 0 on match.
 *
 * May run under RCU with the dentry being concurrently renamed, so the
 * name pointer is sampled exactly once with READ_ONCE().  This pairs
 * with the smp_store_release() of d_name.name in __d_alloc(): once we
 * see the pointer, the name bytes it points to are initialized.  A
 * racing rename can still change which name we compare against, but the
 * seqcount checks in the callers catch that; here we only need the
 * comparison to be against *some* consistent name without faulting.
 */
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}
239
/*
 * Out-of-line storage for names too long for the inline d_iname buffer.
 * The refcount and the RCU head share storage: once the count drops to
 * zero nobody takes new references, so the union is safe.
 */
struct external_name {
	union {
		atomic_t count;		/* references from dentries/snapshots */
		struct rcu_head head;	/* used only for deferred freeing */
	} u;
	unsigned char name[];		/* NUL-terminated name bytes */
};

/* Recover the external_name container from a dentry's d_name pointer.
 * Only valid when dname_external(dentry) is true. */
static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}
252
/* RCU callback: free a dentry whose name was stored inline. */
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

/* RCU callback: free an external name, undoing the indirectly-reclaimable
 * accounting added in __d_alloc(). */
static void __d_free_external_name(struct rcu_head *head)
{
	struct external_name *name = container_of(head, struct external_name,
						  u.head);

	mod_node_page_state(page_pgdat(virt_to_page(name)),
			    NR_INDIRECTLY_RECLAIMABLE_BYTES,
			    -ksize(name));

	kfree(name);
}

/* RCU callback: free both the external name and the dentry itself. */
static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	__d_free_external_name(&external_name(dentry)->u.head);

	kmem_cache_free(dentry_cache, dentry);
}

/* True when the dentry's name lives in a separate external_name
 * allocation rather than the inline d_iname array. */
static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}
285
/*
 * Pin a stable copy of a dentry's name for use after d_lock is dropped.
 * External names are shared by bumping their refcount; inline names are
 * copied into the snapshot while still holding d_lock, so a concurrent
 * rename cannot give us a torn name.  Paired with
 * release_dentry_name_snapshot().
 */
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		atomic_inc(&p->u.count);
		spin_unlock(&dentry->d_lock);
		name->name = p->name;
	} else {
		/* copy the whole inline buffer, terminator included */
		memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
		spin_unlock(&dentry->d_lock);
		name->name = name->inline_name;
	}
}
EXPORT_SYMBOL(take_dentry_name_snapshot);
301
/*
 * Drop a name snapshot.  Inline copies need no cleanup; a shared
 * external name is freed via RCU when we held the last reference.
 */
void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name != name->inline_name)) {
		struct external_name *p;
		p = container_of(name->name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->u.count)))
			call_rcu(&p->u.head, __d_free_external_name);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
312
/*
 * Install an inode and the matching DCACHE_ENTRY_TYPE bits in one
 * d_flags update.  d_inode is written before d_flags so that observers
 * who see the type bits also see the inode (callers provide the
 * required ordering/locking).
 */
static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}

/*
 * Inverse of __d_set_inode_and_type(): clear the type bits first, then
 * the inode pointer, so nobody sees type bits with a NULL inode.
 */
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
}
334
/*
 * Final disposal of a dead dentry.  If the (shared) external name's
 * refcount hits zero here, one RCU callback frees both name and dentry;
 * otherwise the name outlives us and only the dentry is freed - either
 * immediately (never visible to RCU lookups) or after a grace period.
 */
static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
351
352
353
354
355
/*
 * Release the dentry's inode, using the filesystem's d_iput() if it has
 * one.  Called with both d_lock and the inode's i_lock held; drops both.
 * The d_seq bump is only needed while the dentry is hashed, i.e. while
 * RCU-walk lookups might still observe it.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	bool hashed = !d_unhashed(dentry);

	if (hashed)
		raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	if (hashed)
		raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
/*
 * LRU / shrink-list bookkeeping helpers.  All are called with d_lock
 * held.  The flag pair (DCACHE_LRU_LIST, DCACHE_SHRINK_LIST) must match
 * which list d_lru is actually on; D_FLAG_VERIFY asserts that.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))

/* Put an unreferenced dentry on its superblock's LRU. */
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

/* Remove a dentry from the superblock LRU. */
static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

/* Remove a dentry from a private shrink list. */
static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

/* Add a dentry (not currently on any list) to a private shrink list. */
static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * Variants used from within the list_lru walk callbacks, where the
 * lru lock is already held by the walker.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_lru_isolate(lru, &dentry->d_lru);
}

/* Move from LRU to a shrink list; note nr_dentry_unused is left alone
 * here - the dentry is still counted as unused until d_shrink_del(). */
static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
/*
 * Unhash a dentry from whichever hash chain it is on.  IS_ROOT dentries
 * (including anonymous/disconnected ones) live on the per-sb s_roots
 * list instead of the name hash.  Does not touch d_seq or pprev; that
 * is __d_drop()'s job.
 */
static void ___d_drop(struct dentry *dentry)
{
	struct hlist_bl_head *b;

	if (unlikely(IS_ROOT(dentry)))
		b = &dentry->d_sb->s_roots;
	else
		b = d_hash(dentry->d_name.hash);

	hlist_bl_lock(b);
	__hlist_bl_del(&dentry->d_hash);
	hlist_bl_unlock(b);
}

/*
 * Unhash a dentry and invalidate its d_seq so in-flight RCU-walk
 * lookups retry.  Caller holds d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		___d_drop(dentry);
		/* mark unhashed: d_unhashed() tests pprev == NULL */
		dentry->d_hash.pprev = NULL;
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);

/* Locked wrapper around __d_drop(). */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
500
/*
 * Detach a dying dentry from its parent's d_subdirs list.  Called with
 * the parent's d_lock held (parent may be NULL only for IS_ROOT, in
 * which case d_child is empty and we return early).
 *
 * Directory cursors (DCACHE_DENTRY_CURSOR) may still reference our
 * position via the half-deleted d_child.next, so after __list_del_entry
 * we slide that forward past any cursor entries; readdir code can then
 * resume from a real sibling.  DCACHE_DENTRY_KILLED tells d_walk() to
 * skip us when it ascends.
 */
static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;

	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);

	/* advance our stale next pointer past any cursors */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}
538
/*
 * Tear down a dentry.  Caller holds dentry->d_lock, the parent's d_lock
 * (unless IS_ROOT), and the inode's i_lock if there is an inode; all of
 * those are dropped here.
 *
 * If the dentry sits on someone's private shrink list we cannot free it
 * from under them: mark it DCACHE_MAY_FREE and let shrink_dentry_list()
 * do the final dentry_free().
 */
static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/* make lockref_get_not_dead() etc. fail from now on */
	lockref_mark_dead(&dentry->d_lockref);

	/* fs gets one last look while the dentry is still intact */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);	/* drops d_lock and i_lock */
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;	/* shrink-list owner frees it */
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
}
584
/*
 * Slow path of lock_parent(): we must drop dentry->d_lock to take the
 * parent's lock in the right order, so the parent may change under us;
 * loop until the sampled parent is still current once locked.  RCU
 * keeps the sampled parent's memory valid across the window where we
 * hold no locks.  Returns the locked parent (NULL if dentry became its
 * own parent, i.e. root) with dentry->d_lock re-acquired.
 */
static struct dentry *__lock_parent(struct dentry *dentry)
{
	struct dentry *parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = READ_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * A rename may have moved us; recheck under the parent's lock.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

/*
 * Lock the parent of a dentry whose d_lock we already hold, preferring
 * a cheap trylock; returns the locked parent or NULL for a root dentry.
 */
static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	return __lock_parent(dentry);
}
622
/*
 * Decide whether a dentry whose last reference is being dropped should
 * be kept cached.  Called with d_lock held and count about to reach
 * zero.  Returns true after decrementing the count and (re)inserting
 * the dentry into the LRU; returns false (count untouched) when the
 * dentry should be killed instead.
 */
static inline bool retain_dentry(struct dentry *dentry)
{
	WARN_ON(d_in_lookup(dentry));

	/* unhashed dentries are useless to keep */
	if (unlikely(d_unhashed(dentry)))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		return false;

	/* let the filesystem veto caching */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			return false;
	}
	/* retain: drop the ref and make sure it is on the LRU */
	dentry->d_lockref.count--;
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
		dentry->d_flags |= DCACHE_REFERENCED;
	return true;
}
646
647
648
649
650
651
/*
 * Try to kill a dentry whose d_lock we hold and whose refcount is 1.
 * Returns the parent (for the caller to continue dropping) or NULL when
 * there is nothing more to do.
 *
 * The inode's i_lock and parent's d_lock must be taken before the kill;
 * trylocks keep the common case cheap, and the slow paths re-acquire in
 * the correct order and then re-validate the refcount (someone may have
 * grabbed a reference while we had d_lock dropped).
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto slow_positive;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			parent = __lock_parent(dentry);
			/* d_lock was dropped: the inode may have changed */
			if (likely(inode || !dentry->d_inode))
				goto got_locks;
			/* negative -> positive transition: restart */
			if (parent)
				spin_unlock(&parent->d_lock);
			inode = dentry->d_inode;
			goto slow_positive;
		}
	}
	__dentry_kill(dentry);
	return parent;

slow_positive:
	spin_unlock(&dentry->d_lock);
	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	parent = lock_parent(dentry);
got_locks:
	if (unlikely(dentry->d_lockref.count != 1)) {
		/* extra refs appeared: just drop ours */
		dentry->d_lockref.count--;
	} else if (likely(!retain_dentry(dentry))) {
		__dentry_kill(dentry);
		return parent;
	}
	/* we are keeping it, after all */
	if (inode)
		spin_unlock(&inode->i_lock);
	if (parent)
		spin_unlock(&parent->d_lock);
	spin_unlock(&dentry->d_lock);
	return NULL;
}
697
698
699
700
701
702
703
704
705
/*
 * Lockless fast path for dput().  Returns true if the reference was
 * dropped and nothing more needs doing; returns false with d_lock held
 * and the refcount set to 1, leaving the slow path to decide between
 * retaining and killing the dentry.
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If the fs has a d_delete() hook we cannot decide anything
	 * locklessly; fall back to lockref's put-or-lock.
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/* try the cmpxchg-based lockless decrement */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * Negative return: the lockref is locked or dead and the
	 * decrement could not be done locklessly; redo it under d_lock.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return 1;
		}
		return 0;	/* last ref: slow path, d_lock held */
	}

	/* non-zero remaining count: we were not the last reference */
	if (ret)
		return 1;

	/*
	 * We dropped the last reference without taking the lock.  If the
	 * dentry already looks fully cached (hashed, on the LRU, and
	 * REFERENCED) there is nothing to update and we can simply leave
	 * it with a zero count.  smp_rmb() orders the flag read against
	 * the preceding count manipulation; the exact d_flags value may
	 * be slightly stale, which only means we occasionally take the
	 * slow path unnecessarily.
	 */
	smp_rmb();
	d_flags = READ_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return 1;

	/*
	 * Not a straightforward "keep cached" case: take the lock and
	 * re-examine.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Someone else took a reference between our decrement and the
	 * lock acquisition; they own the cleanup decision now.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return 1;
	}

	/*
	 * Still zero: pretend the decrement has not happened yet so the
	 * slow path (retain_dentry()/dentry_kill()) sees count == 1.
	 */
	dentry->d_lockref.count = 1;
	return 0;
}
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
/**
 * dput - release a dentry reference
 * @dentry: dentry to release (NULL is a no-op)
 *
 * Drops one reference.  If that was the last one and the dentry is not
 * worth caching, the dentry is killed; killing a dentry hands back a
 * referenced parent, so we loop upward, rescheduling between kills.
 * The RCU read lock spans fast_dput() so the dentry memory stays valid
 * across its lockless window.  May sleep (cond_resched), so callers
 * must be in process context.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	might_sleep();

	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}

	/* slow path: d_lock is held, count is 1 */
	rcu_read_unlock();

	if (likely(retain_dentry(dentry))) {
		spin_unlock(&dentry->d_lock);
		return;
	}

	dentry = dentry_kill(dentry);	/* returns referenced parent */
	if (dentry) {
		cond_resched();
		goto repeat;
	}
}
EXPORT_SYMBOL(dput);
857
858
859
/* Take a reference; caller already holds dentry->d_lock. */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

/* Take a reference without holding d_lock (lockref fast path). */
static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
869
/**
 * dget_parent - return a referenced copy of dentry's parent
 * @dentry: child dentry
 *
 * Fast path: sample d_parent under RCU, take a reference if it is not
 * already dead, and confirm it is still the parent afterwards - a
 * concurrent rename makes us drop the ref and fall back.  Slow path:
 * lock the candidate parent and re-validate before bumping the count.
 */
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	rcu_read_lock();
	ret = READ_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		/* still our parent? then the ref is good */
		if (likely(ret == READ_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct
	 * under the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
/*
 * Find a referenced alias (dentry) for an inode; caller holds i_lock.
 * Prefers a hashed, connected alias; a disconnected IS_ROOT alias is
 * remembered and used only if nothing better exists.  Re-validates the
 * disconnected candidate under its d_lock (it can be unhashed between
 * the two passes), restarting the scan if it went bad.  Returns NULL
 * when the inode has no usable alias.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;	/* fallback candidate */
			} else {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
		goto again;	/* candidate went stale; rescan */
	}
	return NULL;
}
957
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * Locked wrapper around __d_find_alias(); the unlocked hlist_empty()
 * check is just an optimization to avoid taking i_lock when the inode
 * has no aliases at all.  Returns a referenced dentry or NULL.
 */
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
970
971
972
973
974
/*
 * Try to kill every unreferenced dentry attached to this inode.
 * __dentry_kill() drops i_lock (via dentry_unlink_inode), so after each
 * kill we must restart the alias walk from scratch.  The refcount is
 * rechecked after lock_parent() because that may have dropped d_lock
 * transiently.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;	/* list changed under us */
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
/*
 * Take all locks needed to kill a dentry from a shrink list: the
 * inode's i_lock (if any) and the parent's d_lock, with d_lock already
 * held on entry.  Whenever a trylock fails we must drop d_lock to
 * acquire in the right order, so everything (refcount, d_inode,
 * d_parent) is re-validated afterwards.  Returns true with all locks
 * held, or false (locks released except d_lock) when the dentry can no
 * longer be killed.  Caller runs under rcu_read_lock() so dropped-lock
 * windows cannot free the structures under us.
 */
static bool shrink_lock_dentry(struct dentry *dentry)
{
	struct inode *inode;
	struct dentry *parent;

	if (dentry->d_lockref.count)
		return false;

	inode = dentry->d_inode;
	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
		spin_unlock(&dentry->d_lock);
		spin_lock(&inode->i_lock);
		spin_lock(&dentry->d_lock);
		if (unlikely(dentry->d_lockref.count))
			goto out;
		/* changed inode means that somebody had grabbed it */
		if (unlikely(inode != dentry->d_inode))
			goto out;
	}

	parent = dentry->d_parent;
	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
		return true;

	spin_unlock(&dentry->d_lock);
	spin_lock(&parent->d_lock);
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		spin_lock(&dentry->d_lock);
		goto out;
	}
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	if (likely(!dentry->d_lockref.count))
		return true;
	spin_unlock(&parent->d_lock);
out:
	if (inode)
		spin_unlock(&inode->i_lock);
	return false;
}
1049
/*
 * Kill every dentry on a private shrink list.  Dentries that acquired a
 * reference (or whose locks could not be taken) are simply unlinked
 * from the list; a dentry already killed elsewhere (negative count)
 * with DCACHE_MAY_FREE set is ours to free.  After killing a child we
 * inherit the implicit parent reference and walk upward killing any
 * parents that thereby become unreferenced.
 */
static void shrink_dentry_list(struct list_head *list)
{
	while (!list_empty(list)) {
		struct dentry *dentry, *parent;

		cond_resched();

		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		rcu_read_lock();
		if (!shrink_lock_dentry(dentry)) {
			bool can_free = false;
			rcu_read_unlock();
			d_shrink_del(dentry);
			/* negative count: __dentry_kill() already ran */
			if (dentry->d_lockref.count < 0)
				can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}
		rcu_read_unlock();
		d_shrink_del(dentry);
		parent = dentry->d_parent;
		__dentry_kill(dentry);
		if (parent == dentry)
			continue;
		/*
		 * We need to prune ancestors too. This is necessary to
		 * prevent quadratic behavior of shrink_dcache_parent(),
		 * but is also expected to be beneficial in reducing
		 * dentry holders.
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
			dentry = dentry_kill(dentry);
	}
}
1088
/*
 * list_lru walk callback for memory-pressure dcache shrinking.
 * Referenced dentries are removed from the LRU outright (they will be
 * re-added on their next dput); DCACHE_REFERENCED ones get a second
 * chance and are rotated; the rest are moved to the caller's disposal
 * list for shrink_dentry_list().
 */
static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * We are inverting the lru lock/dentry->d_lock here, so use a
	 * trylock; skipping an occasional dentry is not a problem for a
	 * best-effort shrinker.
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use: take them off the LRU
	 * here; dput() will put them back when the reference drops.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * Recently-used: clear the flag and rotate to the tail
		 * so it gets reclaimed only if pressure persists.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
/**
 * prune_dcache_sb - shrink the dcache for a superblock
 * @sb: superblock to shrink
 * @sc: shrink control (nr_to_scan, memcg, node)
 *
 * Walks the sb's dentry LRU, isolating reclaimable dentries onto a
 * local list which is then torn down.  Returns the number of dentries
 * isolated (i.e. the walk's notion of "freed").
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}
1169
/*
 * list_lru walk callback for the unconditional shrink_dcache_sb():
 * unlike dentry_lru_isolate() it moves every dentry it can lock to the
 * disposal list, regardless of refcount or REFERENCED state.
 */
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * Lock inversion against the lru lock again: trylock and skip
	 * on contention; the outer loop will come back around.
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1189
1190
1191
1192
1193
1194
1195
1196
1197
/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Attempts to free all unreferenced dentries of the superblock,
 * batching 1024 at a time until the LRU is empty.  The explicit
 * this_cpu_sub() compensates for d_lru_shrink_move() leaving
 * nr_dentry_unused untouched during the isolate walk.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, 1024);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
1213
1214
1215
1216
1217
1218
1219
1220
/*
 * Return values for the d_walk() enter() callback:
 *  D_WALK_CONTINUE - descend into this dentry's children
 *  D_WALK_QUIT     - abort the whole walk
 *  D_WALK_NORETRY  - keep walking but don't restart on rename race
 *  D_WALK_SKIP     - skip this subtree
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
/**
 * d_walk - walk the dentry tree
 * @parent: start of walk
 * @data:   opaque data passed to @enter
 * @enter:  callback when first entering each dentry
 * @finish: optional callback when the walk completes (not on QUIT/SKIP)
 *
 * Depth-first walk holding this_parent->d_lock, taking each child's
 * d_lock nested inside it.  Renames are detected with rename_lock: the
 * first pass is lockless (seqbegin); on a detected race the walk
 * restarts with the seqlock held exclusively (seq = 1), unless a
 * NORETRY callback forbade retrying.  Ascending is done under RCU since
 * the child's lock is dropped before the parent's is retaken; dentries
 * marked DCACHE_DENTRY_KILLED in that window are skipped.  Cursor
 * entries (readdir state) are never visited.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			/* hand the nested lock annotation over to the
			 * new this_parent without actually unlocking */
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);	/* retry pass must not race again */
	if (!retry)
		return;
	seq = 1;	/* take rename_lock exclusively this time */
	goto again;
}
1345
/* d_walk() payload for path_has_submounts(). */
struct check_mount {
	struct vfsmount *mnt;		/* mount the walk is confined to */
	unsigned int mounted;		/* set when a mountpoint is found */
};

/* d_walk() callback: stop as soon as a dentry is a mountpoint in our
 * mount namespace. */
static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
	struct check_mount *info = data;
	struct path path = { .mnt = info->mnt, .dentry = dentry };

	if (likely(!d_mountpoint(dentry)))
		return D_WALK_CONTINUE;
	if (__path_is_mountpoint(&path)) {
		info->mounted = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}
1364
1365
1366
1367
1368
1369
1370
1371
1372
/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Returns non-zero if the walk under @parent finds a dentry that is a
 * mountpoint of @parent->mnt.  mount_lock is held across the walk so
 * the mount tree cannot change underneath it.
 */
int path_has_submounts(const struct path *parent)
{
	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

	read_seqlock_excl(&mount_lock);
	d_walk(parent->dentry, &data, path_check_mount, NULL);
	read_sequnlock_excl(&mount_lock);

	return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);
1384
1385
1386
1387
1388
1389
1390
1391
1392
/*
 * Mark a dentry as a mountpoint (DCACHE_MOUNTED).  Fails with -ENOENT
 * if the dentry or any ancestor has been unhashed/unlinked (nothing
 * should be mounted on a dead path) and with -EBUSY if it is already a
 * mountpoint.  rename_lock is held for writing so the ancestor chain
 * cannot change while we validate it.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
/* d_walk() payload for shrink_dcache_parent() and d_invalidate(). */
struct select_data {
	struct dentry *start;		/* root of the walk (never collected) */
	struct list_head dispose;	/* shrink list being built */
	int found;			/* dentries found worth another pass */
};

/*
 * d_walk() callback: collect unreferenced dentries onto the disposal
 * list.  Dentries already on someone's shrink list are counted but left
 * where they are.  The walk is cut short once something has been
 * collected and rescheduling is due - the caller loops anyway.
 */
static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
1470
1471
1472
1473
1474
1475
1476
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry:
 * repeatedly collect unreferenced descendants and tear them down until
 * a full walk finds nothing left to do.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
1494
/*
 * d_walk() callback run at umount: every leaf should by now be either
 * childless-and-busy only if it is the root itself with exactly the one
 * reference do_one_tree() holds.  Anything else is a leaked reference -
 * report it loudly but keep walking so all leaks are shown.
 */
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendents; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}
1517
/* Tear down one dentry tree at umount: shrink it, warn about anything
 * still busy, then drop the root's hash entry and our reference. */
static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check, NULL);
	d_drop(dentry);
	dput(dentry);
}
1525
1526
1527
1528
/*
 * destroy the dentries attached to a superblock on unmounting:
 * first the tree rooted at s_root, then every anonymous/disconnected
 * root still parked on s_roots.
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_roots)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}
1544
/* d_walk() payload for d_invalidate(): normal select_collect state plus
 * a mountpoint discovered along the way (referenced, walk aborted). */
struct detach_data {
	struct select_data select;
	struct dentry *mountpoint;
};
/* d_walk() callback: abort on the first mountpoint found, otherwise
 * collect shrinkable dentries like select_collect(). */
static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
{
	struct detach_data *data = _data;

	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);	/* d_lock held by d_walk */
		data->mountpoint = dentry;
		return D_WALK_QUIT;
	}

	return select_collect(&data->select, dentry);
}

/* d_walk() finish callback: only once the subtree has no mounts and
 * nothing left to shrink is it safe to finally unhash the root. */
static void check_and_drop(void *_data)
{
	struct detach_data *data = _data;

	if (!data->mountpoint && list_empty(&data->select.dispose))
		__d_drop(data->select.start);
}
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * A no-op if already unhashed; a plain d_drop() for negative dentries.
 * Otherwise loop: walk the subtree detaching any mounts and collecting
 * shrinkable dentries, until a pass finds neither - at which point
 * check_and_drop() has unhashed the dentry and we are done.
 */
void d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		return;
	}

	for (;;) {
		struct detach_data data;

		data.mountpoint = NULL;
		INIT_LIST_HEAD(&data.select.dispose);
		data.select.start = dentry;
		data.select.found = 0;

		d_walk(dentry, &data, detach_and_collect, check_and_drop);

		if (!list_empty(&data.select.dispose))
			shrink_dentry_list(&data.select.dispose);
		else if (!data.mountpoint)
			return;

		if (data.mountpoint) {
			detach_mounts(data.mountpoint);
			dput(data.mountpoint);
		}
	}
}
EXPORT_SYMBOL(d_invalidate);
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name (NULL means use "/"; caller beware)
 *
 * Allocates a dentry that is NOT attached to a parent (d_parent points
 * at itself) and is unhashed.  Short names are copied into the inline
 * d_iname buffer; longer ones get a refcounted external_name
 * allocation, accounted as indirectly-reclaimable memory.  Returns NULL
 * on allocation (or d_init) failure.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct external_name *ext = NULL;
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * Keep the last byte of the inline buffer zeroed so
	 * word-at-a-time name compares never read past a NUL.
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		name = &slash_name;
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		/* name[1] not name[0]: room for the NUL terminator */
		size_t size = offsetof(struct external_name, name[1]);

		ext = kmalloc(size + name->len, GFP_KERNEL_ACCOUNT);
		if (!ext) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&ext->u.count, 1);
		dname = ext->name;
	} else  {
		dname = dentry->d_iname;
	}	

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character.
	 * Pairs with READ_ONCE() of d_name.name in dentry_cmp(). */
	smp_store_release(&dentry->d_name.name, dname);

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;	/* IS_ROOT until attached */
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	if (unlikely(ext)) {
		pg_data_t *pgdat = page_pgdat(virt_to_page(ext));
		mod_node_page_state(pgdat, NR_INDIRECTLY_RECLAIMABLE_BYTES,
				    ksize(ext));
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	dentry->d_flags |= DCACHE_RCUACCESS;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);		/* pin the parent */
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);
1739
/* Allocate an unattached IS_ROOT dentry for @sb (named "/"). */
struct dentry *d_alloc_anon(struct super_block *sb)
{
	return __d_alloc(sb, NULL);
}
EXPORT_SYMBOL(d_alloc_anon);
1745
/*
 * Allocate a cursor dentry under @parent.  The cursor pins its parent
 * via dget() and is marked DCACHE_DENTRY_CURSOR; it is never hashed.
 */
struct dentry *d_alloc_cursor(struct dentry * parent)
{
	struct dentry *dentry = d_alloc_anon(parent->d_sb);
	if (dentry) {
		dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
		dentry->d_parent = dget(parent);
	}
	return dentry;
}
1755
1756
1757
1758
1759
1760
1761
1762
1763
/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
EXPORT_SYMBOL(d_alloc_pseudo);
1769
1770struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1771{
1772 struct qstr q;
1773
1774 q.name = name;
1775 q.hash_len = hashlen_string(parent, name);
1776 return d_alloc(parent, &q);
1777}
1778EXPORT_SYMBOL(d_alloc_name);
1779
/*
 * Install @op as @dentry's dentry_operations and mirror the presence of
 * each method into d_flags, so hot paths can test a flag instead of
 * chasing d_op.  Re-setting ops on a dentry is a bug (hence the WARNs);
 * this must happen before the dentry becomes visible to lookups.
 */
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
				DCACHE_OP_COMPARE |
				DCACHE_OP_REVALIDATE |
				DCACHE_OP_WEAK_REVALIDATE |
				DCACHE_OP_DELETE |
				DCACHE_OP_REAL));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
	if (op->d_real)
		dentry->d_flags |= DCACHE_OP_REAL;

}
EXPORT_SYMBOL(d_set_d_op);
1809
1810
1811
1812
1813
1814
1815
1816
1817
/**
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry: The dentry to mark
 *
 * Sets DCACHE_FALLTHRU under d_lock.  Used by union/overlay-style
 * filesystems to mark entries that defer to a lower layer.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);
1825
/*
 * Map @inode to the DCACHE_*_TYPE flags a dentry pointing at it should
 * carry.  A NULL inode yields DCACHE_MISS_TYPE (negative dentry).  As a
 * side effect, "has a working ->lookup" / "is not a symlink" verdicts
 * are cached in i_opflags so later calls can skip the i_op dereference.
 */
static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	/* non-directory: a ->get_link method makes it a symlink */
	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}
1860
/*
 * Attach @inode to @dentry: link onto the inode's alias list and set
 * d_inode plus the type flags inside a d_seq write section, so lockless
 * (RCU-walk) readers see a consistent dentry.  Caller holds
 * inode->i_lock; d_lock is taken here.
 */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry, may be %NULL (no-op)
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
		__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_instantiate);
1901
1902
1903
1904
1905
1906
1907
/*
 * This should be equivalent to d_instantiate() + unlock_new_inode(),
 * with the lockdep annotation done first.  Use this instead of
 * open-coding the d_instantiate()/unlock_new_inode() combination.
 */
void d_instantiate_new(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	BUG_ON(!inode);
	lockdep_annotate_inode_mutex_key(inode);
	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	/* order the I_NEW clear against the wake-up of bit waiters */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
/**
 * d_instantiate_no_diralias - instantiate a non-aliased dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.  If a directory alias is
 * found, return -EBUSY and drop the inode reference; on success the
 * inode reference is consumed by the dcache.
 */
int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));

	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
		spin_unlock(&inode->i_lock);
		iput(inode);
		return -EBUSY;
	}
	__d_instantiate(entry, inode);
	spin_unlock(&inode->i_lock);

	return 0;
}
EXPORT_SYMBOL(d_instantiate_no_diralias);
1950
1951struct dentry *d_make_root(struct inode *root_inode)
1952{
1953 struct dentry *res = NULL;
1954
1955 if (root_inode) {
1956 res = d_alloc_anon(root_inode->i_sb);
1957 if (res)
1958 d_instantiate(res, root_inode);
1959 else
1960 iput(root_inode);
1961 }
1962 return res;
1963}
1964EXPORT_SYMBOL(d_make_root);
1965
/*
 * Return a referenced copy of the first alias of @inode, or NULL if
 * there is none.  Caller must hold inode->i_lock.
 */
static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}
1976
1977
1978
1979
1980
1981
1982
1983
/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them.  If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);
1994
/*
 * Attach @inode to the pre-allocated @dentry unless the inode already
 * has an alias, in which case that alias is returned instead and both
 * @dentry and the inode reference are dropped.  @disconnected selects
 * DCACHE_DISCONNECTED semantics; a connected (non-disconnected) root is
 * additionally hashed onto the superblock's s_roots list.
 */
static struct dentry *__d_instantiate_anon(struct dentry *dentry,
					   struct inode *inode,
					   bool disconnected)
{
	struct dentry *res;
	unsigned add_flags;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(dentry);
		goto out_iput;
	}

	/* attach this dentry to the inode */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&dentry->d_lock);
	__d_set_inode_and_type(dentry, inode, add_flags);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	if (!disconnected) {
		hlist_bl_lock(&dentry->d_sb->s_roots);
		hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
		hlist_bl_unlock(&dentry->d_sb->s_roots);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);

	return dentry;

 out_iput:
	iput(inode);
	return res;
}
2034
/* Like d_instantiate(), but for anonymous, DCACHE_DISCONNECTED dentries. */
struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
{
	return __d_instantiate_anon(dentry, inode, true);
}
EXPORT_SYMBOL(d_instantiate_anon);
2040
/*
 * Common helper for d_obtain_alias()/d_obtain_root(): return an
 * existing alias of @inode if there is one, otherwise allocate an
 * anonymous dentry and instantiate it.  The inode reference is always
 * consumed, including on the error paths.
 */
static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
{
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = d_alloc_anon(inode->i_sb);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	return __d_instantiate_anon(tmp, inode, disconnected);

out_iput:
	iput(inode);
	return res;
}
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion
 * or similar open-by-handle operations.  The returned dentry may be
 * anonymous, or may have a full name (if the inode was already in the
 * cache).
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  To make it easier to use in export operations a %NULL or
 * IS_ERR inode may be passed in and the error will be propagated to the
 * return value, with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, true);
}
EXPORT_SYMBOL(d_obtain_alias);
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.  Unlike
 * d_obtain_alias(), the new dentry is not marked DISCONNECTED and is
 * hashed onto the superblock's s_roots list.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  A %NULL or IS_ERR inode may be passed in and the error
 * will be propagated to the return value, with a %NULL @inode replaced
 * by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, false);
}
EXPORT_SYMBOL(d_obtain_root);
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode:  the inode case-insensitive lookup has found
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * If the case-exact dentry already exists in the dcache, use it and
 * return it.  Otherwise allocate a new dentry with the exact case and
 * splice it to @inode.  The inode reference is consumed in every path.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		/* join the in-progress parallel lookup for this name */
		found = d_alloc_parallel(dentry->d_parent, name,
					dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	res = d_splice_alias(inode, found);
	if (res) {
		dput(found);
		return res;
	}
	return found;
}
EXPORT_SYMBOL(d_add_ci);
2165
2166
2167static inline bool d_same_name(const struct dentry *dentry,
2168 const struct dentry *parent,
2169 const struct qstr *name)
2170{
2171 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2172 if (dentry->d_name.len != name->len)
2173 return false;
2174 return dentry_cmp(dentry, name->name, name->len) == 0;
2175 }
2176 return parent->d_op->d_compare(dentry,
2177 dentry->d_name.len, dentry->d_name.name,
2178 name) == 0;
2179}
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking).  It must only be used with
 * rcu_read_lock() held; the returned dentry must not be stored into
 * without taking d_lock and re-checking d_seq against *@seqp.
 *
 * NOTE! The caller *has* to check the resulting dentry against the
 * sequence number we've returned before using any of the resulting
 * dentry state!
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to
	 * avoid races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result.  d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here.  That means
		 * that we don't wait for the sequence count to stabilize
		 * if it is in the middle of a sequence change.  If we do
		 * the slow dentry compare, we will do seqretries until it
		 * is stable, and if we end up with a successful lookup,
		 * we actually want to exit RCU lookup anyway.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			int tlen;
			const char *tname;
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			tlen = dentry->d_name.len;
			tname = dentry->d_name.name;
			/* the name must be stable before the compare runs */
			if (read_seqcount_retry(&dentry->d_seq, seq)) {
				cpu_relax();
				goto seqretry;
			}
			if (parent->d_op->d_compare(dentry,
						    tlen, tname, name) != 0)
				continue;
		} else {
			if (dentry->d_name.hash_len != hashlen)
				continue;
			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
				continue;
		}
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2306{
2307 struct dentry *dentry;
2308 unsigned seq;
2309
2310 do {
2311 seq = read_seqbegin(&rename_lock);
2312 dentry = __d_lookup(parent, name);
2313 if (dentry)
2314 break;
2315 } while (read_seqretry(&rename_lock, seq));
2316 return dentry;
2317}
2318EXPORT_SYMBOL(d_lookup);
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = d_hash(hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result.  d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		if (!d_same_name(dentry, parent, name))
			goto next;

		/* take a reference under d_lock */
		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
2390
2391
2392
2393
2394
2395
2396
2397
2398struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2399{
2400
2401
2402
2403
2404
2405 name->hash = full_name_hash(dir, name->name, name->len);
2406 if (dir->d_flags & DCACHE_OP_HASH) {
2407 int err = dir->d_op->d_hash(dir, name);
2408 if (unlikely(err < 0))
2409 return ERR_PTR(err);
2410 }
2411 return d_lookup(dir, name);
2412}
2413EXPORT_SYMBOL(d_hash_and_lookup);
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later.
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode = dentry->d_inode;
	int isdir = d_is_dir(dentry);

	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	/*
	 * Are we the only user?
	 */
	if (dentry->d_lockref.count == 1) {
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		/*
		 * Note the branch asymmetry: dentry_unlink_inode() is
		 * expected to drop both d_lock and i_lock on this path.
		 */
		dentry_unlink_inode(dentry);
	} else {
		__d_drop(dentry);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
	}
	fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);
2457
/* Add @entry to its hash chain; caller holds entry->d_lock. */
static void __d_rehash(struct dentry *entry)
{
	struct hlist_bl_head *b = d_hash(entry->d_name.hash);

	hlist_bl_lock(b);
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}
2466
2467
2468
2469
2470
2471
2472
2473
/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	__d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
2481
/*
 * Open a "directory add" window for parallel lookups: atomically bump
 * the directory's i_dir_seq to an odd value and return the previous
 * (even) value for end_dir_add().  Spins while another add is in
 * progress (seq already odd, or the cmpxchg loses).
 */
static inline unsigned start_dir_add(struct inode *dir)
{

	for (;;) {
		unsigned n = dir->i_dir_seq;
		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
			return n;
		cpu_relax();
	}
}
2492
/*
 * Close the window opened by start_dir_add(): publish the next even
 * sequence value; the release ordering makes the dcache updates visible
 * before readers see the new sequence.
 */
static inline void end_dir_add(struct inode *dir, unsigned n)
{
	smp_store_release(&dir->i_dir_seq, n + 2);
}
2497
/*
 * Sleep until @dentry leaves the in-lookup state, dropping and
 * retaking d_lock around each schedule().  Returns with d_lock held.
 */
static void d_wait_lookup(struct dentry *dentry)
{
	if (d_in_lookup(dentry)) {
		DECLARE_WAITQUEUE(wait, current);
		add_wait_queue(dentry->d_wait, &wait);
		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&dentry->d_lock);
			schedule();
			spin_lock(&dentry->d_lock);
		} while (d_in_lookup(dentry));
	}
}
2511
/*
 * Allocate a new dentry for a lookup of @name in @parent, coordinating
 * with other lookups of the same name running in parallel.  Returns
 * either a freshly allocated in-lookup dentry (inserted into the
 * in-lookup hash, DCACHE_PAR_LOOKUP set), an existing dentry found in
 * the dcache or via a completed parallel lookup, or ERR_PTR(-ENOMEM).
 */
struct dentry *d_alloc_parallel(struct dentry *parent,
				const struct qstr *name,
				wait_queue_head_t *wq)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *new = d_alloc(parent, name);
	struct dentry *dentry;
	unsigned seq, r_seq, d_seq;

	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

retry:
	rcu_read_lock();
	/* sample i_dir_seq before looking; rechecked under the chain lock */
	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
	r_seq = read_seqbegin(&rename_lock);
	dentry = __d_lookup_rcu(parent, name, &d_seq);
	if (unlikely(dentry)) {
		/* validate the RCU-found dentry before returning it */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}
		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
			rcu_read_unlock();
			dput(dentry);
			goto retry;
		}
		rcu_read_unlock();
		dput(new);
		return dentry;
	}
	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
		/* a rename raced with us; the negative result is unreliable */
		rcu_read_unlock();
		goto retry;
	}

	if (unlikely(seq & 1)) {
		/* a directory add was in progress when we sampled i_dir_seq */
		rcu_read_unlock();
		goto retry;
	}

	hlist_bl_lock(b);
	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
		hlist_bl_unlock(b);
		rcu_read_unlock();
		goto retry;
	}

	/*
	 * No changes for the parent since the beginning of d_lookup().
	 * Since all removals from the chain happen with hlist_bl_lock(),
	 * any potential in-lookup matches are going to stay here until
	 * we unlock the chain.  All fields are stable in everything
	 * we encounter.
	 */
	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (!d_same_name(dentry, parent, name))
			continue;
		hlist_bl_unlock(b);
		/* now we can try to grab a reference */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}

		rcu_read_unlock();
		/*
		 * somebody is likely to be still doing lookup for it;
		 * wait for them to finish
		 */
		spin_lock(&dentry->d_lock);
		d_wait_lookup(dentry);
		/*
		 * it's not in-lookup anymore; in principle we should repeat
		 * everything from dcache lookup, but it's likely to be what
		 * d_lookup() would have found anyway.  If the mismatch
		 * checks below fire, go back and redo the whole thing.
		 */
		if (unlikely(dentry->d_name.hash != hash))
			goto mismatch;
		if (unlikely(dentry->d_parent != parent))
			goto mismatch;
		if (unlikely(d_unhashed(dentry)))
			goto mismatch;
		if (unlikely(!d_same_name(dentry, parent, name)))
			goto mismatch;
		/* OK, it *is* what the parallel lookup produced */
		spin_unlock(&dentry->d_lock);
		dput(new);
		return dentry;
	}
	rcu_read_unlock();
	/* we can't take ->d_lock here; it's OK, though. */
	new->d_flags |= DCACHE_PAR_LOOKUP;
	new->d_wait = wq;
	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
	hlist_bl_unlock(b);
	return new;
mismatch:
	spin_unlock(&dentry->d_lock);
	dput(dentry);
	goto retry;
}
EXPORT_SYMBOL(d_alloc_parallel);
2621
/*
 * End a parallel lookup on @dentry: clear DCACHE_PAR_LOOKUP, remove it
 * from the in-lookup hash and wake all waiters.  Afterwards the d_u
 * union is reinitialized for d_alias use.  Caller holds d_lock.
 */
void __d_lookup_done(struct dentry *dentry)
{
	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
						 dentry->d_name.hash);
	hlist_bl_lock(b);
	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
	wake_up_all(dentry->d_wait);
	dentry->d_wait = NULL;
	hlist_bl_unlock(b);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_lru);
}
EXPORT_SYMBOL(__d_lookup_done);
2636
2637
2638
/* inode->i_lock held if inode is non-NULL */
static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = NULL;
	unsigned n;
	spin_lock(&dentry->d_lock);
	if (unlikely(d_in_lookup(dentry))) {
		/* complete the parallel lookup inside a dir-add window */
		dir = dentry->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(dentry);
	}
	if (inode) {
		/* publish d_inode and type flags under a d_seq write section */
		unsigned add_flags = d_flags_for_inode(inode);
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
		raw_write_seqcount_begin(&dentry->d_seq);
		__d_set_inode_and_type(dentry, inode, add_flags);
		raw_write_seqcount_end(&dentry->d_seq);
		fsnotify_update_flags(dentry);
	}
	__d_rehash(dentry);
	if (dir)
		end_dir_add(dir, n);
	spin_unlock(&dentry->d_lock);
	if (inode)
		spin_unlock(&inode->i_lock);
}
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry, may be %NULL
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 */
void d_add(struct dentry *entry, struct inode *inode)
{
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);	/* released by __d_add() */
	}
	__d_add(entry, inode);
}
EXPORT_SYMBOL(d_add);
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
/**
 * d_exact_alias - find and hash an exact unhashed alias
 * @entry: dentry to clone
 * @inode: the inode which may have an alias
 *
 * If an unhashed dentry with the same name/parent and desired
 * inode already exists, hash and return it.  Otherwise, return
 * %NULL.  Parent directory should be locked.
 */
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
	struct dentry *alias;
	unsigned int hash = entry->d_name.hash;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
			continue;
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			spin_unlock(&alias->d_lock);
			alias = NULL;
		} else {
			__dget_dlock(alias);
			__d_rehash(alias);
			spin_unlock(&alias->d_lock);
		}
		spin_unlock(&inode->i_lock);
		return alias;
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}
EXPORT_SYMBOL(d_exact_alias);
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name.  Old and
 * new name lengths must match (enforced by the BUG_ON), and the parent
 * inode lock must be held over the d_lookup and into this call to keep
 * renames and concurrent inserts away.
 */
void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
{
	BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
2756
/*
 * Exchange the names of @dentry and @target (the __d_move() exchange
 * path), handling all four combinations of inline vs external names.
 * Caller holds both d_locks and the relevant d_seq write sections.
 */
static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.  Swap word-by-word; the
			 * BUILD_BUG_ON guarantees the inline buffer is a
			 * whole number of longs.
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
2799
/*
 * Copy @target's name into @dentry (the non-exchange __d_move() path).
 * External names are shared by bumping their refcount; @dentry's old
 * external name, if any, is freed via RCU when its last user drops it.
 */
static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;
	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		call_rcu(&old_name->u.head, __d_free_external_name);
}
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries
 *
 * Update the dcache to reflect the move of a file name.  Negative
 * dcache entries should not be moved in this way.  Caller must hold
 * rename_lock; lock ordering on the parents' and the two dentries'
 * d_locks is established below.
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	struct dentry *old_parent, *p;
	struct inode *dir = NULL;
	unsigned n;

	WARN_ON(!dentry->d_inode);
	if (WARN_ON(dentry == target))
		return;

	BUG_ON(d_ancestor(target, dentry));
	old_parent = dentry->d_parent;
	p = d_ancestor(old_parent, target);
	if (IS_ROOT(dentry)) {
		BUG_ON(p);
		spin_lock(&target->d_parent->d_lock);
	} else if (!p) {
		/* target's parent locked before dentry's: unrelated subtrees */
		spin_lock(&target->d_parent->d_lock);
		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		/* old_parent is an ancestor of target: lock it first */
		BUG_ON(p == dentry);
		spin_lock(&old_parent->d_lock);
		if (p != target)
			spin_lock_nested(&target->d_parent->d_lock,
					DENTRY_D_LOCK_NESTED);
	}
	spin_lock_nested(&dentry->d_lock, 2);
	spin_lock_nested(&target->d_lock, 3);

	if (unlikely(d_in_lookup(target))) {
		/* end the parallel lookup inside a dir-add window */
		dir = target->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(target);
	}

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* unhash both before touching names/parents */
	if (!d_unhashed(dentry))
		___d_drop(dentry);
	if (!d_unhashed(target))
		___d_drop(target);

	/* ... and switch them in the tree */
	dentry->d_parent = target->d_parent;
	if (!exchange) {
		copy_name(dentry, target);
		target->d_hash.pprev = NULL;	/* target stays unhashed */
		dentry->d_parent->d_lockref.count++;
		if (dentry == old_parent)
			dentry->d_flags |= DCACHE_RCUACCESS;
		else
			WARN_ON(!--old_parent->d_lockref.count);
	} else {
		target->d_parent = old_parent;
		swap_names(dentry, target);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		__d_rehash(target);
		fsnotify_update_flags(target);
	}
	list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	__d_rehash(dentry);
	fsnotify_update_flags(dentry);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	if (dir)
		end_dir_add(dir, n);

	if (dentry->d_parent != old_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (dentry != old_parent)
		spin_unlock(&old_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
2926
2927
2928
2929
2930
2931
/*
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 *
 * Both dentries must be positive and non-root.
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2955{
2956 struct dentry *p;
2957
2958 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2959 if (p->d_parent == p1)
2960 return p;
2961 }
2962 return NULL;
2963}
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL;
	struct rw_semaphore *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() for the lock ordering; trylock to avoid deadlock */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!inode_trylock_shared(alias->d_parent->d_inode))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	if (m2)
		up_read(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has an IS_ROOT alias, then d_move that in
 * place of the given dentry and return it, else simply d_add the inode
 * to the dentry and return NULL.
 *
 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
 * we should error out: directories can't have multiple aliases.
 *
 * This is needed in the lookup routine of any filesystem that is
 * exportable (via knfsd) so that we can build dcache paths to
 * directories effectively.  If a dentry was found and moved, then it is
 * returned; otherwise NULL is returned.  This matches the expected
 * return value of ->lookup.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode)
		goto out;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			/* The reference to new ensures it remains an alias */
			spin_unlock(&inode->i_lock);
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				/* splicing would create a loop; refuse */
				write_sequnlock(&rename_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				/* alias already connected elsewhere: unalias */
				struct dentry *old_parent = dget(new->d_parent);
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
				dput(old_parent);
			} else {
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
			}
			iput(inode);
			return new;
		}
	}
out:
	__d_add(dentry, inode);
	return NULL;
}
EXPORT_SYMBOL(d_splice_alias);
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3094{
3095 bool result;
3096 unsigned seq;
3097
3098 if (new_dentry == old_dentry)
3099 return true;
3100
3101 do {
3102
3103 seq = read_seqbegin(&rename_lock);
3104
3105
3106
3107
3108 rcu_read_lock();
3109 if (d_ancestor(old_dentry, new_dentry))
3110 result = true;
3111 else
3112 result = false;
3113 rcu_read_unlock();
3114 } while (read_seqretry(&rename_lock, seq));
3115
3116 return result;
3117}
3118EXPORT_SYMBOL(is_subdir);
3119
3120static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3121{
3122 struct dentry *root = data;
3123 if (dentry != root) {
3124 if (d_unhashed(dentry) || !dentry->d_inode)
3125 return D_WALK_SKIP;
3126
3127 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3128 dentry->d_flags |= DCACHE_GENOCIDE;
3129 dentry->d_lockref.count--;
3130 }
3131 }
3132 return D_WALK_CONTINUE;
3133}
3134
/*
 * Walk the subtree under @parent and drop one reference from each
 * hashed, positive dentry (see d_genocide_kill()).
 */
void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill, NULL);
}

EXPORT_SYMBOL(d_genocide);
3141
/*
 * Give a tmpfile dentry a synthetic "#<ino>" name and instantiate it
 * with @inode (whose link count is dropped first).  The dentry must
 * still be negative, unlinked, and using its inline name — enforced by
 * the BUG_ON; the synthetic name always fits inline.
 */
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
3157
3158static __initdata unsigned long dhash_entries;
3159static int __init set_dhash_entries(char *str)
3160{
3161 if (!str)
3162 return 0;
3163 dhash_entries = simple_strtoul(str, &str, 0);
3164 return 1;
3165}
3166__setup("dhash_entries=", set_dhash_entries);
3167
static void __init dcache_init_early(void)
{
	/*
	 * If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY | HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	/* d_hash() shifts the 32-bit hash right by d_hash_shift */
	d_hash_shift = 32 - d_hash_shift;
}
3188
static void __init dcache_init(void)
{
	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.  Only d_iname is exposed to usercopy.
	 */
	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
		d_iname);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	/* d_hash() shifts the 32-bit hash right by d_hash_shift */
	d_hash_shift = 32 - d_hash_shift;
}
3216
3217
/* SLAB cache for PATH_MAX-sized pathname buffers (see vfs_caches_init()) */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);
3220
/* Early (pre-mm) setup: in-lookup hash heads plus early dcache/inode hashes */
void __init vfs_caches_init_early(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);

	dcache_init_early();
	inode_init_early();
}
3231
/* Main VFS cache initialization, run once during boot */
void __init vfs_caches_init(void)
{
	/* pathname buffers; only the copied-in name region is usercopy-able */
	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);

	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}
3245