// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dcache.c - dentry cache (directory entry cache) handling.
 */
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/memblock.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>
#include "internal.h"
#include "mount.h"
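
/*
 * Locking overview: dentry->d_lock protects d_flags, d_name, d_lru,
 * d_subdirs and the lockref count; inode->i_lock protects i_dentry and
 * the d_u.d_alias linkage. Where both are needed, i_lock is taken
 * before d_lock (see dentry_kill() and shrink_lock_dentry()). A
 * parent's d_lock is taken before a child's, with DENTRY_D_LOCK_NESTED
 * for the inner lock (see lock_parent()). rename_lock is a seqlock
 * guarding changes of d_parent and d_name.
 */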

int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
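
/*
 * The global dentry hash table, sized at boot; d_hash() maps a name
 * hash to its bucket. in_lookup_hashtable is a small fixed-size table,
 * keyed by (parent, name hash), holding dentries whose lookup is still
 * in progress (see d_alloc_parallel()).
 */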
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
        return dentry_hashtable + (hash >> d_hash_shift);
}

#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
                                                   unsigned int hash)
{
        hash += (unsigned long) parent / L1_CACHE_BYTES;
        return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}

struct dentry_stat_t dentry_stat = {
        .age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);
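
/*
 * Statistics gathering for /proc/sys/fs/dentry-state. The per-cpu
 * counters above are summed on demand; a transiently negative sum
 * (the counters are not updated atomically across CPUs) is clamped
 * to zero.
 */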
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

static long get_nr_dentry(void)
{
        int i;
        long sum = 0;

        for_each_possible_cpu(i)
                sum += per_cpu(nr_dentry, i);
        return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
        int i;
        long sum = 0;

        for_each_possible_cpu(i)
                sum += per_cpu(nr_dentry_unused, i);
        return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_negative(void)
{
        int i;
        long sum = 0;

        for_each_possible_cpu(i)
                sum += per_cpu(nr_dentry_negative, i);
        return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
                   size_t *lenp, loff_t *ppos)
{
        dentry_stat.nr_dentry = get_nr_dentry();
        dentry_stat.nr_unused = get_nr_dentry_unused();
        dentry_stat.nr_negative = get_nr_dentry_negative();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
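
/*
 * Compare two name strings of the same length. With
 * CONFIG_DCACHE_WORD_ACCESS this runs a word at a time: the dentry
 * name (cs) lives in NUL-padded storage, while the caller's key (ct)
 * may stop right at a page boundary, hence load_unaligned_zeropad().
 * The final partial word is masked with bytemask_from_count().
 */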
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
        unsigned long a, b, mask;

        for (;;) {
                a = read_word_at_a_time(cs);
                b = load_unaligned_zeropad(ct);
                if (tcount < sizeof(unsigned long))
                        break;
                if (unlikely(a != b))
                        return 1;
                cs += sizeof(unsigned long);
                ct += sizeof(unsigned long);
                tcount -= sizeof(unsigned long);
                if (!tcount)
                        return 0;
        }
        mask = bytemask_from_count(tcount);
        return unlikely(!!((a ^ b) & mask));
}

#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
        do {
                if (*cs != *ct)
                        return 1;
                cs++;
                ct++;
                tcount--;
        } while (tcount);
        return 0;
}

#endif

static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
        /*
         * Be careful about RCU walk racing with rename: use READ_ONCE()
         * for the name pointer. A concurrent rename may switch it to a
         * different string at any time; the caller's d_seq check catches
         * the case where we compared against the wrong name.
         */
        const unsigned char *cs = READ_ONCE(dentry->d_name.name);

        return dentry_string_cmp(cs, ct, tcount);
}

struct external_name {
        union {
                atomic_t count;
                struct rcu_head head;
        } u;
        unsigned char name[];
};

static inline struct external_name *external_name(struct dentry *dentry)
{
        return container_of(dentry->d_name.name, struct external_name, name[0]);
}

static void __d_free(struct rcu_head *head)
{
        struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

        kmem_cache_free(dentry_cache, dentry);
}

static void __d_free_external(struct rcu_head *head)
{
        struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
        kfree(external_name(dentry));
        kmem_cache_free(dentry_cache, dentry);
}

static inline int dname_external(const struct dentry *dentry)
{
        return dentry->d_name.name != dentry->d_iname;
}
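
/*
 * Name snapshots let a caller hold a stable copy of a dentry's name
 * without keeping d_lock: short names are copied into the snapshot's
 * inline buffer, long (external) names are pinned by bumping the
 * external_name refcount.
 */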
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        name->name = dentry->d_name;
        if (unlikely(dname_external(dentry))) {
                atomic_inc(&external_name(dentry)->u.count);
        } else {
                memcpy(name->inline_name, dentry->d_iname,
                       dentry->d_name.len + 1);
                name->name.name = name->inline_name;
        }
        spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(take_dentry_name_snapshot);

void release_dentry_name_snapshot(struct name_snapshot *name)
{
        if (unlikely(name->name.name != name->inline_name)) {
                struct external_name *p;
                p = container_of(name->name.name, struct external_name, name[0]);
                if (unlikely(atomic_dec_and_test(&p->u.count)))
                        kfree_rcu(p, u.head);
        }
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
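
/*
 * Update d_inode and the type bits in d_flags together, so that
 * lockless readers observing the type flags also see the matching
 * inode pointer.
 */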
static inline void __d_set_inode_and_type(struct dentry *dentry,
                                          struct inode *inode,
                                          unsigned type_flags)
{
        unsigned flags;

        dentry->d_inode = inode;
        flags = READ_ONCE(dentry->d_flags);
        flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
        flags |= type_flags;
        /* publish ->d_inode before the new type flags become visible */
        smp_store_release(&dentry->d_flags, flags);
}

static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
        unsigned flags = READ_ONCE(dentry->d_flags);

        flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
        WRITE_ONCE(dentry->d_flags, flags);
        dentry->d_inode = NULL;
        /* the dentry just became negative; account it if it's on the LRU */
        if (dentry->d_flags & DCACHE_LRU_LIST)
                this_cpu_inc(nr_dentry_negative);
}

static void dentry_free(struct dentry *dentry)
{
        WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
        if (unlikely(dname_external(dentry))) {
                struct external_name *p = external_name(dentry);
                if (likely(atomic_dec_and_test(&p->u.count))) {
                        call_rcu(&dentry->d_u.d_rcu, __d_free_external);
                        return;
                }
        }
        /* if the dentry was never visible to RCU, immediate free is OK */
        if (dentry->d_flags & DCACHE_NORCU)
                __d_free(&dentry->d_u.d_rcu);
        else
                call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/*
 * Release the dentry's inode, using the filesystem's ->d_iput() if it
 * has one, plain iput() otherwise. Called with d_lock and i_lock held;
 * drops both.
 */
static void dentry_unlink_inode(struct dentry *dentry)
        __releases(dentry->d_lock)
        __releases(dentry->d_inode->i_lock)
{
        struct inode *inode = dentry->d_inode;

        raw_write_seqcount_begin(&dentry->d_seq);
        __d_clear_type_and_inode(dentry);
        hlist_del_init(&dentry->d_u.d_alias);
        raw_write_seqcount_end(&dentry->d_seq);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&inode->i_lock);
        if (!inode->i_nlink)
                fsnotify_inoderemove(inode);
        if (dentry->d_op && dentry->d_op->d_iput)
                dentry->d_op->d_iput(dentry, inode);
        else
                iput(inode);
}
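
/*
 * dentry LRU handling. A dentry is either on its superblock's LRU list
 * (DCACHE_LRU_LIST alone), on a private shrink list (DCACHE_SHRINK_LIST
 * together with DCACHE_LRU_LIST), or on neither. D_FLAG_VERIFY()
 * asserts the expected state on every transition, and nr_dentry_unused
 * and nr_dentry_negative are kept in step.
 */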
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
        D_FLAG_VERIFY(dentry, 0);
        dentry->d_flags |= DCACHE_LRU_LIST;
        this_cpu_inc(nr_dentry_unused);
        if (d_is_negative(dentry))
                this_cpu_inc(nr_dentry_negative);
        WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
        D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
        dentry->d_flags &= ~DCACHE_LRU_LIST;
        this_cpu_dec(nr_dentry_unused);
        if (d_is_negative(dentry))
                this_cpu_dec(nr_dentry_negative);
        WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
        D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
        list_del_init(&dentry->d_lru);
        dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
        this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
        D_FLAG_VERIFY(dentry, 0);
        list_add(&dentry->d_lru, list);
        dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
        this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called from the list_lru walk callbacks, with the
 * lru lock held. "isolate" removes the dentry from the LRU entirely,
 * while "shrink_move" moves it to the passed private shrink list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
        D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
        dentry->d_flags &= ~DCACHE_LRU_LIST;
        this_cpu_dec(nr_dentry_unused);
        if (d_is_negative(dentry))
                this_cpu_dec(nr_dentry_negative);
        list_lru_isolate(lru, &dentry->d_lru);
}

static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
                              struct list_head *list)
{
        D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
        dentry->d_flags |= DCACHE_SHRINK_LIST;
        if (d_is_negative(dentry))
                this_cpu_dec(nr_dentry_negative);
        list_lru_isolate_move(lru, &dentry->d_lru, list);
}

/*
 * Unhash a dentry so it can no longer be found by lookup. __d_drop()
 * requires dentry->d_lock and invalidates d_seq so that RCU-walk
 * lookups fail; d_drop() takes the lock itself. ___d_drop() does only
 * the raw hash-list removal, for callers that rehash immediately
 * (see __d_move()).
 */
static void ___d_drop(struct dentry *dentry)
{
        struct hlist_bl_head *b;
        /*
         * Hashed dentries are normally on the dentry hashtable; the
         * exception is root dentries, which are hashed on their
         * superblock's s_roots list.
         */
        if (unlikely(IS_ROOT(dentry)))
                b = &dentry->d_sb->s_roots;
        else
                b = d_hash(dentry->d_name.hash);

        hlist_bl_lock(b);
        __hlist_bl_del(&dentry->d_hash);
        hlist_bl_unlock(b);
}

void __d_drop(struct dentry *dentry)
{
        if (!d_unhashed(dentry)) {
                ___d_drop(dentry);
                dentry->d_hash.pprev = NULL;
                write_seqcount_invalidate(&dentry->d_seq);
        }
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        __d_drop(dentry);
        spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
        struct dentry *next;
        /*
         * Tell d_walk() and shrink_dcache_parent() that we are no longer
         * attached to the dentry tree.
         */
        dentry->d_flags |= DCACHE_DENTRY_KILLED;
        if (unlikely(list_empty(&dentry->d_child)))
                return;
        __list_del_entry(&dentry->d_child);
        /*
         * Our d_child.next may be left behind as the resume point for a
         * d_walk() that ascends from us after we've been killed. Cursors
         * can be moved around the child list (e.g. by lseek() on a
         * directory), so a stale pointer into a cursor could make such a
         * walk skip entries. Make sure the pointer we leave behind skips
         * over any cursors.
         */
        while (dentry->d_child.next != &parent->d_subdirs) {
                next = list_entry(dentry->d_child.next, struct dentry, d_child);
                if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
                        break;
                dentry->d_child.next = next->d_child.next;
        }
}

static void __dentry_kill(struct dentry *dentry)
{
        struct dentry *parent = NULL;
        bool can_free = true;
        if (!IS_ROOT(dentry))
                parent = dentry->d_parent;

        /*
         * The dentry is now unrecoverably dead to the world.
         */
        lockref_mark_dead(&dentry->d_lockref);

        /*
         * Inform the fs via d_prune() that this dentry is about to be
         * unhashed and destroyed.
         */
        if (dentry->d_flags & DCACHE_OP_PRUNE)
                dentry->d_op->d_prune(dentry);

        if (dentry->d_flags & DCACHE_LRU_LIST) {
                if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
                        d_lru_del(dentry);
        }
        /* if it was on the hash then remove it */
        __d_drop(dentry);
        dentry_unlist(dentry, parent);
        if (parent)
                spin_unlock(&parent->d_lock);
        if (dentry->d_inode)
                dentry_unlink_inode(dentry);
        else
                spin_unlock(&dentry->d_lock);
        this_cpu_dec(nr_dentry);
        if (dentry->d_op && dentry->d_op->d_release)
                dentry->d_op->d_release(dentry);

        spin_lock(&dentry->d_lock);
        if (dentry->d_flags & DCACHE_SHRINK_LIST) {
                dentry->d_flags |= DCACHE_MAY_FREE;
                can_free = false;
        }
        spin_unlock(&dentry->d_lock);
        if (likely(can_free))
                dentry_free(dentry);
        cond_resched();
}

static struct dentry *__lock_parent(struct dentry *dentry)
{
        struct dentry *parent;
        rcu_read_lock();
        spin_unlock(&dentry->d_lock);
again:
        parent = READ_ONCE(dentry->d_parent);
        spin_lock(&parent->d_lock);
        /*
         * We can't blindly lock dentry until we are sure that we have
         * the right parent: while we dropped d_lock, a concurrent
         * rename may have moved it. Any change of d_parent is done with
         * the parent's d_lock held, so rechecking under parent->d_lock
         * is enough.
         */
        if (unlikely(parent != dentry->d_parent)) {
                spin_unlock(&parent->d_lock);
                goto again;
        }
        rcu_read_unlock();
        if (parent != dentry)
                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
        else
                parent = NULL;
        return parent;
}

static inline struct dentry *lock_parent(struct dentry *dentry)
{
        struct dentry *parent = dentry->d_parent;
        if (IS_ROOT(dentry))
                return NULL;
        if (likely(spin_trylock(&parent->d_lock)))
                return parent;
        return __lock_parent(dentry);
}
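
/*
 * Decide whether a dentry whose refcount is about to hit zero should
 * be kept in the cache. Called with d_lock held; when it returns true
 * the reference has been dropped and the dentry has been put on (or
 * left on) the LRU.
 */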
static inline bool retain_dentry(struct dentry *dentry)
{
        WARN_ON(d_in_lookup(dentry));

        /* Unreachable? Get rid of it */
        if (unlikely(d_unhashed(dentry)))
                return false;

        if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
                return false;

        if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
                if (dentry->d_op->d_delete(dentry))
                        return false;
        }

        if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
                return false;

        /* retain; LRU fodder */
        dentry->d_lockref.count--;
        if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
                d_lru_add(dentry);
        else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
                dentry->d_flags |= DCACHE_REFERENCED;
        return true;
}

void d_mark_dontcache(struct inode *inode)
{
        struct dentry *de;

        spin_lock(&inode->i_lock);
        hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
                spin_lock(&de->d_lock);
                de->d_flags |= DCACHE_DONTCACHE;
                spin_unlock(&de->d_lock);
        }
        inode->i_state |= I_DONTCACHE;
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_mark_dontcache);

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns the parent dentry requiring a refcount drop, or NULL if
 * we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
        __releases(dentry->d_lock)
{
        struct inode *inode = dentry->d_inode;
        struct dentry *parent = NULL;

        if (inode && unlikely(!spin_trylock(&inode->i_lock)))
                goto slow_positive;

        if (!IS_ROOT(dentry)) {
                parent = dentry->d_parent;
                if (unlikely(!spin_trylock(&parent->d_lock))) {
                        parent = __lock_parent(dentry);
                        if (likely(inode || !dentry->d_inode))
                                goto got_locks;
                        /* negative that became positive */
                        if (parent)
                                spin_unlock(&parent->d_lock);
                        inode = dentry->d_inode;
                        goto slow_positive;
                }
        }
        __dentry_kill(dentry);
        return parent;

slow_positive:
        spin_unlock(&dentry->d_lock);
        spin_lock(&inode->i_lock);
        spin_lock(&dentry->d_lock);
        parent = lock_parent(dentry);
got_locks:
        if (unlikely(dentry->d_lockref.count != 1)) {
                dentry->d_lockref.count--;
        } else if (likely(!retain_dentry(dentry))) {
                __dentry_kill(dentry);
                return parent;
        }
        /* we are keeping it, after all */
        if (inode)
                spin_unlock(&inode->i_lock);
        if (parent)
                spin_unlock(&parent->d_lock);
        spin_unlock(&dentry->d_lock);
        return NULL;
}

/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry
 * lock. The caller must hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero.
 */
static inline bool fast_dput(struct dentry *dentry)
{
        int ret;
        unsigned int d_flags;

        /*
         * If we have a d_op->d_delete() operation, we can't let the
         * refcount reach zero without calling it, so take the
         * "put or lock" slow path.
         */
        if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
                return lockref_put_or_lock(&dentry->d_lockref);

        /*
         * .. otherwise, we can try to just decrement the
         * lockref optimistically.
         */
        ret = lockref_put_return(&dentry->d_lockref);

        /*
         * If the lockref_put_return() failed due to the lock being held
         * by somebody else, the fast path has failed. We will need to
         * get the lock, and then check the count again.
         */
        if (unlikely(ret < 0)) {
                spin_lock(&dentry->d_lock);
                if (dentry->d_lockref.count > 1) {
                        dentry->d_lockref.count--;
                        spin_unlock(&dentry->d_lock);
                        return true;
                }
                return false;
        }

        /*
         * If we weren't the last ref, we're done.
         */
        if (ret)
                return true;

        /*
         * Careful, careful. The reference count went down to zero, but
         * we don't hold the dentry lock, so somebody else could get it
         * again and do another dput(), and we must not race with that.
         * There is, however, one very common case where nothing is left
         * to do: the dentry is still hashed, has no d_delete op, and is
         * referenced and already on the LRU list. These values are not
         * stable without the lock, but it is enough that they held at
         * some point after we dropped our reference: anyone who takes
         * and drops a reference later has to deal with the dentry
         * themselves.
         */
        smp_rmb();
        d_flags = READ_ONCE(dentry->d_flags);
        d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

        /* Nothing to do? Dropping the reference was all we needed? */
        if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
                return true;

        /*
         * Not the fast normal case? Get the lock. We've already
         * decremented the refcount, but we'll need to re-check the
         * situation after getting the lock.
         */
        spin_lock(&dentry->d_lock);

        /*
         * Did somebody else grab a reference to it in the meantime, and
         * we're no longer the last user after all? Alternatively, somebody
         * else could have killed it and marked it dead. Either way, we
         * don't need to do anything else.
         */
        if (dentry->d_lockref.count) {
                spin_unlock(&dentry->d_lock);
                return true;
        }

        /*
         * Re-get the reference we optimistically dropped. We hold the
         * lock, and we just tested that it was zero, so we can just
         * set it to 1.
         */
        dentry->d_lockref.count = 1;
        return false;
}

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues
 * and releasing its resources. If the parent dentries were scheduled
 * for release they will be deleted, too; hence the loop.
 */
void dput(struct dentry *dentry)
{
        while (dentry) {
                might_sleep();

                rcu_read_lock();
                if (likely(fast_dput(dentry))) {
                        rcu_read_unlock();
                        return;
                }

                /* Slow case: now with the dentry lock held */
                rcu_read_unlock();

                if (likely(retain_dentry(dentry))) {
                        spin_unlock(&dentry->d_lock);
                        return;
                }

                dentry = dentry_kill(dentry);
        }
}
EXPORT_SYMBOL(dput);

static void __dput_to_list(struct dentry *dentry, struct list_head *list)
__must_hold(&dentry->d_lock)
{
        if (dentry->d_flags & DCACHE_SHRINK_LIST) {
                /* let the owner of the list it's on deal with it */
                --dentry->d_lockref.count;
        } else {
                if (dentry->d_flags & DCACHE_LRU_LIST)
                        d_lru_del(dentry);
                if (!--dentry->d_lockref.count)
                        d_shrink_add(dentry, list);
        }
}

void dput_to_list(struct dentry *dentry, struct list_head *list)
{
        rcu_read_lock();
        if (likely(fast_dput(dentry))) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();
        if (!retain_dentry(dentry))
                __dput_to_list(dentry, list);
        spin_unlock(&dentry->d_lock);
}

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
        dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
        lockref_get(&dentry->d_lockref);
}
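
/*
 * dget_parent() takes a reference on a dentry's parent. The fast path
 * grabs d_parent under RCU and validates it against d_seq; the slow
 * path locks the candidate parent and re-checks that it is still the
 * parent before bumping its count.
 */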
struct dentry *dget_parent(struct dentry *dentry)
{
        int gotref;
        struct dentry *ret;
        unsigned seq;

        /*
         * Do optimistic parent lookup without any locking.
         */
        rcu_read_lock();
        seq = raw_seqcount_begin(&dentry->d_seq);
        ret = READ_ONCE(dentry->d_parent);
        gotref = lockref_get_not_zero(&ret->d_lockref);
        rcu_read_unlock();
        if (likely(gotref)) {
                if (!read_seqcount_retry(&dentry->d_seq, seq))
                        return ret;
                dput(ret);
        }

repeat:
        /*
         * Don't need rcu_dereference because we re-check it was correct
         * under the lock.
         */
        rcu_read_lock();
        ret = dentry->d_parent;
        spin_lock(&ret->d_lock);
        if (unlikely(ret != dentry->d_parent)) {
                spin_unlock(&ret->d_lock);
                rcu_read_unlock();
                goto repeat;
        }
        rcu_read_unlock();
        BUG_ON(!ret->d_lockref.count);
        ret->d_lockref.count++;
        spin_unlock(&ret->d_lock);
        return ret;
}
EXPORT_SYMBOL(dget_parent);

static struct dentry *__d_find_any_alias(struct inode *inode)
{
        struct dentry *alias;

        if (hlist_empty(&inode->i_dentry))
                return NULL;
        alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
        __dget(alias);
        return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them.  If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
        struct dentry *de;

        spin_lock(&inode->i_lock);
        de = __d_find_any_alias(inode);
        spin_unlock(&inode->i_lock);
        return de;
}
EXPORT_SYMBOL(d_find_any_alias);

/*
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If the inode has a hashed alias, acquire a reference to it and
 * return it. Otherwise return %NULL. For directories, which can have
 * only one alias, any alias found is returned.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
        struct dentry *alias;

        if (S_ISDIR(inode->i_mode))
                return __d_find_any_alias(inode);

        hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
                spin_lock(&alias->d_lock);
                if (!d_unhashed(alias)) {
                        __dget_dlock(alias);
                        spin_unlock(&alias->d_lock);
                        return alias;
                }
                spin_unlock(&alias->d_lock);
        }
        return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
        struct dentry *de = NULL;

        if (!hlist_empty(&inode->i_dentry)) {
                spin_lock(&inode->i_lock);
                de = __d_find_alias(inode);
                spin_unlock(&inode->i_lock);
        }
        return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
        struct dentry *dentry;
restart:
        spin_lock(&inode->i_lock);
        hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
                spin_lock(&dentry->d_lock);
                if (!dentry->d_lockref.count) {
                        struct dentry *parent = lock_parent(dentry);
                        if (likely(!dentry->d_lockref.count)) {
                                __dentry_kill(dentry);
                                dput(parent);
                                goto restart;
                        }
                        if (parent)
                                spin_unlock(&parent->d_lock);
                }
                spin_unlock(&dentry->d_lock);
        }
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

/*
 * Lock a dentry from a shrink list.
 *
 * Called under rcu_read_lock() and dentry->d_lock; the latter may be
 * dropped and reacquired, so the caller can't rely on anything it
 * checked before. The dentry itself is guaranteed to stay around:
 * it's on a shrink list, and removal from that list requires d_lock.
 *
 * Returns false if we failed to get all the needed locks.
 */
static bool shrink_lock_dentry(struct dentry *dentry)
{
        struct inode *inode;
        struct dentry *parent;

        if (dentry->d_lockref.count)
                return false;

        inode = dentry->d_inode;
        if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
                spin_unlock(&dentry->d_lock);
                spin_lock(&inode->i_lock);
                spin_lock(&dentry->d_lock);
                if (unlikely(dentry->d_lockref.count))
                        goto out;
                /* changed inode means that somebody had grabbed it */
                if (unlikely(inode != dentry->d_inode))
                        goto out;
        }

        parent = dentry->d_parent;
        if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
                return true;

        spin_unlock(&dentry->d_lock);
        spin_lock(&parent->d_lock);
        if (unlikely(parent != dentry->d_parent)) {
                spin_unlock(&parent->d_lock);
                spin_lock(&dentry->d_lock);
                goto out;
        }
        spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
        if (likely(!dentry->d_lockref.count))
                return true;
        spin_unlock(&parent->d_lock);
out:
        if (inode)
                spin_unlock(&inode->i_lock);
        return false;
}
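
/*
 * Kill all dentries on a private shrink list. If a dentry can't be
 * fully locked, it is taken off the list and left to whoever holds the
 * reference, or to __dentry_kill() via the DCACHE_MAY_FREE handshake
 * if it is already dying.
 */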
void shrink_dentry_list(struct list_head *list)
{
        while (!list_empty(list)) {
                struct dentry *dentry, *parent;

                dentry = list_entry(list->prev, struct dentry, d_lru);
                spin_lock(&dentry->d_lock);
                rcu_read_lock();
                if (!shrink_lock_dentry(dentry)) {
                        bool can_free = false;
                        rcu_read_unlock();
                        d_shrink_del(dentry);
                        if (dentry->d_lockref.count < 0)
                                can_free = dentry->d_flags & DCACHE_MAY_FREE;
                        spin_unlock(&dentry->d_lock);
                        if (can_free)
                                dentry_free(dentry);
                        continue;
                }
                rcu_read_unlock();
                d_shrink_del(dentry);
                parent = dentry->d_parent;
                if (parent != dentry)
                        __dput_to_list(parent, list);
                __dentry_kill(dentry);
        }
}
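
/*
 * list_lru walk callback for prune_dcache_sb(); called with the lru
 * lock held.
 */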
1144
1145static enum lru_status dentry_lru_isolate(struct list_head *item,
1146 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1147{
1148 struct list_head *freeable = arg;
1149 struct dentry *dentry = container_of(item, struct dentry, d_lru);
1150
1151
1152
1153
1154
1155
1156
1157 if (!spin_trylock(&dentry->d_lock))
1158 return LRU_SKIP;
1159
1160
1161
1162
1163
1164
1165 if (dentry->d_lockref.count) {
1166 d_lru_isolate(lru, dentry);
1167 spin_unlock(&dentry->d_lock);
1168 return LRU_REMOVED;
1169 }
1170
1171 if (dentry->d_flags & DCACHE_REFERENCED) {
1172 dentry->d_flags &= ~DCACHE_REFERENCED;
1173 spin_unlock(&dentry->d_lock);
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194 return LRU_ROTATE;
1195 }
1196
1197 d_lru_shrink_move(lru, dentry, freeable);
1198 spin_unlock(&dentry->d_lock);
1199
1200 return LRU_REMOVED;
1201}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan
 * entries. This is done when we need more memory and is called from
 * the superblock shrinker function.
 *
 * This function may fail to free any resources if all the dentries are
 * in use.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
        LIST_HEAD(dispose);
        long freed;

        freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
                                     dentry_lru_isolate, &dispose);
        shrink_dentry_list(&dispose);
        return freed;
}

static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
                struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
        struct list_head *freeable = arg;
        struct dentry *dentry = container_of(item, struct dentry, d_lru);

        /*
         * We are inverting the lru lock/dentry->d_lock order here,
         * so use a trylock. If we fail to get the lock, just skip it.
         */
        if (!spin_trylock(&dentry->d_lock))
                return LRU_SKIP;

        d_lru_shrink_move(lru, dentry, freeable);
        spin_unlock(&dentry->d_lock);

        return LRU_REMOVED;
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to
 * free the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
        do {
                LIST_HEAD(dispose);

                list_lru_walk(&sb->s_dentry_lru,
                              dentry_lru_isolate_shrink, &dispose, 1024);
                shrink_dentry_list(&dispose);
        } while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:    continue walk
 * @D_WALK_QUIT:        quit walk
 * @D_WALK_NORETRY:     quit when retry is needed
 * @D_WALK_SKIP:        skip this dentry and its children
 */
enum d_walk_ret {
        D_WALK_CONTINUE,
        D_WALK_QUIT,
        D_WALK_NORETRY,
        D_WALK_SKIP,
};

/**
 * d_walk - walk the dentry tree
 * @parent:     start of walk
 * @data:       data passed to @enter() callback
 * @enter:      callback when first entering the dentry
 *
 * The @enter() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
                   enum d_walk_ret (*enter)(void *, struct dentry *))
{
        struct dentry *this_parent;
        struct list_head *next;
        unsigned seq = 0;
        enum d_walk_ret ret;
        bool retry = true;

again:
        read_seqbegin_or_lock(&rename_lock, &seq);
        this_parent = parent;
        spin_lock(&this_parent->d_lock);

        ret = enter(data, this_parent);
        switch (ret) {
        case D_WALK_CONTINUE:
                break;
        case D_WALK_QUIT:
        case D_WALK_SKIP:
                goto out_unlock;
        case D_WALK_NORETRY:
                retry = false;
                break;
        }
repeat:
        next = this_parent->d_subdirs.next;
resume:
        while (next != &this_parent->d_subdirs) {
                struct list_head *tmp = next;
                struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
                next = tmp->next;

                if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
                        continue;

                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

                ret = enter(data, dentry);
                switch (ret) {
                case D_WALK_CONTINUE:
                        break;
                case D_WALK_QUIT:
                        spin_unlock(&dentry->d_lock);
                        goto out_unlock;
                case D_WALK_NORETRY:
                        retry = false;
                        break;
                case D_WALK_SKIP:
                        spin_unlock(&dentry->d_lock);
                        continue;
                }

                if (!list_empty(&dentry->d_subdirs)) {
                        spin_unlock(&this_parent->d_lock);
                        spin_release(&dentry->d_lock.dep_map, _RET_IP_);
                        this_parent = dentry;
                        spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
                        goto repeat;
                }
                spin_unlock(&dentry->d_lock);
        }
        /*
         * All done at this level ... ascend and resume the search.
         */
        rcu_read_lock();
ascend:
        if (this_parent != parent) {
                struct dentry *child = this_parent;
                this_parent = child->d_parent;

                spin_unlock(&child->d_lock);
                spin_lock(&this_parent->d_lock);

                /* might go back up the wrong parent if we have had a rename. */
                if (need_seqretry(&rename_lock, seq))
                        goto rename_retry;
                /* go into the first sibling still alive */
                do {
                        next = child->d_child.next;
                        if (next == &this_parent->d_subdirs)
                                goto ascend;
                        child = list_entry(next, struct dentry, d_child);
                } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
                rcu_read_unlock();
                goto resume;
        }
        if (need_seqretry(&rename_lock, seq))
                goto rename_retry;
        rcu_read_unlock();

out_unlock:
        spin_unlock(&this_parent->d_lock);
        done_seqretry(&rename_lock, seq);
        return;

rename_retry:
        spin_unlock(&this_parent->d_lock);
        rcu_read_unlock();
        BUG_ON(seq & 1);
        if (!retry)
                return;
        seq = 1;
        goto again;
}

struct check_mount {
        struct vfsmount *mnt;
        unsigned int mounted;
};

static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
        struct check_mount *info = data;
        struct path path = { .mnt = info->mnt, .dentry = dentry };

        if (likely(!d_mountpoint(dentry)))
                return D_WALK_CONTINUE;
        if (__path_is_mountpoint(&path)) {
                info->mounted = 1;
                return D_WALK_QUIT;
        }
        return D_WALK_CONTINUE;
}

/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point in the current namespace.
 */
int path_has_submounts(const struct path *parent)
{
        struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

        read_seqlock_excl(&mount_lock);
        d_walk(parent->dentry, &data, path_check_mount);
        read_sequnlock_excl(&mount_lock);

        return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);

/*
 * Called by mount code to set a mountpoint and check if the mountpoint
 * is reachable (e.g. NFS can unhash a directory dentry and then the
 * complete subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() may succeed: this
 * function walks up the tree under rename_lock and bails out with
 * -ENOENT if it finds an unhashed ancestor.
 */
int d_set_mounted(struct dentry *dentry)
{
        struct dentry *p;
        int ret = -ENOENT;
        write_seqlock(&rename_lock);
        for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
                /* Need exclusion wrt. d_invalidate() */
                spin_lock(&p->d_lock);
                if (unlikely(d_unhashed(p))) {
                        spin_unlock(&p->d_lock);
                        goto out;
                }
                spin_unlock(&p->d_lock);
        }
        spin_lock(&dentry->d_lock);
        if (!d_unlinked(dentry)) {
                ret = -EBUSY;
                if (!d_mountpoint(dentry)) {
                        dentry->d_flags |= DCACHE_MOUNTED;
                        ret = 0;
                }
        }
        spin_unlock(&dentry->d_lock);
out:
        write_sequnlock(&rename_lock);
        return ret;
}

/*
 * Helpers for shrink_dcache_parent(): select_collect() gathers unused
 * descendants onto data->dispose and counts them in data->found;
 * select_collect2() additionally picks a dentry that is stuck on
 * someone else's shrink list as data->victim, so the caller can help
 * kill it.
 */
struct select_data {
        struct dentry *start;
        union {
                long found;
                struct dentry *victim;
        };
        struct list_head dispose;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
        struct select_data *data = _data;
        enum d_walk_ret ret = D_WALK_CONTINUE;

        if (data->start == dentry)
                goto out;

        if (dentry->d_flags & DCACHE_SHRINK_LIST) {
                data->found++;
        } else {
                if (dentry->d_flags & DCACHE_LRU_LIST)
                        d_lru_del(dentry);
                if (!dentry->d_lockref.count) {
                        d_shrink_add(dentry, &data->dispose);
                        data->found++;
                }
        }
        /*
         * We can return to the caller if we have found some (this
         * ensures forward progress). We'll be coming back to find
         * the rest.
         */
        if (!list_empty(&data->dispose))
                ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
        return ret;
}

static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
{
        struct select_data *data = _data;
        enum d_walk_ret ret = D_WALK_CONTINUE;

        if (data->start == dentry)
                goto out;

        if (dentry->d_flags & DCACHE_SHRINK_LIST) {
                if (!dentry->d_lockref.count) {
                        rcu_read_lock();
                        data->victim = dentry;
                        return D_WALK_QUIT;
                }
        } else {
                if (dentry->d_flags & DCACHE_LRU_LIST)
                        d_lru_del(dentry);
                if (!dentry->d_lockref.count)
                        d_shrink_add(dentry, &data->dispose);
        }
        /*
         * We can return to the caller if we have found some (this
         * ensures forward progress). We'll be coming back to find
         * the rest.
         */
        if (!list_empty(&data->dispose))
                ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
        return ret;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
        for (;;) {
                struct select_data data = {.start = parent};

                INIT_LIST_HEAD(&data.dispose);
                d_walk(parent, &data, select_collect);

                if (!list_empty(&data.dispose)) {
                        shrink_dentry_list(&data.dispose);
                        continue;
                }

                cond_resched();
                if (!data.found)
                        break;
                data.victim = NULL;
                d_walk(parent, &data, select_collect2);
                if (data.victim) {
                        struct dentry *parent;
                        spin_lock(&data.victim->d_lock);
                        if (!shrink_lock_dentry(data.victim)) {
                                spin_unlock(&data.victim->d_lock);
                                rcu_read_unlock();
                        } else {
                                rcu_read_unlock();
                                parent = data.victim->d_parent;
                                if (parent != data.victim)
                                        __dput_to_list(parent, &data.dispose);
                                __dentry_kill(data.victim);
                        }
                }
                if (!list_empty(&data.dispose))
                        shrink_dentry_list(&data.dispose);
        }
}
EXPORT_SYMBOL(shrink_dcache_parent);

static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
        /* it has busy descendents; complain about those instead */
        if (!list_empty(&dentry->d_subdirs))
                return D_WALK_CONTINUE;

        /* root with refcount 1 is fine */
        if (dentry == _data && dentry->d_lockref.count == 1)
                return D_WALK_CONTINUE;

        printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
                        " still in use (%d) [unmount of %s %s]\n",
                       dentry,
                       dentry->d_inode ?
                       dentry->d_inode->i_ino : 0UL,
                       dentry,
                       dentry->d_lockref.count,
                       dentry->d_sb->s_type->name,
                       dentry->d_sb->s_id);
        WARN_ON(1);
        return D_WALK_CONTINUE;
}

static void do_one_tree(struct dentry *dentry)
{
        shrink_dcache_parent(dentry);
        d_walk(dentry, dentry, umount_check);
        d_drop(dentry);
        dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
        struct dentry *dentry;

        WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

        dentry = sb->s_root;
        sb->s_root = NULL;
        do_one_tree(dentry);

        while (!hlist_bl_empty(&sb->s_roots)) {
                dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
                do_one_tree(dentry);
        }
}

static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
{
        struct dentry **victim = _data;
        if (d_mountpoint(dentry)) {
                __dget_dlock(dentry);
                *victim = dentry;
                return D_WALK_QUIT;
        }
        return D_WALK_CONTINUE;
}

/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 */
void d_invalidate(struct dentry *dentry)
{
        bool had_submounts = false;
        spin_lock(&dentry->d_lock);
        if (d_unhashed(dentry)) {
                spin_unlock(&dentry->d_lock);
                return;
        }
        __d_drop(dentry);
        spin_unlock(&dentry->d_lock);

        /* Negative dentries can be dropped without further checks */
        if (!dentry->d_inode)
                return;

        shrink_dcache_parent(dentry);
        for (;;) {
                struct dentry *victim = NULL;
                d_walk(dentry, &victim, find_submount);
                if (!victim) {
                        if (had_submounts)
                                shrink_dcache_parent(dentry);
                        return;
                }
                had_submounts = true;
                detach_mounts(victim);
                dput(victim);
        }
}
EXPORT_SYMBOL(d_invalidate);

/**
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed around is what's used.
 */
static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
        struct dentry *dentry;
        char *dname;
        int err;

        dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
        if (!dentry)
                return NULL;

        /*
         * We guarantee that the inline name is always NUL-terminated.
         * This way the memcpy() done by the name switching in rename
         * will still always have a NUL at the end, even if we might
         * be overwriting an internal NUL character.
         */
        dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
        if (unlikely(!name)) {
                name = &slash_name;
                dname = dentry->d_iname;
        } else if (name->len > DNAME_INLINE_LEN-1) {
                size_t size = offsetof(struct external_name, name[1]);
                struct external_name *p = kmalloc(size + name->len,
                                                  GFP_KERNEL_ACCOUNT |
                                                  __GFP_RECLAIMABLE);
                if (!p) {
                        kmem_cache_free(dentry_cache, dentry);
                        return NULL;
                }
                atomic_set(&p->u.count, 1);
                dname = p->name;
        } else {
                dname = dentry->d_iname;
        }

        dentry->d_name.len = name->len;
        dentry->d_name.hash = name->hash;
        memcpy(dname, name->name, name->len);
        dname[name->len] = 0;

        /* Make sure we always see the terminating NUL character */
        smp_store_release(&dentry->d_name.name, dname);

        dentry->d_lockref.count = 1;
        dentry->d_flags = 0;
        spin_lock_init(&dentry->d_lock);
        seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
        dentry->d_inode = NULL;
        dentry->d_parent = dentry;
        dentry->d_sb = sb;
        dentry->d_op = NULL;
        dentry->d_fsdata = NULL;
        INIT_HLIST_BL_NODE(&dentry->d_hash);
        INIT_LIST_HEAD(&dentry->d_lru);
        INIT_LIST_HEAD(&dentry->d_subdirs);
        INIT_HLIST_NODE(&dentry->d_u.d_alias);
        INIT_LIST_HEAD(&dentry->d_child);
        d_set_d_op(dentry, dentry->d_sb->s_d_op);

        if (dentry->d_op && dentry->d_op->d_init) {
                err = dentry->d_op->d_init(dentry);
                if (err) {
                        if (dname_external(dentry))
                                kfree(external_name(dentry));
                        kmem_cache_free(dentry_cache, dentry);
                        return NULL;
                }
        }

        this_cpu_inc(nr_dentry);

        return dentry;
}

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed around is what's used.
 */
struct dentry *d_alloc(struct dentry *parent, const struct qstr *name)
{
        struct dentry *dentry = __d_alloc(parent->d_sb, name);
        if (!dentry)
                return NULL;
        spin_lock(&parent->d_lock);
        /*
         * don't need child lock because it is not subject
         * to concurrency here
         */
        __dget_dlock(parent);
        dentry->d_parent = parent;
        list_add(&dentry->d_child, &parent->d_subdirs);
        spin_unlock(&parent->d_lock);

        return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_anon(struct super_block *sb)
{
        return __d_alloc(sb, NULL);
}
EXPORT_SYMBOL(d_alloc_anon);

struct dentry *d_alloc_cursor(struct dentry *parent)
{
        struct dentry *dentry = d_alloc_anon(parent->d_sb);
        if (dentry) {
                dentry->d_flags |= DCACHE_DENTRY_CURSOR;
                dentry->d_parent = dget(parent);
        }
        return dentry;
}

/**
 * d_alloc_pseudo - allocate a dentry (known to be not a normal filesystem entry)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 * This is used for pipes, sockets et.al. - the stuff that should
 * never be anyone's children or parents.  Unlike all other
 * dentries, these will not have RCU delay between dropping the
 * last reference and freeing them.
 *
 * The only user is alloc_file_pseudo() and that's what should
 * be considered a public interface.  Don't use directly.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
        struct dentry *dentry = __d_alloc(sb, name);
        if (likely(dentry))
                dentry->d_flags |= DCACHE_NORCU;
        return dentry;
}

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
        struct qstr q;

        q.name = name;
        q.hash_len = hashlen_string(parent, name);
        return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
        WARN_ON_ONCE(dentry->d_op);
        WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
                                DCACHE_OP_COMPARE |
                                DCACHE_OP_REVALIDATE |
                                DCACHE_OP_WEAK_REVALIDATE |
                                DCACHE_OP_DELETE |
                                DCACHE_OP_REAL));
        dentry->d_op = op;
        if (!op)
                return;
        if (op->d_hash)
                dentry->d_flags |= DCACHE_OP_HASH;
        if (op->d_compare)
                dentry->d_flags |= DCACHE_OP_COMPARE;
        if (op->d_revalidate)
                dentry->d_flags |= DCACHE_OP_REVALIDATE;
        if (op->d_weak_revalidate)
                dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
        if (op->d_delete)
                dentry->d_flags |= DCACHE_OP_DELETE;
        if (op->d_prune)
                dentry->d_flags |= DCACHE_OP_PRUNE;
        if (op->d_real)
                dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);

/*
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry: The dentry to mark
 *
 * Mark a dentry as falling through to the lower layer in a layered
 * filesystem.  This flag may be recorded on the medium.
 */
void d_set_fallthru(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        dentry->d_flags |= DCACHE_FALLTHRU;
        spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);

static unsigned d_flags_for_inode(struct inode *inode)
{
        unsigned add_flags = DCACHE_REGULAR_TYPE;

        if (!inode)
                return DCACHE_MISS_TYPE;

        if (S_ISDIR(inode->i_mode)) {
                add_flags = DCACHE_DIRECTORY_TYPE;
                if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
                        if (unlikely(!inode->i_op->lookup))
                                add_flags = DCACHE_AUTODIR_TYPE;
                        else
                                inode->i_opflags |= IOP_LOOKUP;
                }
                goto type_determined;
        }

        if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
                if (unlikely(inode->i_op->get_link)) {
                        add_flags = DCACHE_SYMLINK_TYPE;
                        goto type_determined;
                }
                inode->i_opflags |= IOP_NOFOLLOW;
        }

        if (unlikely(!S_ISREG(inode->i_mode)))
                add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
        if (unlikely(IS_AUTOMOUNT(inode)))
                add_flags |= DCACHE_NEED_AUTOMOUNT;
        return add_flags;
}

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
        unsigned add_flags = d_flags_for_inode(inode);
        WARN_ON(d_in_lookup(dentry));

        spin_lock(&dentry->d_lock);
        /*
         * Decrement negative dentry count if it was in the LRU list.
         */
        if (dentry->d_flags & DCACHE_LRU_LIST)
                this_cpu_dec(nr_dentry_negative);
        hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
        raw_write_seqcount_begin(&dentry->d_seq);
        __d_set_inode_and_type(dentry, inode, add_flags);
        raw_write_seqcount_end(&dentry->d_seq);
        fsnotify_update_flags(dentry);
        spin_unlock(&dentry->d_lock);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode *inode)
{
        BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
        if (inode) {
                security_d_instantiate(entry, inode);
                spin_lock(&inode->i_lock);
                __d_instantiate(entry, inode);
                spin_unlock(&inode->i_lock);
        }
}
EXPORT_SYMBOL(d_instantiate);

/*
 * This should be equivalent to d_instantiate() + unlock_new_inode(),
 * with lockdep-related part of unlock_new_inode() done before
 * anything else.  Use that instead of open-coding d_instantiate()/
 * unlock_new_inode() combinations.
 */
void d_instantiate_new(struct dentry *entry, struct inode *inode)
{
        BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
        BUG_ON(!inode);
        lockdep_annotate_inode_mutex_key(inode);
        security_d_instantiate(entry, inode);
        spin_lock(&inode->i_lock);
        __d_instantiate(entry, inode);
        WARN_ON(!(inode->i_state & I_NEW));
        inode->i_state &= ~I_NEW & ~I_CREATING;
        smp_mb();
        wake_up_bit(&inode->i_state, __I_NEW);
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);

struct dentry *d_make_root(struct inode *root_inode)
{
        struct dentry *res = NULL;

        if (root_inode) {
                res = d_alloc_anon(root_inode->i_sb);
                if (res)
                        d_instantiate(res, root_inode);
                else
                        iput(root_inode);
        }
        return res;
}
EXPORT_SYMBOL(d_make_root);

static struct dentry *__d_instantiate_anon(struct dentry *dentry,
                                           struct inode *inode,
                                           bool disconnected)
{
        struct dentry *res;
        unsigned add_flags;

        security_d_instantiate(dentry, inode);
        spin_lock(&inode->i_lock);
        res = __d_find_any_alias(inode);
        if (res) {
                spin_unlock(&inode->i_lock);
                dput(dentry);
                goto out_iput;
        }

        /* attach a disconnected dentry */
        add_flags = d_flags_for_inode(inode);

        if (disconnected)
                add_flags |= DCACHE_DISCONNECTED;

        spin_lock(&dentry->d_lock);
        __d_set_inode_and_type(dentry, inode, add_flags);
        hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
        if (!disconnected) {
                hlist_bl_lock(&dentry->d_sb->s_roots);
                hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
                hlist_bl_unlock(&dentry->d_sb->s_roots);
        }
        spin_unlock(&dentry->d_lock);
        spin_unlock(&inode->i_lock);

        return dentry;

 out_iput:
        iput(inode);
        return res;
}

struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
{
        return __d_instantiate_anon(dentry, inode, true);
}
EXPORT_SYMBOL(d_instantiate_anon);

static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
{
        struct dentry *tmp;
        struct dentry *res;

        if (!inode)
                return ERR_PTR(-ESTALE);
        if (IS_ERR(inode))
                return ERR_CAST(inode);

        res = d_find_any_alias(inode);
        if (res)
                goto out_iput;

        tmp = d_alloc_anon(inode->i_sb);
        if (!tmp) {
                res = ERR_PTR(-ENOMEM);
                goto out_iput;
        }

        return __d_instantiate_anon(tmp, inode, disconnected);

out_iput:
        iput(inode);
        return res;
}

/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations.  The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry.  If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
        return __d_obtain_alias(inode, true);
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry.  If
 * a dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  A %NULL or IS_ERR inode may be passed in and the error
 * will be propagated to the return value, with a %NULL @inode replaced
 * by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
        return __d_obtain_alias(inode, false);
}
EXPORT_SYMBOL(d_obtain_root);

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode:  the inode case-insensitive lookup has found
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * If a matching case-exact dentry already exists in the dcache, it is
 * used and returned; otherwise a new dentry with the exact case is
 * allocated, spliced, and returned.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
                        struct qstr *name)
{
        struct dentry *found, *res;

        /*
         * First check if a dentry matching the name already exists,
         * if not go ahead and create it now.
         */
        found = d_hash_and_lookup(dentry->d_parent, name);
        if (found) {
                iput(inode);
                return found;
        }
        if (d_in_lookup(dentry)) {
                found = d_alloc_parallel(dentry->d_parent, name,
                                         dentry->d_wait);
                if (IS_ERR(found) || !d_in_lookup(found)) {
                        iput(inode);
                        return found;
                }
        } else {
                found = d_alloc(dentry->d_parent, name);
                if (!found) {
                        iput(inode);
                        return ERR_PTR(-ENOMEM);
                }
        }
        res = d_splice_alias(inode, found);
        if (res) {
                dput(found);
                return res;
        }
        return found;
}
EXPORT_SYMBOL(d_add_ci);

static inline bool d_same_name(const struct dentry *dentry,
                               const struct dentry *parent,
                               const struct qstr *name)
{
        if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
                if (dentry->d_name.len != name->len)
                        return false;
                return dentry_cmp(dentry, name->name, name->len) == 0;
        }
        return parent->d_op->d_compare(dentry,
                                       dentry->d_name.len, dentry->d_name.name,
                                       name) == 0;
}

/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount
 * lock held, and rcu_read_lock held. The returned dentry must not be
 * stored into without taking d_lock and checking d_seq sequence count
 * against @seqp returned here.
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
                                const struct qstr *name,
                                unsigned *seqp)
{
        u64 hashlen = name->hash_len;
        const unsigned char *str = name->name;
        struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
        struct hlist_bl_node *node;
        struct dentry *dentry;

        /*
         * The hash list is protected using RCU.
         *
         * Carefully use d_seq when comparing a candidate dentry, to
         * avoid races with d_move().
         *
         * It is possible that concurrent renames can mess up our list
         * walk here and result in missing our dentry, resulting in a
         * false-negative result. d_lookup() protects against concurrent
         * renames using the rename_lock seqlock.
         */
        hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
                unsigned seq;

seqretry:
                /*
                 * The dentry sequence count protects us from concurrent
                 * renames, and thus protects parent and name fields.
                 *
                 * The caller must perform a seqcount check in order
                 * to do anything useful with the returned dentry.
                 *
                 * NOTE! We do a "raw" seqcount_begin here. That means
                 * that we don't wait for the sequence count to
                 * stabilize if it is in the middle of a change. If we
                 * do the slow dentry compare, we will do seqretries
                 * until it is stable, and if we end up with a
                 * successful lookup, we actually want to exit RCU
                 * lookup anyway.
                 *
                 * Note that raw_seqcount_begin still *does* smp_rmb(),
                 * so we are still guaranteed NUL-termination of
                 * ->d_name.name.
                 */
                seq = raw_seqcount_begin(&dentry->d_seq);
                if (dentry->d_parent != parent)
                        continue;
                if (d_unhashed(dentry))
                        continue;

                if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
                        int tlen;
                        const char *tname;
                        if (dentry->d_name.hash != hashlen_hash(hashlen))
                                continue;
                        tlen = dentry->d_name.len;
                        tname = dentry->d_name.name;
                        /* we want a consistent (name,len) pair */
                        if (read_seqcount_retry(&dentry->d_seq, seq)) {
                                cpu_relax();
                                goto seqretry;
                        }
                        if (parent->d_op->d_compare(dentry,
                                                    tlen, tname, name) != 0)
                                continue;
                } else {
                        if (dentry->d_name.hash_len != hashlen)
                                continue;
                        if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
                                continue;
                }
                *seqp = seq;
                return dentry;
        }
        return NULL;
}

/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented
 * and the dentry is returned. The caller must use dput to free the
 * entry when it has finished using it. %NULL is returned if the dentry
 * does not exist.
 */
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
        struct dentry *dentry;
        unsigned seq;

        do {
                seq = read_seqbegin(&rename_lock);
                dentry = __d_lookup(parent, name);
                if (dentry)
                        break;
        } while (read_seqretry(&rename_lock, seq));
        return dentry;
}
EXPORT_SYMBOL(d_lookup);

/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
        unsigned int hash = name->hash;
        struct hlist_bl_head *b = d_hash(hash);
        struct hlist_bl_node *node;
        struct dentry *found = NULL;
        struct dentry *dentry;

        /*
         * Note: There is significant duplication with __d_lookup_rcu
         * which is required to prevent single threaded performance
         * regressions, especially on architectures where smp_rmb (in
         * seqcounts) is costly. Keep the two functions in sync.
         *
         * The hash list is protected using RCU. Take d_lock when
         * comparing a candidate dentry, to avoid races with d_move().
         *
         * It is possible that concurrent renames can mess up our list
         * walk here and result in missing our dentry, resulting in a
         * false-negative result. d_lookup() protects against concurrent
         * renames using the rename_lock seqlock.
         */
        rcu_read_lock();

        hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

                if (dentry->d_name.hash != hash)
                        continue;

                spin_lock(&dentry->d_lock);
                if (dentry->d_parent != parent)
                        goto next;
                if (d_unhashed(dentry))
                        goto next;

                if (!d_same_name(dentry, parent, name))
                        goto next;

                dentry->d_lockref.count++;
                found = dentry;
                spin_unlock(&dentry->d_lock);
                break;
next:
                spin_unlock(&dentry->d_lock);
        }
        rcu_read_unlock();

        return found;
}

/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
        /*
         * Check for a fs-specific hash function. Note that we must
         * calculate the standard hash first, as the d_op->d_hash()
         * routine may choose to leave the hash value unchanged.
         */
        name->hash = full_name_hash(dir, name->name, name->len);
        if (dir->d_flags & DCACHE_OP_HASH) {
                int err = dir->d_op->d_hash(dir, name);
                if (unlikely(err < 0))
                        return ERR_PTR(err);
        }
        return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);

/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it
 *
 * Usually, we want to just turn this into a negative dentry, but if
 * anybody else is currently using the dentry or the inode we can't do
 * that and we fall back on removing it from the hash queues and
 * waiting for it to be deleted later when it has no users.
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later.
 */
void d_delete(struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;

        spin_lock(&inode->i_lock);
        spin_lock(&dentry->d_lock);
        /*
         * Are we the only user?
         */
        if (dentry->d_lockref.count == 1) {
                dentry->d_flags &= ~DCACHE_CANT_MOUNT;
                dentry_unlink_inode(dentry);
        } else {
                __d_drop(dentry);
                spin_unlock(&dentry->d_lock);
                spin_unlock(&inode->i_lock);
        }
}
EXPORT_SYMBOL(d_delete);

static void __d_rehash(struct dentry *entry)
{
        struct hlist_bl_head *b = d_hash(entry->d_name.hash);

        hlist_bl_lock(b);
        hlist_bl_add_head_rcu(&entry->d_hash, b);
        hlist_bl_unlock(b);
}

/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry *entry)
{
        spin_lock(&entry->d_lock);
        __d_rehash(entry);
        spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
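
/*
 * i_dir_seq is an even/odd sequence count on the directory inode:
 * start_dir_add() spins until it can move it from even to odd, and
 * end_dir_add() publishes the new even value. d_alloc_parallel()
 * retries whenever it sees an odd value or a change, so it cannot
 * miss a concurrent d_add() or __d_move() of an in-lookup dentry.
 */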
static inline unsigned start_dir_add(struct inode *dir)
{
        for (;;) {
                unsigned n = dir->i_dir_seq;
                if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
                        return n;
                cpu_relax();
        }
}

static inline void end_dir_add(struct inode *dir, unsigned n)
{
        smp_store_release(&dir->i_dir_seq, n + 2);
}

static void d_wait_lookup(struct dentry *dentry)
{
        if (d_in_lookup(dentry)) {
                DECLARE_WAITQUEUE(wait, current);
                add_wait_queue(dentry->d_wait, &wait);
                do {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        spin_unlock(&dentry->d_lock);
                        schedule();
                        spin_lock(&dentry->d_lock);
                } while (d_in_lookup(dentry));
        }
}
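
/*
 * Allocate a dentry for a lookup, cooperating with any other lookup of
 * the same name that is already in flight. Returns either the new
 * dentry in in-lookup state, an existing matching dentry (after
 * waiting for its lookup to finish, if need be), or an ERR_PTR. The
 * retry loop revalidates against i_dir_seq and rename_lock so that a
 * concurrent rename or d_add() cannot be missed.
 */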
struct dentry *d_alloc_parallel(struct dentry *parent,
                                const struct qstr *name,
                                wait_queue_head_t *wq)
{
        unsigned int hash = name->hash;
        struct hlist_bl_head *b = in_lookup_hash(parent, hash);
        struct hlist_bl_node *node;
        struct dentry *new = d_alloc(parent, name);
        struct dentry *dentry;
        unsigned seq, r_seq, d_seq;

        if (unlikely(!new))
                return ERR_PTR(-ENOMEM);

retry:
        rcu_read_lock();
        seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
        r_seq = read_seqbegin(&rename_lock);
        dentry = __d_lookup_rcu(parent, name, &d_seq);
        if (unlikely(dentry)) {
                if (!lockref_get_not_dead(&dentry->d_lockref)) {
                        rcu_read_unlock();
                        goto retry;
                }
                if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
                        rcu_read_unlock();
                        dput(dentry);
                        goto retry;
                }
                rcu_read_unlock();
                dput(new);
                return dentry;
        }
        if (unlikely(read_seqretry(&rename_lock, r_seq))) {
                rcu_read_unlock();
                goto retry;
        }

        if (unlikely(seq & 1)) {
                rcu_read_unlock();
                goto retry;
        }

        hlist_bl_lock(b);
        if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
                hlist_bl_unlock(b);
                rcu_read_unlock();
                goto retry;
        }

        /*
         * No changes for the parent since the beginning of d_lookup().
         * Since all removals from the chain happen with hlist_bl_lock(),
         * any potential in-lookup matches are going to stay here until
         * we unlock the chain.  All fields are stable in everything
         * we encounter.
         */
        hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
                if (dentry->d_name.hash != hash)
                        continue;
                if (dentry->d_parent != parent)
                        continue;
                if (!d_same_name(dentry, parent, name))
                        continue;
                hlist_bl_unlock(b);
                /* now we can try to grab a reference */
                if (!lockref_get_not_dead(&dentry->d_lockref)) {
                        rcu_read_unlock();
                        goto retry;
                }

                rcu_read_unlock();
                /*
                 * somebody is likely to be still doing lookup for it;
                 * wait for them to finish
                 */
                spin_lock(&dentry->d_lock);
                d_wait_lookup(dentry);
                /*
                 * it's not in-lookup anymore; in principle we should
                 * repeat everything from the dcache lookup, but it's
                 * likely to be what d_lookup() would've found anyway.
                 * Callers that really care should do their own
                 * revalidation.
                 */
                if (unlikely(dentry->d_name.hash != hash))
                        goto mismatch;
                if (unlikely(dentry->d_parent != parent))
                        goto mismatch;
                if (unlikely(d_unhashed(dentry)))
                        goto mismatch;
                if (unlikely(!d_same_name(dentry, parent, name)))
                        goto mismatch;
                /* OK, it *is* a hashed match; return it */
                spin_unlock(&dentry->d_lock);
                dput(new);
                return dentry;
        }
        rcu_read_unlock();
        /* we can't take ->d_lock here; it's OK, though. */
        new->d_flags |= DCACHE_PAR_LOOKUP;
        new->d_wait = wq;
        hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
        hlist_bl_unlock(b);
        return new;
mismatch:
        spin_unlock(&dentry->d_lock);
        dput(dentry);
        goto retry;
}
EXPORT_SYMBOL(d_alloc_parallel);

void __d_lookup_done(struct dentry *dentry)
{
        struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
                                                 dentry->d_name.hash);
        hlist_bl_lock(b);
        dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
        __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
        wake_up_all(dentry->d_wait);
        dentry->d_wait = NULL;
        hlist_bl_unlock(b);
        INIT_HLIST_NODE(&dentry->d_u.d_alias);
        INIT_LIST_HEAD(&dentry->d_lru);
}
EXPORT_SYMBOL(__d_lookup_done);

/* inode->i_lock held if inode is non-NULL */

static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
        struct inode *dir = NULL;
        unsigned n;
        spin_lock(&dentry->d_lock);
        if (unlikely(d_in_lookup(dentry))) {
                dir = dentry->d_parent->d_inode;
                n = start_dir_add(dir);
                __d_lookup_done(dentry);
        }
        if (inode) {
                unsigned add_flags = d_flags_for_inode(inode);
                hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
                raw_write_seqcount_begin(&dentry->d_seq);
                __d_set_inode_and_type(dentry, inode, add_flags);
                raw_write_seqcount_end(&dentry->d_seq);
                fsnotify_update_flags(dentry);
        }
        __d_rehash(dentry);
        if (dir)
                end_dir_add(dir, n);
        spin_unlock(&dentry->d_lock);
        if (inode)
                spin_unlock(&inode->i_lock);
}

/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 */
void d_add(struct dentry *entry, struct inode *inode)
{
        if (inode) {
                security_d_instantiate(entry, inode);
                spin_lock(&inode->i_lock);
        }
        __d_add(entry, inode);
}
EXPORT_SYMBOL(d_add);

/**
 * d_exact_alias - find and hash an exact unhashed alias
 * @entry: dentry to add
 * @inode: The inode to go with this dentry
 *
 * If an unhashed dentry with the same name/parent and desired
 * inode already exists, hash and return it.  Otherwise, return
 * NULL.
 *
 * Parent directory should be locked.
 */
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
        struct dentry *alias;
        unsigned int hash = entry->d_name.hash;

        spin_lock(&inode->i_lock);
        hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
                /*
                 * Don't need alias->d_lock here, because aliases with
                 * d_parent == entry->d_parent are not subject to name or
                 * parent changes, because the parent inode i_mutex is held.
                 */
                if (alias->d_name.hash != hash)
                        continue;
                if (alias->d_parent != entry->d_parent)
                        continue;
                if (!d_same_name(alias, entry->d_parent, &entry->d_name))
                        continue;
                spin_lock(&alias->d_lock);
                if (!d_unhashed(alias)) {
                        spin_unlock(&alias->d_lock);
                        alias = NULL;
                } else {
                        __dget_dlock(alias);
                        __d_rehash(alias);
                        spin_unlock(&alias->d_lock);
                }
                spin_unlock(&inode->i_lock);
                return alias;
        }
        spin_unlock(&inode->i_lock);
        return NULL;
}
EXPORT_SYMBOL(d_exact_alias);
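
/*
 * Name juggling for __d_move(): swap_names() is used for exchanges and
 * must preserve both names; copy_name() is used for plain moves and
 * may drop the source's old external name once it is unused.
 */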
static void swap_names(struct dentry *dentry, struct dentry *target)
{
        if (unlikely(dname_external(target))) {
                if (unlikely(dname_external(dentry))) {
                        /*
                         * Both external: swap the pointers
                         */
                        swap(target->d_name.name, dentry->d_name.name);
                } else {
                        /*
                         * dentry:internal, target:external.  Steal target's
                         * storage and make target internal.
                         */
                        memcpy(target->d_iname, dentry->d_name.name,
                               dentry->d_name.len + 1);
                        dentry->d_name.name = target->d_name.name;
                        target->d_name.name = target->d_iname;
                }
        } else {
                if (unlikely(dname_external(dentry))) {
                        /*
                         * dentry:external, target:internal.  Give dentry's
                         * storage to target and make dentry internal.
                         */
                        memcpy(dentry->d_iname, target->d_name.name,
                               target->d_name.len + 1);
                        target->d_name.name = dentry->d_name.name;
                        dentry->d_name.name = dentry->d_iname;
                } else {
                        /*
                         * Both are internal.
                         */
                        unsigned int i;
                        BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
                        for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
                                swap(((long *) &dentry->d_iname)[i],
                                     ((long *) &target->d_iname)[i]);
                        }
                }
        }
        swap(dentry->d_name.hash_len, target->d_name.hash_len);
}

static void copy_name(struct dentry *dentry, struct dentry *target)
{
        struct external_name *old_name = NULL;
        if (unlikely(dname_external(dentry)))
                old_name = external_name(dentry);
        if (unlikely(dname_external(target))) {
                atomic_inc(&external_name(target)->u.count);
                dentry->d_name = target->d_name;
        } else {
                memcpy(dentry->d_iname, target->d_name.name,
                       target->d_name.len + 1);
                dentry->d_name.name = dentry->d_iname;
                dentry->d_name.hash_len = target->d_name.hash_len;
        }
        if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
                kfree_rcu(old_name, u.head);
}
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	struct dentry *old_parent, *p;
	struct inode *dir = NULL;
	unsigned n;

	WARN_ON(!dentry->d_inode);
	if (WARN_ON(dentry == target))
		return;

	BUG_ON(d_ancestor(target, dentry));
	old_parent = dentry->d_parent;
	p = d_ancestor(old_parent, target);
	if (IS_ROOT(dentry)) {
		BUG_ON(p);
		spin_lock(&target->d_parent->d_lock);
	} else if (!p) {
		/* target is not a descendent of dentry->d_parent */
		spin_lock(&target->d_parent->d_lock);
		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		BUG_ON(p == dentry);
		spin_lock(&old_parent->d_lock);
		if (p != target)
			spin_lock_nested(&target->d_parent->d_lock,
					DENTRY_D_LOCK_NESTED);
	}
	spin_lock_nested(&dentry->d_lock, 2);
	spin_lock_nested(&target->d_lock, 3);

	if (unlikely(d_in_lookup(target))) {
		dir = target->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(target);
	}

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* unhash both */
	if (!d_unhashed(dentry))
		___d_drop(dentry);
	if (!d_unhashed(target))
		___d_drop(target);

	/* ...and switch them in the tree */
	dentry->d_parent = target->d_parent;
	if (!exchange) {
		copy_name(dentry, target);
		target->d_hash.pprev = NULL;
		dentry->d_parent->d_lockref.count++;
		if (dentry != old_parent) /* wasn't IS_ROOT */
			WARN_ON(!--old_parent->d_lockref.count);
	} else {
		target->d_parent = old_parent;
		swap_names(dentry, target);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		__d_rehash(target);
		fsnotify_update_flags(target);
	}
	list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	__d_rehash(dentry);
	fsnotify_update_flags(dentry);
	fscrypt_handle_d_move(dentry);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	if (dir)
		end_dir_add(dir, n);

	if (dentry->d_parent != old_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (dentry != old_parent)
		spin_unlock(&old_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
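
/*
 * Note on the lock ordering in __d_move() above: the two parents are
 * locked first (the ancestor-most one first when one parent sits above
 * the other), then dentry and target themselves at lockdep subclasses
 * 2 and 3, so the up-to-four d_locks are always taken in a consistent
 * parent-before-child order.
 */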

/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
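
/*
 * Illustrative sketch (not part of this file): the typical caller is
 * the VFS rename path, which, once the filesystem's ->rename has
 * succeeded, updates the dcache roughly like:
 *
 *	if (!error && !(flags & RENAME_EXCHANGE))
 *		d_move(old_dentry, new_dentry);
 */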

/*
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
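
/*
 * Illustrative sketch (not part of this file): d_exchange() backs the
 * RENAME_EXCHANGE case of rename, reachable from userspace as e.g.
 *
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *
 * after which the two (positive) dentries have swapped places in the
 * tree.
 */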

/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}
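
/*
 * Illustrative example: for a path /a/b/c, d_ancestor(a, c) returns b
 * (the child of a through which c descends), while d_ancestor(c, a)
 * returns NULL since a is not below c.
 */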

/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL;
	struct rw_semaphore *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!inode_trylock_shared(alias->d_parent->d_inode))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	if (m2)
		up_read(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}

/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has an IS_ROOT alias, then d_move that in
 * place of the given dentry and return it, else simply d_add the inode
 * to the dentry and return NULL.
 *
 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
 * we should error out: directories can't have multiple aliases.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 *
 * Cluster filesystems may call this function with a negative, hashed dentry.
 * In that case, we know that the inode will be a regular file, and also this
 * will only occur during atomic_open. So we need to check for the dentry
 * being already hashed only in the final case.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode)
		goto out;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			/* The reference to new ensures it remains an alias */
			spin_unlock(&inode->i_lock);
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				write_sequnlock(&rename_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				struct dentry *old_parent = dget(new->d_parent);
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
				dput(old_parent);
			} else {
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
			}
			iput(inode);
			return new;
		}
	}
out:
	__d_add(dentry, inode);
	return NULL;
}
EXPORT_SYMBOL(d_splice_alias);
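
/*
 * Illustrative sketch (not part of this file): the canonical use of
 * d_splice_alias() in a filesystem ->lookup, where returning its
 * result handles the NULL, moved-alias and error cases in one step
 * ("myfs_iget" is a hypothetical helper that may return NULL or an
 * ERR_PTR):
 *
 *	static struct dentry *myfs_lookup(struct inode *dir,
 *			struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = myfs_iget(dir, &dentry->d_name);
 *
 *		return d_splice_alias(inode, dentry);
 *	}
 */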

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
 * Returns false otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */
bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	bool result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return true;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_readlock to protect against the d_parent trashing
		 * due to d_move
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = true;
		else
			result = false;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
EXPORT_SYMBOL(is_subdir);
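
/*
 * Illustrative sketch (not part of this file): a typical containment
 * check before operating on a dentry relative to some root (the caller
 * must already hold a reference pinning "dentry"):
 *
 *	if (!is_subdir(dentry, root->dentry))
 *		return -EINVAL;
 */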

static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}

void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill);
}

EXPORT_SYMBOL(d_genocide);

void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
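
/*
 * Illustrative sketch (not part of this file): a filesystem ->tmpfile
 * implementation typically allocates an inode with i_nlink == 1 and
 * hands it to d_tmpfile(), which drops the link count and instantiates
 * the dentry under an anonymous "#<ino>" name ("myfs_new_inode" is a
 * hypothetical helper):
 *
 *	struct inode *inode = myfs_new_inode(dir, mode);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	d_tmpfile(dentry, inode);
 *	return 0;
 */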

static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
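
/*
 * Illustrative note: the table size can be pinned from the kernel
 * command line, e.g. booting with
 *
 *	dhash_entries=1048576
 *
 * requests 2^20 hash buckets instead of the memory-scaled default.
 */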

static void __init dcache_init_early(void)
{
	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY | HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	d_hash_shift = 32 - d_hash_shift;
}
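
/*
 * Note on the "32 - d_hash_shift" above: alloc_large_system_hash()
 * stores log2(buckets) in d_hash_shift, and inverting it lets d_hash()
 * index the table with the high bits of the 32-bit hash, e.g. with
 * 2^20 buckets the shift becomes 12 and "hash >> 12" keeps the top 20
 * bits.
 */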

static void __init dcache_init(void)
{
	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
		d_iname);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	d_hash_shift = 32 - d_hash_shift;
}

/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

void __init vfs_caches_init_early(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);

	dcache_init_early();
	inode_init_early();
}

void __init vfs_caches_init(void)
{
	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);

	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}