1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/ratelimit.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/fsnotify.h>
22#include <linux/slab.h>
23#include <linux/init.h>
24#include <linux/hash.h>
25#include <linux/cache.h>
26#include <linux/export.h>
27#include <linux/security.h>
28#include <linux/seqlock.h>
29#include <linux/bootmem.h>
30#include <linux/bit_spinlock.h>
31#include <linux/rculist_bl.h>
32#include <linux/list_lru.h>
33#include "internal.h"
34#include "mount.h"
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
/*
 * Tunable (vfs_cache_pressure sysctl) controlling how aggressively
 * dentries/inodes are reclaimed relative to other caches; 100 is neutral.
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

/*
 * Seqlock written across renames (see d_set_mounted()); lockless tree
 * walkers sample it and retry if a rename raced with them (see d_walk()).
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

/* Slab cache all struct dentry objects are allocated from. */
static struct kmem_cache *dentry_cache __read_mostly;

/* Shared canonical names; slash_name is the fallback for anonymous dentries. */
const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
85
86
87
88
89
90
91
92
93
94
/* Main dentry hash table; d_hash_shift maps a name hash to a chain index. */
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

/* Map a name hash to its bit-locked hash chain head. */
static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> d_hash_shift);
}

/* Separate fixed-size table for in-lookup (not yet hashed) dentries. */
#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	/* Mix in the parent pointer so lookups under one parent spread out. */
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
113
114
115
/* Statistics exposed via the fs sysctl handler (see proc_nr_dentry()). */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

/* Per-cpu counters, summed on demand to avoid cache-line bouncing. */
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
122
123#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
124
125
126
127
128
129
130
131
132
133
134
135
136
137static long get_nr_dentry(void)
138{
139 int i;
140 long sum = 0;
141 for_each_possible_cpu(i)
142 sum += per_cpu(nr_dentry, i);
143 return sum < 0 ? 0 : sum;
144}
145
146static long get_nr_dentry_unused(void)
147{
148 int i;
149 long sum = 0;
150 for_each_possible_cpu(i)
151 sum += per_cpu(nr_dentry_unused, i);
152 return sum < 0 ? 0 : sum;
153}
154
/*
 * Sysctl handler for the dentry statistics: refresh the global counters
 * from the per-cpu variables, then defer to the generic ulong handler.
 */
int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
162#endif
163
164
165
166
167
168#ifdef CONFIG_DCACHE_WORD_ACCESS
169
170#include <asm/word-at-a-time.h>
171
172
173
174
175
176
177
178
179
/*
 * Compare two names a machine word at a time.  Returns 0 on match,
 * 1 on mismatch.
 *
 * 'cs' is read with read_word_at_a_time() (may read past the string;
 * the final masked compare discards the excess bytes), while 'ct' is
 * loaded with load_unaligned_zeropad() so an unaligned tail crossing
 * into an unmapped page cannot fault.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = read_word_at_a_time(cs);
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;	/* partial last word: compare under mask below */
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);	/* keep only the low tcount bytes */
	return unlikely(!!((a ^ b) & mask));
}
200
201#else
202
/*
 * Byte-at-a-time fallback name compare (no word-at-a-time support).
 * Returns 0 on match, 1 on mismatch.  tcount must be non-zero.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs++ != *ct++)
			return 1;
	} while (--tcount);
	return 0;
}
214
215#endif
216
/*
 * Compare a dentry's name against (ct, tcount).  Safe to call under RCU:
 * the name pointer is loaded exactly once with READ_ONCE() because a
 * concurrent rename may switch d_name.name between the inline buffer
 * and an external name.  Pairs with the smp_store_release() publishing
 * the name bytes before the pointer in __d_alloc().
 */
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}
239
/*
 * Out-of-line storage for names too long for dentry->d_iname.  The
 * refcount lets name snapshots pin the allocation; the union with the
 * RCU head is safe because the count is dead once freeing is scheduled.
 */
struct external_name {
	union {
		atomic_t count;		/* references: owning dentry + snapshots */
		struct rcu_head head;	/* reused for RCU-deferred free */
	} u;
	unsigned char name[];		/* NUL-terminated name bytes */
};

/* Recover the container from d_name.name; name must be external. */
static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}
252
/* RCU callback: final free of a dentry with an inline name. */
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

/* RCU callback: free an external name whose refcount hit zero. */
static void __d_free_external_name(struct rcu_head *head)
{
	struct external_name *name = container_of(head, struct external_name,
						  u.head);

	/* Undo the indirectly-reclaimable accounting done in __d_alloc(). */
	mod_node_page_state(page_pgdat(virt_to_page(name)),
			    NR_INDIRECTLY_RECLAIMABLE_BYTES,
			    -ksize(name));

	kfree(name);
}

/* RCU callback: free both the external name and the dentry itself. */
static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	__d_free_external_name(&external_name(dentry)->u.head);

	kmem_cache_free(dentry_cache, dentry);
}

/* Does this dentry use an out-of-line (kmalloc'ed) name? */
static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}
285
/*
 * Capture a stable copy of a dentry's name for use by callers that must
 * refer to it after d_lock is dropped.  External names are pinned by
 * bumping their refcount; inline names are copied into the snapshot.
 */
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		atomic_inc(&p->u.count);
		spin_unlock(&dentry->d_lock);
		name->name = p->name;
	} else {
		/* +1 copies the trailing NUL as well */
		memcpy(name->inline_name, dentry->d_iname,
		       dentry->d_name.len + 1);
		spin_unlock(&dentry->d_lock);
		name->name = name->inline_name;
	}
}
EXPORT_SYMBOL(take_dentry_name_snapshot);

/*
 * Release a snapshot taken above; frees the external name (RCU-deferred)
 * if we held the last reference.
 */
void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name != name->inline_name)) {
		struct external_name *p;
		p = container_of(name->name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->u.count)))
			call_rcu(&p->u.head, __d_free_external_name);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
313
/*
 * Attach an inode to the dentry and update the type bits in d_flags.
 * d_flags is read/written with READ_ONCE/WRITE_ONCE because lockless
 * (RCU-walk) readers inspect it without holding d_lock.
 */
static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}

/* Inverse of the above: clear the type bits and make the dentry negative. */
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
}
335
/*
 * Final disposal of a dead dentry (and its external name, if it holds
 * the last reference to one).
 */
static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			/* Last ref on the name too: free both via RCU. */
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
		/* A name snapshot still pins the name; free only the dentry. */
	}
	/* If the dentry was never visible to RCU lookups, free it directly. */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
352
353
354
355
356
/*
 * Detach the inode from a positive dentry, making it negative, then drop
 * the inode reference (via ->d_iput if the fs provides one, else iput()).
 * Called with dentry->d_lock and inode->i_lock held; drops both.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;

	/* Bump d_seq so lockless (RCU-walk) users notice the transition. */
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
/*
 * dentry LRU / shrink-list bookkeeping.
 *
 * DCACHE_LRU_LIST means d_lru is linked somewhere: either the
 * superblock's LRU or a private shrink list (then DCACHE_SHRINK_LIST is
 * set as well).  All helpers run under the dentry's d_lock and keep the
 * nr_dentry_unused per-cpu counter in sync.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))

/* Add an unreferenced dentry to its superblock's LRU. */
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

/* Remove a dentry from the superblock LRU (not from a shrink list). */
static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

/* Unlink a dentry from a private shrink list. */
static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

/* Put a dentry (currently on neither list) onto a private shrink list. */
static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * The two helpers below run from list_lru walk callbacks, with the LRU
 * lock held, and therefore use the list_lru isolate API.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_lru_isolate(lru, &dentry->d_lru);
}

/*
 * Move a dentry from the LRU to a dispose list.  Note: DCACHE_LRU_LIST
 * stays set and nr_dentry_unused is NOT decremented here; that happens
 * later in d_shrink_del().
 */
static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
/*
 * Unhash a dentry without resetting d_hash.pprev.  Caller holds d_lock;
 * the chain itself is protected by its hlist_bl bit lock.
 */
static void ___d_drop(struct dentry *dentry)
{
	struct hlist_bl_head *b;

	/* Root dentries live on the superblock's s_roots list instead. */
	if (unlikely(IS_ROOT(dentry)))
		b = &dentry->d_sb->s_roots;
	else
		b = d_hash(dentry->d_name.hash);

	hlist_bl_lock(b);
	__hlist_bl_del(&dentry->d_hash);
	hlist_bl_unlock(b);
}

/*
 * Unhash the dentry and invalidate d_seq so in-flight lockless lookups
 * fail.  d_hash.pprev == NULL is what d_unhashed() tests.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		___d_drop(dentry);
		dentry->d_hash.pprev = NULL;
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);

/* Locked wrapper: drop the dentry from the hash under d_lock. */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
498
/*
 * Take a dying dentry off its parent's d_subdirs list, cooperating with
 * directory cursors (DCACHE_DENTRY_CURSOR) that may still reference our
 * list position.
 */
static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/*
	 * Mark it killed first so code that walks d_subdirs (d_walk())
	 * can tell this entry is going away.
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * After __list_del_entry() our d_child.next is stale but still
	 * readable.  Advance it past any cursor entries that immediately
	 * follow, so anything that resumes iteration through us lands on
	 * a real sibling (or the list head) rather than a cursor.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}
536
/*
 * Actually tear down a dentry whose refcount reached zero.  Called with
 * its d_lock held, plus the parent's d_lock and the inode's i_lock where
 * applicable; all are dropped here.  If the dentry sits on someone's
 * shrink list, the final free is deferred to that list walker via
 * DCACHE_MAY_FREE.
 */
static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/* Make lockless refcount grabs (lockref) fail from here on. */
	lockref_mark_dead(&dentry->d_lockref);

	/* Give the filesystem a last look while the entry is intact. */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* Unhash, unlink from the parent, then detach the inode. */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);	/* drops d_lock and i_lock */
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* If a shrink-list walker still holds us, let it do the free. */
	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
	cond_resched();
}
583
/*
 * Slow path of lock_parent(): d_lock must be dropped to take the
 * parent's lock in the correct order, and the parent may change while
 * we are unlocked.  RCU keeps the candidate parent's memory valid.
 * Returns the locked parent with d_lock re-taken (nested), or NULL if
 * the dentry turned out to be its own parent.
 *
 * NOTE: because d_lock was dropped, the dentry itself may have changed
 * state in the meantime; callers must re-validate.
 */
static struct dentry *__lock_parent(struct dentry *dentry)
{
	struct dentry *parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = READ_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * Re-check under the parent's lock: a concurrent rename may have
	 * moved us; retry until the parent is stable.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

/*
 * Lock the parent's d_lock while already holding the child's.  Trylock
 * fast path; otherwise fall back to the careful __lock_parent().
 */
static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	return __lock_parent(dentry);
}
621
/*
 * Decide whether a dentry whose last reference is being dropped should
 * stay in the cache.  Called with d_lock held and count == 1.  Returns
 * true (after dropping the count and parking the dentry on the LRU) to
 * retain it, false if the caller should go on to kill it.
 */
static inline bool retain_dentry(struct dentry *dentry)
{
	WARN_ON(d_in_lookup(dentry));

	/* Unhashed: nothing can find it again, no point caching it. */
	if (unlikely(d_unhashed(dentry)))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		return false;

	/* Let the filesystem veto caching via ->d_delete(). */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			return false;
	}
	/* Retained: drop the ref, ensure LRU membership, mark recently used. */
	dentry->d_lockref.count--;
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
		dentry->d_flags |= DCACHE_REFERENCED;
	return true;
}
645
646
647
648
649
650
/*
 * Finish off a dentry whose refcount reached zero and that should not
 * be retained.  Called with d_lock held; tries to trylock the inode and
 * parent locks, falling back to the ordered slow path on contention.
 * Returns the parent dentry (whose reference the caller must continue
 * dropping) or NULL when there is nothing more to do.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto slow_positive;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			parent = __lock_parent(dentry);
			/*
			 * __lock_parent() dropped d_lock: the dentry may
			 * have gone from negative to positive meanwhile,
			 * in which case we must restart via the i_lock
			 * path.
			 */
			if (likely(inode || !dentry->d_inode))
				goto got_locks;
			/* negative -> positive transition under us */
			if (parent)
				spin_unlock(&parent->d_lock);
			inode = dentry->d_inode;
			goto slow_positive;
		}
	}
	__dentry_kill(dentry);
	return parent;

slow_positive:
	/* Ordered acquisition: i_lock first, then d_lock, then parent. */
	spin_unlock(&dentry->d_lock);
	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	parent = lock_parent(dentry);
got_locks:
	/*
	 * Locks were dropped along the way: someone may have taken a new
	 * reference, or retention may now apply.  Re-check before killing.
	 */
	if (unlikely(dentry->d_lockref.count != 1)) {
		dentry->d_lockref.count--;
	} else if (likely(!retain_dentry(dentry))) {
		__dentry_kill(dentry);
		return parent;
	}
	/* The dentry stays alive: unwind all the locks we hold. */
	if (inode)
		spin_unlock(&inode->i_lock);
	if (parent)
		spin_unlock(&parent->d_lock);
	spin_unlock(&dentry->d_lock);
	return NULL;
}
696
697
698
699
700
701
702
703
704
/*
 * Try to drop a reference without taking any locks.
 *
 * Returns true if the reference was dropped and the caller is done.
 * Returns false to request the slow path: the refcount is then exactly
 * 1 (ours) and d_lock is held.
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * Filesystems with a ->d_delete() hook need d_lock held when the
	 * count reaches zero, so they cannot use the lockless decrement.
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/* Lockless decrement; fails rather than dropping to zero. */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * Negative return: the lockless op could not be performed (dead
	 * or contended lockref, or no arch support).  Retry under d_lock.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return true;
		}
		return false;	/* count == 1, d_lock held: slow path */
	}

	/* Dropped a non-final reference: nothing else to do. */
	if (ret)
		return true;

	/*
	 * Count hit zero.  If the dentry already looks fully retained
	 * (hashed, on the LRU, recently referenced, not disconnected)
	 * we can leave it at zero without ever taking d_lock.  The
	 * smp_rmb() orders the flag reads against the decrement above.
	 */
	smp_rmb();
	d_flags = READ_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

	/* Nothing to do if it's fully cached and referenced. */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return true;

	/* Not obviously retained: take the lock and do it properly. */
	spin_lock(&dentry->d_lock);

	/*
	 * Somebody else may have re-got the dentry between our decrement
	 * and acquiring the lock; if so there is nothing left for us.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return true;
	}

	/*
	 * Hand over to the slow path in its conventional state: one
	 * reference (ours) and d_lock held.
	 */
	dentry->d_lockref.count = 1;
	return false;
}
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
/**
 * dput - release a dentry
 * @dentry: dentry to release (may be NULL)
 *
 * Drop one reference.  On the last reference either retain the dentry
 * on the LRU or kill it; killing can cascade to the parent, hence the
 * loop.  May sleep (cond_resched() in __dentry_kill()).
 */
void dput(struct dentry *dentry)
{
	while (dentry) {
		might_sleep();

		/*
		 * RCU protects the dentry while fast_dput() pokes at it
		 * without a lock.
		 */
		rcu_read_lock();
		if (likely(fast_dput(dentry))) {
			rcu_read_unlock();
			return;
		}

		/* Slow path: fast_dput() left us holding d_lock. */
		rcu_read_unlock();

		if (likely(retain_dentry(dentry))) {
			spin_unlock(&dentry->d_lock);
			return;
		}

		/* Kill it; continue with the parent if one is returned. */
		dentry = dentry_kill(dentry);
	}
}
EXPORT_SYMBOL(dput);
850
851
852
/* Take a reference; caller already holds d_lock. */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

/* Take a reference without holding d_lock (lockref fast path). */
static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
862
/*
 * dget_parent - return a referenced pointer to the dentry's parent.
 *
 * Fast path: under RCU, snapshot d_parent, grab a reference if its
 * count is non-zero, then confirm d_parent did not change meanwhile.
 * Falls back to locking the parent on any race.
 */
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	rcu_read_lock();
	ret = READ_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		/* Still our parent?  Then the reference is the right one. */
		if (likely(ret == READ_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);	/* raced with a rename: drop and retry */
	}

repeat:
	/*
	 * Slow path: lock the candidate parent and re-check it is still
	 * our parent.  RCU keeps it from being freed under us.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	/* A live child implies the parent already holds a reference. */
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
902
/*
 * Return a referenced arbitrary alias of the inode (hashed or not), or
 * NULL if there is none.  Caller holds inode->i_lock.
 */
static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}
913
914
915
916
917
918
919
920
921struct dentry *d_find_any_alias(struct inode *inode)
922{
923 struct dentry *de;
924
925 spin_lock(&inode->i_lock);
926 de = __d_find_any_alias(inode);
927 spin_unlock(&inode->i_lock);
928 return de;
929}
930EXPORT_SYMBOL(d_find_any_alias);
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
/*
 * Find a usable alias of the inode and return it with a reference, or
 * NULL.  Directories just take whatever __d_find_any_alias() gives;
 * for other inodes only a hashed (still reachable) alias qualifies.
 * Caller holds inode->i_lock.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias;

	if (S_ISDIR(inode->i_mode))
		return __d_find_any_alias(inode);

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
	}
	return NULL;
}
964
/**
 * d_find_alias - locate a hashed alias of an inode
 * @inode: inode in question
 *
 * Like __d_find_alias() but takes i_lock itself; the cheap unlocked
 * empty-list check avoids the lock when the inode has no aliases.
 */
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
977
978
979
980
981
/*
 * d_prune_aliases - kill all unreferenced aliases of an inode
 * @inode: inode whose unused aliases should go away
 *
 * Each successful kill drops i_lock (inside __dentry_kill() ->
 * dentry_unlink_inode()), so the list scan restarts from the top.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			/* lock_parent() may drop d_lock; re-check the count. */
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
/*
 * Try to acquire everything needed to kill a dentry taken off a shrink
 * list: the inode's i_lock and the parent's d_lock, given d_lock held.
 * Lock ordering may force us to drop and re-take d_lock, so the
 * refcount (and identity of inode/parent) is re-checked after every
 * such window.  Returns true with all locks held, or false (only
 * d_lock held) if the dentry is no longer a kill candidate.
 */
static bool shrink_lock_dentry(struct dentry *dentry)
{
	struct inode *inode;
	struct dentry *parent;

	if (dentry->d_lockref.count)
		return false;

	inode = dentry->d_inode;
	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
		/* Ordered retry: i_lock first, then re-take d_lock. */
		spin_unlock(&dentry->d_lock);
		spin_lock(&inode->i_lock);
		spin_lock(&dentry->d_lock);
		if (unlikely(dentry->d_lockref.count))
			goto out;
		/* d_lock was dropped: the inode may have changed. */
		if (unlikely(inode != dentry->d_inode))
			goto out;
	}

	parent = dentry->d_parent;
	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
		return true;

	/* Same ordered-retry dance for the parent's lock. */
	spin_unlock(&dentry->d_lock);
	spin_lock(&parent->d_lock);
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		spin_lock(&dentry->d_lock);
		goto out;
	}
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	if (likely(!dentry->d_lockref.count))
		return true;
	spin_unlock(&parent->d_lock);
out:
	if (inode)
		spin_unlock(&inode->i_lock);
	return false;
}
1056
/*
 * Kill every dentry on a private shrink list (as filled by the LRU
 * isolate callbacks or select_collect()).
 */
static void shrink_dentry_list(struct list_head *list)
{
	while (!list_empty(list)) {
		struct dentry *dentry, *parent;

		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		rcu_read_lock();
		if (!shrink_lock_dentry(dentry)) {
			/*
			 * Re-referenced, or someone else is killing it.
			 * If that killer finished first (DCACHE_MAY_FREE,
			 * count marked dead i.e. negative), the final
			 * free falls to us.
			 */
			bool can_free = false;
			rcu_read_unlock();
			d_shrink_del(dentry);
			if (dentry->d_lockref.count < 0)
				can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}
		rcu_read_unlock();
		d_shrink_del(dentry);
		parent = dentry->d_parent;
		__dentry_kill(dentry);
		if (parent == dentry)
			continue;
		/*
		 * Drop the parent reference too, killing ancestors whose
		 * count thereby hits zero; this avoids quadratic passes
		 * in shrink_dcache_parent().
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
			dentry = dentry_kill(dentry);
	}
}
1093
/*
 * list_lru walk callback used by prune_dcache_sb(): per dentry, decide
 * whether to remove it from the LRU, give it a second chance (rotate),
 * or move it to the dispose list for killing.  Runs with the LRU lock
 * held.
 */
static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry *dentry = container_of(item, struct dentry, d_lru);

	/*
	 * Inverted lock order relative to the rest of dcache (d_lock
	 * inside the LRU lock), hence trylock; skipping an entry on
	 * contention is fine for a reclaim heuristic.
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced again: take it off the LRU here rather than leaving
	 * that to a later dput().
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);
		/*
		 * One-bit second chance: clear the referenced bit and
		 * rotate the dentry instead of reclaiming it this pass.
		 */
		return LRU_ROTATE;
	}

	/* Cold and unreferenced: queue it for disposal. */
	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed through to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock's dentry LRU: unused, cold dentries
 * are moved to a dispose list and killed.  Returns the number of
 * entries removed from the LRU.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}
1174
/*
 * Unconditional variant of dentry_lru_isolate() used when emptying a
 * superblock's whole LRU (shrink_dcache_sb()): every entry we can lock
 * is moved to the dispose list, regardless of references.
 */
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry *dentry = container_of(item, struct dentry, d_lru);

	/* Trylock for the same lock-order reason as dentry_lru_isolate(). */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203void shrink_dcache_sb(struct super_block *sb)
1204{
1205 long freed;
1206
1207 do {
1208 LIST_HEAD(dispose);
1209
1210 freed = list_lru_walk(&sb->s_dentry_lru,
1211 dentry_lru_isolate_shrink, &dispose, 1024);
1212
1213 this_cpu_sub(nr_dentry_unused, freed);
1214 shrink_dentry_list(&dispose);
1215 } while (list_lru_count(&sb->s_dentry_lru) > 0);
1216}
1217EXPORT_SYMBOL(shrink_dcache_sb);
1218
1219
1220
1221
1222
1223
1224
1225
/* Return codes for the d_walk() per-dentry callback. */
enum d_walk_ret {
	D_WALK_CONTINUE,	/* keep walking */
	D_WALK_QUIT,		/* stop the whole walk */
	D_WALK_NORETRY,		/* keep going, but don't restart on a rename */
	D_WALK_SKIP,		/* don't descend into this dentry */
};
1232
1233
1234
1235
1236
1237
1238
1239
1240
/**
 * d_walk - walk the dentry tree
 * @parent: start of walk
 * @data: opaque argument handed to @enter
 * @enter: callback, invoked under d_lock for @parent and every descendant
 *
 * Depth-first walk of @parent's subtree.  Concurrent renames are
 * detected via rename_lock: the first pass runs under a seqcount read
 * and, if a rename raced with it, the walk restarts with the seqlock
 * held for writing (seq = 1) — unless a D_WALK_NORETRY return forbade
 * retrying.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		/* Directory cursors are not real children; skip them. */
		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			/*
			 * Descend: the child becomes the new "parent
			 * lock"; the dep_map juggling keeps lockdep
			 * happy about the role change.
			 */
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	/* Retry with rename_lock held for writing (odd seq). */
	seq = 1;
	goto again;
}
1346
struct check_mount {
	struct vfsmount *mnt;		/* mount being inspected */
	unsigned int mounted;		/* set once a submount is found */
};

/*
 * d_walk() callback for path_has_submounts(): quit as soon as a dentry
 * that is a mountpoint within the given mount is found.
 */
static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
	struct check_mount *info = data;
	struct path path = { .mnt = info->mnt, .dentry = dentry };

	if (likely(!d_mountpoint(dentry)))
		return D_WALK_CONTINUE;
	if (__path_is_mountpoint(&path)) {
		info->mounted = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}
1365
1366
1367
1368
1369
1370
1371
1372
1373
/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Return true if the parent or its subdirectories contain a mount point
 * in the current namespace.  mount_lock is taken to keep the mount hash
 * stable across the walk.
 */
int path_has_submounts(const struct path *parent)
{
	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

	read_seqlock_excl(&mount_lock);
	d_walk(parent->dentry, &data, path_check_mount);
	read_sequnlock_excl(&mount_lock);

	return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);
1385
1386
1387
1388
1389
1390
1391
1392
1393
/*
 * Mark a dentry as a mountpoint.  Returns -ENOENT if any ancestor has
 * been unhashed, -EBUSY if the dentry is unlinked or already a
 * mountpoint, 0 on success.  rename_lock keeps the ancestor chain
 * stable during the check.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
/* Shared state for the shrink_dcache_parent() walk. */
struct select_data {
	struct dentry *start;		/* root of the walk; never disposed */
	struct list_head dispose;	/* unreferenced dentries to kill */
	int found;			/* shrinkable dentries seen this pass */
};

/*
 * d_walk() callback: collect unreferenced descendants onto the dispose
 * list; count (but do not steal) ones already on another shrink list.
 */
static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * Once something is on the dispose list we may return to the
	 * caller (that guarantees forward progress); the rest will be
	 * picked up on a later pass.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
1471
1472
1473
1474
1475
1476
1477
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 * Repeats collect/kill cycles until a pass finds nothing shrinkable.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect);

		if (!list_empty(&data.dispose)) {
			shrink_dentry_list(&data.dispose);
			continue;
		}

		cond_resched();
		/* found != 0 with empty dispose: others hold them; retry. */
		if (!data.found)
			break;
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
1500
/*
 * d_walk() callback for umount: report (loudly) any leaf dentry still
 * busy at unmount time.  _data is the tree root, whose single remaining
 * reference is expected.
 */
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendents; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
	       dentry,
	       dentry->d_inode ?
	       dentry->d_inode->i_ino : 0UL,
	       dentry,
	       dentry->d_lockref.count,
	       dentry->d_sb->s_type->name,
	       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}
1523
/* Shrink, sanity-check, unhash and release one tree rooted at @dentry. */
static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check);
	d_drop(dentry);
	dput(dentry);
}
1531
1532
1533
1534
/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	/* Additional tree roots on s_roots (see ___d_drop()) go the same way. */
	while (!hlist_bl_empty(&sb->s_roots)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}
1550
/*
 * d_walk() callback for d_invalidate(): take a reference to the first
 * mountpoint found in the subtree and stop the walk.
 */
static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
{
	struct dentry **victim = _data;
	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		*victim = dentry;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}
1561
1562
1563
1564
1565
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate (aka detach)
 *
 * Unhash the dentry; then, for a positive dentry, shrink its subtree
 * and detach any mounts found below, repeating until none remain.
 */
void d_invalidate(struct dentry *dentry)
{
	bool had_submounts = false;
	spin_lock(&dentry->d_lock);
	/* Already unhashed: nothing to do. */
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);

	/* Negative dentries need no further work. */
	if (!dentry->d_inode)
		return;

	shrink_dcache_parent(dentry);
	for (;;) {
		struct dentry *victim = NULL;
		d_walk(dentry, &victim, find_submount);
		if (!victim) {
			/* shrink again if detached mounts had pinned dentries */
			if (had_submounts)
				shrink_dcache_parent(dentry);
			return;
		}
		had_submounts = true;
		detach_mounts(victim);
		dput(victim);
	}
}
EXPORT_SYMBOL(d_invalidate);
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name; NULL means use the fallback "/" name
 *
 * Allocates a dentry that is not yet attached to any parent (it is its
 * own parent) and is unhashed.  Short names are stored inline in
 * d_iname; longer ones get a refcounted external_name allocation.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct external_name *ext = NULL;
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * Keep the last byte of d_iname zeroed so that word-at-a-time
	 * name compares reading past the string see deterministic bytes.
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		name = &slash_name;
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		/* name[1] in the offsetof covers the terminating NUL */
		size_t size = offsetof(struct external_name, name[1]);

		ext = kmalloc(size + name->len, GFP_KERNEL_ACCOUNT);
		if (!ext) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&ext->u.count, 1);
		dname = ext->name;
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/*
	 * Publish the name pointer only after the bytes (incl. NUL) are
	 * written; pairs with the READ_ONCE() in dentry_cmp().
	 */
	smp_store_release(&dentry->d_name.name, dname);

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;	/* own parent until d_alloc() links it */
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	if (unlikely(ext)) {
		/* Account external names as indirectly reclaimable memory;
		 * undone in __d_free_external_name(). */
		pg_data_t *pgdat = page_pgdat(virt_to_page(ext));
		mod_node_page_state(pgdat, NR_INDIRECTLY_RECLAIMABLE_BYTES,
				    ksize(ext));
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	dentry->d_flags |= DCACHE_RCUACCESS;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
1714EXPORT_SYMBOL(d_alloc);
1715
/* Allocate an unhashed, parentless (IS_ROOT) dentry named "/". */
struct dentry *d_alloc_anon(struct super_block *sb)
{
	return __d_alloc(sb, NULL);
}
1720EXPORT_SYMBOL(d_alloc_anon);
1721
1722struct dentry *d_alloc_cursor(struct dentry * parent)
1723{
1724 struct dentry *dentry = d_alloc_anon(parent->d_sb);
1725 if (dentry) {
1726 dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
1727 dentry->d_parent = dget(parent);
1728 }
1729 return dentry;
1730}
1731
1732
1733
1734
1735
1736
1737
1738
1739
/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
1744EXPORT_SYMBOL(d_alloc_pseudo);
1745
/*
 * Allocate a child dentry of @parent for a NUL-terminated C-string
 * name: compute the combined hash/length pair, then call d_alloc().
 */
struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.hash_len = hashlen_string(parent, name);
	return d_alloc(parent, &q);
}
1754EXPORT_SYMBOL(d_alloc_name);
1755
/*
 * Attach a dentry_operations table to a freshly-allocated dentry and
 * cache the presence of each op as a DCACHE_OP_* flag so hot paths can
 * test d_flags instead of chasing pointers.  Must only be called once,
 * before the dentry is visible to lookups (hence the WARN_ON_ONCEs).
 */
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE	|
				DCACHE_OP_REAL));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
	if (op->d_real)
		dentry->d_flags |= DCACHE_OP_REAL;

}
1784EXPORT_SYMBOL(d_set_d_op);
1785
1786
1787
1788
1789
1790
1791
1792
1793
/**
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry: The dentry to mark
 *
 * Mark a dentry as falling through to a lower layer in a layered
 * filesystem.  Takes d_lock for the flag update.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
1800EXPORT_SYMBOL(d_set_fallthru);
1801
/*
 * Work out the DCACHE_*_TYPE flags for a dentry being attached to
 * @inode (or DCACHE_MISS_TYPE for a negative dentry).  As a side
 * effect, caches IOP_LOOKUP / IOP_NOFOLLOW in inode->i_opflags the
 * first time an inode of that kind is seen, so later calls can skip
 * the i_op dereferences.
 */
static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			/* directories without ->lookup are "automount points" */
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}
1836
/*
 * Attach @inode to @dentry.  Caller holds inode->i_lock; we take
 * d_lock and bump d_seq around the flag/inode update so RCU-walk
 * lookups see the dentry type and d_inode change atomically.
 */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
		__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	}
}
1876EXPORT_SYMBOL(d_instantiate);
1877
1878
1879
1880
1881
1882
1883
/*
 * This should be equivalent to d_instantiate() + unlock_new_inode(),
 * with lockdep-related part of unlock_new_inode() done before
 * anything else.  Use that instead of open-coding d_instantiate()/
 * unlock_new_inode() combinations.
 */
void d_instantiate_new(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	BUG_ON(!inode);
	lockdep_annotate_inode_mutex_key(inode);
	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	/* order i_state clear before waking __I_NEW waiters */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
1898EXPORT_SYMBOL(d_instantiate_new);
1899
1900struct dentry *d_make_root(struct inode *root_inode)
1901{
1902 struct dentry *res = NULL;
1903
1904 if (root_inode) {
1905 res = d_alloc_anon(root_inode->i_sb);
1906 if (res) {
1907 res->d_flags |= DCACHE_RCUACCESS;
1908 d_instantiate(res, root_inode);
1909 } else {
1910 iput(root_inode);
1911 }
1912 }
1913 return res;
1914}
1915EXPORT_SYMBOL(d_make_root);
1916
/*
 * Attach a pre-allocated anonymous dentry to @inode, unless the inode
 * already has an alias — in that case drop @dentry and return the
 * existing alias instead.  Consumes the inode reference when an
 * existing alias is returned.  With @disconnected false the dentry is
 * also hashed onto the superblock's s_roots list (filesystem roots).
 */
static struct dentry *__d_instantiate_anon(struct dentry *dentry,
					   struct inode *inode,
					   bool disconnected)
{
	struct dentry *res;
	unsigned add_flags;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(dentry);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&dentry->d_lock);
	__d_set_inode_and_type(dentry, inode, add_flags);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	if (!disconnected) {
		hlist_bl_lock(&dentry->d_sb->s_roots);
		hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
		hlist_bl_unlock(&dentry->d_sb->s_roots);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);

	return dentry;

 out_iput:
	iput(inode);
	return res;
}
1956
/* Instantiate an anonymous dentry as DCACHE_DISCONNECTED. */
struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
{
	return __d_instantiate_anon(dentry, inode, true);
}
1961EXPORT_SYMBOL(d_instantiate_anon);
1962
/*
 * Common worker for d_obtain_alias()/d_obtain_root(): find an existing
 * alias for @inode or allocate and instantiate an anonymous one.
 * Consumes the inode reference on every path (including errors);
 * NULL/ERR_PTR inodes are propagated as ERR_PTR returns.
 */
static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
{
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = d_alloc_anon(inode->i_sb);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	return __d_instantiate_anon(tmp, inode, disconnected);

out_iput:
	iput(inode);
	return res;
}
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations.  The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry.  If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, true);
}
2012EXPORT_SYMBOL(d_obtain_alias);
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry.  If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, false);
}
2033EXPORT_SYMBOL(d_obtain_root);
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode:  the inode case-insensitive lookup has found
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.  Consumes the inode reference on all paths.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		found = d_alloc_parallel(dentry->d_parent, name,
					dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	res = d_splice_alias(inode, found);
	if (res) {
		dput(found);
		return res;
	}
	return found;
}
2086EXPORT_SYMBOL(d_add_ci);
2087
2088
/*
 * Does @dentry's name match @name, under @parent's comparison rules?
 * Uses ->d_compare() when the parent has DCACHE_OP_COMPARE set,
 * otherwise a plain length check plus dentry_cmp().
 */
static inline bool d_same_name(const struct dentry *dentry,
				const struct dentry *parent,
				const struct qstr *name)
{
	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
		if (dentry->d_name.len != name->len)
			return false;
		return dentry_cmp(dentry, name->name, name->len) == 0;
	}
	return parent->d_op->d_compare(dentry,
				       dentry->d_name.len, dentry->d_name.name,
				       name) == 0;
}
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking).  This is not to be used outside
 * core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, i.e. with vfsmount
 * lock and rcu_read_lock held.  The returned dentry must not be stored
 * into without taking d_lock and checking d_seq against the sequence
 * number returned here.
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 *
		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
		 * we are still guaranteed NUL-termination of ->d_name.name.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			int tlen;
			const char *tname;
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			tlen = dentry->d_name.len;
			tname = dentry->d_name.name;
			/* we want a consistent (name,len) pair */
			if (read_seqcount_retry(&dentry->d_seq, seq)) {
				cpu_relax();
				goto seqretry;
			}
			if (parent->d_op->d_compare(dentry,
						    tlen, tname, name) != 0)
				continue;
		} else {
			if (dentry->d_name.hash_len != hashlen)
				continue;
			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
				continue;
		}
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2228{
2229 struct dentry *dentry;
2230 unsigned seq;
2231
2232 do {
2233 seq = read_seqbegin(&rename_lock);
2234 dentry = __d_lookup(parent, name);
2235 if (dentry)
2236 break;
2237 } while (read_seqretry(&rename_lock, seq));
2238 return dentry;
2239}
2240EXPORT_SYMBOL(d_lookup);
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, e.g. with the rename_lock
 * protection as in d_lookup().
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = d_hash(hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();
	
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		if (!d_same_name(dentry, parent, name))
			goto next;

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
 	}
 	rcu_read_unlock();

 	return found;
}
2312
2313
2314
2315
2316
2317
2318
2319
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(dir, name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
2335EXPORT_SYMBOL(d_hash_and_lookup);
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode = dentry->d_inode;
	int isdir = d_is_dir(dentry);

	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	/*
	 * Are we the only user?
	 */
	if (dentry->d_lockref.count == 1) {
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		/* dentry_unlink_inode() drops both locks */
		dentry_unlink_inode(dentry);
	} else {
		__d_drop(dentry);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
	}
	fsnotify_nameremove(dentry, isdir);
}
2378EXPORT_SYMBOL(d_delete);
2379
/*
 * Add @entry to its hash chain.  Caller holds entry->d_lock; the
 * chain itself is protected by the per-bucket bit spinlock.
 */
static void __d_rehash(struct dentry *entry)
{
	struct hlist_bl_head *b = d_hash(entry->d_name.hash);

	hlist_bl_lock(b);
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}
2388
2389
2390
2391
2392
2393
2394
2395
/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	__d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
2402EXPORT_SYMBOL(d_rehash);
2403
/*
 * Bump dir->i_dir_seq to an odd value, marking a directory addition
 * in progress; d_alloc_parallel() retries while it observes an odd
 * value.  Spins until the cmpxchg wins.  Returns the previous (even)
 * sequence, to be passed to end_dir_add().
 */
static inline unsigned start_dir_add(struct inode *dir)
{

	for (;;) {
		unsigned n = dir->i_dir_seq;
		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
			return n;
		cpu_relax();
	}
}
2414
/*
 * Finish a directory addition started by start_dir_add(): release-store
 * the next even sequence value so readers see the update completed.
 */
static inline void end_dir_add(struct inode *dir, unsigned n)
{
	smp_store_release(&dir->i_dir_seq, n + 2);
}
2419
/*
 * Wait on dentry->d_wait until @dentry leaves in-lookup state.
 * Called and returns with dentry->d_lock held; the lock is dropped
 * across each schedule().
 */
static void d_wait_lookup(struct dentry *dentry)
{
	if (d_in_lookup(dentry)) {
		DECLARE_WAITQUEUE(wait, current);
		add_wait_queue(dentry->d_wait, &wait);
		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&dentry->d_lock);
			schedule();
			spin_lock(&dentry->d_lock);
		} while (d_in_lookup(dentry));
	}
}
2433
/*
 * Allocate a dentry for (@parent, @name) while coordinating with other
 * lookups of the same name that may be in flight.  Returns either a
 * new in-lookup dentry (to be finished with d_lookup_done()), an
 * existing dentry found in the dcache or in the in-lookup hash, or an
 * ERR_PTR on allocation failure.
 */
struct dentry *d_alloc_parallel(struct dentry *parent,
				const struct qstr *name,
				wait_queue_head_t *wq)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *new = d_alloc(parent, name);
	struct dentry *dentry;
	unsigned seq, r_seq, d_seq;

	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

retry:
	rcu_read_lock();
	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
	r_seq = read_seqbegin(&rename_lock);
	dentry = __d_lookup_rcu(parent, name, &d_seq);
	if (unlikely(dentry)) {
		/* already in the dcache; try to pin it */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}
		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
			rcu_read_unlock();
			dput(dentry);
			goto retry;
		}
		rcu_read_unlock();
		dput(new);
		return dentry;
	}
	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
		/* a rename raced with us; the miss may be false */
		rcu_read_unlock();
		goto retry;
	}

	if (unlikely(seq & 1)) {
		/* a directory addition is in progress */
		rcu_read_unlock();
		goto retry;
	}

	hlist_bl_lock(b);
	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
		hlist_bl_unlock(b);
		rcu_read_unlock();
		goto retry;
	}

	/*
	 * No changes for the parent since the beginning of d_lookup().
	 * Since all removals from the chain happen with hlist_bl_lock(),
	 * any potential in-lookup matches are going to stay here until
	 * we unlock the chain.  All fields are stable in everything
	 * we encounter.
	 */
	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (!d_same_name(dentry, parent, name))
			continue;
		hlist_bl_unlock(b);
		/* now we can try to grab a reference */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}

		rcu_read_unlock();
		/*
		 * somebody is likely to be still doing lookup for it;
		 * wait for them to finish
		 */
		spin_lock(&dentry->d_lock);
		d_wait_lookup(dentry);
		/*
		 * it's not in-lookup anymore; in principle we should repeat
		 * everything from dcache lookup, but it's likely to be what
		 * d_lookup() would've found anyway.  If they are completely
		 * different, then there's a chance of false negative here;
		 * things can get really complicated, so we verify the match.
		 */
		if (unlikely(dentry->d_name.hash != hash))
			goto mismatch;
		if (unlikely(dentry->d_parent != parent))
			goto mismatch;
		if (unlikely(d_unhashed(dentry)))
			goto mismatch;
		if (unlikely(!d_same_name(dentry, parent, name)))
			goto mismatch;
		/* OK, it *is* a hashed match; return it */
		spin_unlock(&dentry->d_lock);
		dput(new);
		return dentry;
	}
	rcu_read_unlock();
	/* we can't take ->d_lock here; it's OK, though. */
	new->d_flags |= DCACHE_PAR_LOOKUP;
	new->d_wait = wq;
	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
	hlist_bl_unlock(b);
	return new;
mismatch:
	spin_unlock(&dentry->d_lock);
	dput(dentry);
	goto retry;
}
2542EXPORT_SYMBOL(d_alloc_parallel);
2543
/*
 * End a parallel lookup: remove @dentry from the in-lookup hash, wake
 * all waiters, and re-initialize d_u/d_lru for normal use.  Caller
 * holds dentry->d_lock.
 */
void __d_lookup_done(struct dentry *dentry)
{
	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
						 dentry->d_name.hash);
	hlist_bl_lock(b);
	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
	wake_up_all(dentry->d_wait);
	dentry->d_wait = NULL;
	hlist_bl_unlock(b);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_lru);
}
2557EXPORT_SYMBOL(__d_lookup_done);
2558
2559
2560
/* inode->i_lock held if inode is non-NULL */
/*
 * Common worker for d_add() and d_splice_alias(): end any in-lookup
 * state (bracketed by the parent's i_dir_seq), attach @inode if given,
 * and hash the dentry.  Drops inode->i_lock before returning.
 */
static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = NULL;
	unsigned n;
	spin_lock(&dentry->d_lock);
	if (unlikely(d_in_lookup(dentry))) {
		dir = dentry->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(dentry);
	}
	if (inode) {
		unsigned add_flags = d_flags_for_inode(inode);
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
		raw_write_seqcount_begin(&dentry->d_seq);
		__d_set_inode_and_type(dentry, inode, add_flags);
		raw_write_seqcount_end(&dentry->d_seq);
		fsnotify_update_flags(dentry);
	}
	__d_rehash(dentry);
	if (dir)
		end_dir_add(dir, n);
	spin_unlock(&dentry->d_lock);
	if (inode)
		spin_unlock(&inode->i_lock);
}
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 */
void d_add(struct dentry *entry, struct inode *inode)
{
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
	}
	__d_add(entry, inode);
}
2604EXPORT_SYMBOL(d_add);
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
/**
 * d_exact_alias - find and hash an exact unhashed alias
 * @entry: dentry to add
 * @inode: The inode to go with this dentry
 *
 * If an unhashed dentry with the same name/parent and desired
 * inode already exists, hash and return it.  Otherwise, return
 * NULL.
 *
 * Parent directory should be locked.
 */
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
	struct dentry *alias;
	unsigned int hash = entry->d_name.hash;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
			continue;
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			spin_unlock(&alias->d_lock);
			alias = NULL;
		} else {
			__dget_dlock(alias);
			__d_rehash(alias);
			spin_unlock(&alias->d_lock);
		}
		spin_unlock(&inode->i_lock);
		return alias;
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}
2650EXPORT_SYMBOL(d_exact_alias);
2651
/*
 * Exchange the names (and hash/len) of two dentries.  Four cases,
 * depending on whether each name lives in the inline buffer or in an
 * external allocation.  Relies on the inline buffers always being
 * NUL-terminated (see __d_alloc()) so the len+1 memcpy is safe.
 */
static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
2694
/*
 * Replace @dentry's name with a copy of @target's.  External names are
 * shared by bumping their refcount; the old external name (if any) is
 * freed via RCU once its last user drops it.
 */
static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;
	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		call_rcu(&old_name->u.head, __d_free_external_name);
}
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	struct dentry *old_parent, *p;
	struct inode *dir = NULL;
	unsigned n;

	WARN_ON(!dentry->d_inode);
	if (WARN_ON(dentry == target))
		return;

	BUG_ON(d_ancestor(target, dentry));
	old_parent = dentry->d_parent;
	p = d_ancestor(old_parent, target);
	if (IS_ROOT(dentry)) {
		BUG_ON(p);
		spin_lock(&target->d_parent->d_lock);
	} else if (!p) {
		/* target is not a descendent of dentry->d_parent */
		spin_lock(&target->d_parent->d_lock);
		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		BUG_ON(p == dentry);
		spin_lock(&old_parent->d_lock);
		if (p != target)
			spin_lock_nested(&target->d_parent->d_lock,
					DENTRY_D_LOCK_NESTED);
	}
	spin_lock_nested(&dentry->d_lock, 2);
	spin_lock_nested(&target->d_lock, 3);

	if (unlikely(d_in_lookup(target))) {
		dir = target->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(target);
	}

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* unhash both */
	if (!d_unhashed(dentry))
		___d_drop(dentry);
	if (!d_unhashed(target))
		___d_drop(target);

	/* ... and switch them in the tree */
	dentry->d_parent = target->d_parent;
	if (!exchange) {
		copy_name(dentry, target);
		target->d_hash.pprev = NULL;
		dentry->d_parent->d_lockref.count++;
		if (dentry == old_parent)
			dentry->d_flags |= DCACHE_RCUACCESS;
		else
			WARN_ON(!--old_parent->d_lockref.count);
	} else {
		target->d_parent = old_parent;
		swap_names(dentry, target);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		__d_rehash(target);
		fsnotify_update_flags(target);
	}
	list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	__d_rehash(dentry);
	fsnotify_update_flags(dentry);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	if (dir)
		end_dir_add(dir, n);

	if (dentry->d_parent != old_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (dentry != old_parent)
		spin_unlock(&old_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
2820EXPORT_SYMBOL(d_move);
2821
2822
2823
2824
2825
2826
/*
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2850{
2851 struct dentry *p;
2852
2853 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2854 if (p->d_parent == p1)
2855 return p;
2856 }
2857 return NULL;
2858}
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL;
	struct rw_semaphore *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!inode_trylock_shared(alias->d_parent->d_inode))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	if (m2)
		up_read(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has an IS_ROOT alias, then d_move that in
 * place of the given dentry and return it, else simply d_add the inode
 * to the dentry and return NULL.
 *
 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
 * we should error out: directories can't have multiple aliases.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise we
 * return NULL.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode)
		goto out;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			/* The reference to new ensures it remains an alias */
			spin_unlock(&inode->i_lock);
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				write_sequnlock(&rename_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				/* remote rename: move the alias under dentry's parent */
				struct dentry *old_parent = dget(new->d_parent);
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
				dput(old_parent);
			} else {
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
			}
			iput(inode);
			return new;
		}
	}
out:
	__d_add(dentry, inode);
	return NULL;
}
2970EXPORT_SYMBOL(d_splice_alias);
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2989{
2990 bool result;
2991 unsigned seq;
2992
2993 if (new_dentry == old_dentry)
2994 return true;
2995
2996 do {
2997
2998 seq = read_seqbegin(&rename_lock);
2999
3000
3001
3002
3003 rcu_read_lock();
3004 if (d_ancestor(old_dentry, new_dentry))
3005 result = true;
3006 else
3007 result = false;
3008 rcu_read_unlock();
3009 } while (read_seqretry(&rename_lock, seq));
3010
3011 return result;
3012}
3013EXPORT_SYMBOL(is_subdir);
3014
/*
 * d_walk() callback for d_genocide(): drop one reference from every
 * hashed, positive dentry below the root.  DCACHE_GENOCIDE marks
 * dentries already processed so repeated walks drop at most one ref
 * per dentry.
 */
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}
3029
/* Release pinning references on the entire subtree below @parent. */
void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill);
}
3034
3035EXPORT_SYMBOL(d_genocide);
3036
/*
 * Turn an unlinked, unaliased dentry into a tmpfile entry: give it a
 * synthetic "#<ino>" name (which always fits the inline buffer) and
 * instantiate it with @inode.  Also drops the link count the caller's
 * ->tmpfile() bumped.
 */
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
3051EXPORT_SYMBOL(d_tmpfile);
3052
3053static __initdata unsigned long dhash_entries;
/* Parse the "dhash_entries=" boot parameter (dentry hash table size). */
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
3061__setup("dhash_entries=", set_dhash_entries);
3062
static void __init dcache_init_early(void)
{
	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY | HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	/* d_hash() shifts right, so convert to a top-bits shift */
	d_hash_shift = 32 - d_hash_shift;
}
3083
static void __init dcache_init(void)
{
	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.  Only d_iname may be copied to/from userspace.
	 */
	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
		d_iname);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	/* d_hash() shifts right, so convert to a top-bits shift */
	d_hash_shift = 32 - d_hash_shift;
}
3111
3112
3113struct kmem_cache *names_cachep __read_mostly;
3114EXPORT_SYMBOL(names_cachep);
3115
/* Early boot: set up the in-lookup hash and the early dcache/icache. */
void __init vfs_caches_init_early(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);

	dcache_init_early();
	inode_init_early();
}
3126
/* Main VFS cache initialization, called once at boot. */
void __init vfs_caches_init(void)
{
	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);

	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}
3140