1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/syscalls.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/fsnotify.h>
22#include <linux/slab.h>
23#include <linux/init.h>
24#include <linux/hash.h>
25#include <linux/cache.h>
26#include <linux/export.h>
27#include <linux/mount.h>
28#include <linux/file.h>
29#include <asm/uaccess.h>
30#include <linux/security.h>
31#include <linux/seqlock.h>
32#include <linux/swap.h>
33#include <linux/bootmem.h>
34#include <linux/fs_struct.h>
35#include <linux/hardirq.h>
36#include <linux/bit_spinlock.h>
37#include <linux/rculist_bl.h>
38#include <linux/prefetch.h>
39#include <linux/ratelimit.h>
40#include <linux/list_lru.h>
41#include <linux/kasan.h>
42
43#include "internal.h"
44#include "mount.h"
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
/*
 * Tunable (vfs_cache_pressure): relative weight of dcache/icache
 * reclaim vs. page cache reclaim; 100 is the neutral default.
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

/*
 * rename_lock: global seqlock guarding d_parent/d_name consistency
 * against concurrent renames; lockless path walkers read it as a
 * seqcount and retry on change.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

/* slab cache from which all struct dentry objects are allocated */
static struct kmem_cache *dentry_cache __read_mostly;
92
93
94
95
96
97
98
99
100
101
/*
 * Global dentry hash table, sized at boot.  d_hash() keeps the high
 * bits of the full 32-bit name hash to pick a chain.
 */
static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

/* Map a 32-bit name hash to its bucket in dentry_hashtable. */
static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> (32 - d_hash_shift));
}

/* Small, fixed-size hash for in-lookup (not yet instantiated) dentries. */
#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	/* mix the parent pointer into the name hash to spread buckets */
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
121
122
123
/* Statistics exposed via /proc/sys/fs/dentry-state. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

/* Per-CPU counters; summed on demand by get_nr_dentry{,_unused}(). */
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
130
131#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
132
133
134
135
136
137
138
139
140
141
142
143
144
145static long get_nr_dentry(void)
146{
147 int i;
148 long sum = 0;
149 for_each_possible_cpu(i)
150 sum += per_cpu(nr_dentry, i);
151 return sum < 0 ? 0 : sum;
152}
153
154static long get_nr_dentry_unused(void)
155{
156 int i;
157 long sum = 0;
158 for_each_possible_cpu(i)
159 sum += per_cpu(nr_dentry_unused, i);
160 return sum < 0 ? 0 : sum;
161}
162
/*
 * sysctl handler for /proc/sys/fs/dentry-state: refresh the global
 * counters from the per-CPU ones, then delegate formatting/parsing to
 * the generic unsigned-long vector helper.
 */
int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
170#endif
171
172
173
174
175
176#ifdef CONFIG_DCACHE_WORD_ACCESS
177
178#include <asm/word-at-a-time.h>
179
180
181
182
183
184
185
186
187
/*
 * Compare dentry name @cs with user-supplied name @ct, @tcount bytes,
 * one machine word at a time.  Returns 0 on match, 1 on mismatch.
 *
 * @cs is the dentry name: always word-aligned and NUL-padded, so whole
 * words may be read directly.  @ct may be unaligned and need not be
 * padded, so it is read with load_unaligned_zeropad(), which tolerates
 * crossing into an unmapped page.  The final partial word is compared
 * under a byte mask built from the remaining count.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	/* tail: only the low @tcount bytes of the last word matter */
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
208
209#else
210
/*
 * Byte-at-a-time fallback name comparison (no word-at-a-time support).
 * Returns 0 when the first @tcount bytes of @cs and @ct are identical,
 * 1 at the first differing byte.  Callers pass @tcount >= 1.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	for (; tcount; tcount--, cs++, ct++) {
		if (*cs != *ct)
			return 1;
	}
	return 0;
}
222
223#endif
224
/*
 * Compare a dentry's name against @ct/@tcount.
 *
 * Safe under RCU walk racing with a rename: the name pointer is
 * fetched with lockless_dereference() so that the pointer load is
 * ordered before the byte loads it points to.  A concurrent rename
 * may switch d_name.name between the inline buffer and an external
 * name; the worst case is a spurious mismatch, which the seqcount
 * retry in the caller sorts out.
 */
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs = lockless_dereference(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}
247
/*
 * Out-of-line dentry name.  Names longer than DNAME_INLINE_LEN-1 live
 * in one of these; refcounted so a name can be shared across rename,
 * with the refcount storage reused as an rcu_head when freeing.
 */
struct external_name {
	union {
		atomic_t count;		/* references to this name */
		struct rcu_head head;	/* reused for RCU-deferred free */
	} u;
	unsigned char name[];		/* NUL-terminated name bytes */
};

/* Map d_name.name back to its containing external_name. */
static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}

/* RCU callback: free a dentry whose name is stored inline. */
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

/* RCU callback: free both the external name and the dentry. */
static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

/* Does this dentry use an out-of-line (kmalloc'ed) name? */
static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}
279
/*
 * Install @inode and the matching DCACHE_*_TYPE bits in one go.  The
 * flags update is a single WRITE_ONCE so lockless readers never see a
 * half-updated type; caller holds d_lock.
 */
static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}

/*
 * Inverse of __d_set_inode_and_type(): clear the type bits (back to
 * miss type) and detach the inode.  Caller holds d_lock.
 */
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
}
301
/*
 * Final disposal of a dead dentry.  Must already be unhashed and off
 * the inode alias list.  Drops the (possibly shared) external name,
 * and defers the actual kfree through RCU whenever lockless walkers
 * might still hold a reference to the memory.
 */
static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			/* last user of the name: free name + dentry after GP */
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
318
319
320
321
322
/*
 * Release the dentry's inode, using the filesystem's d_iput() if one
 * is provided, otherwise plain iput().  Called with both d_lock and
 * the inode's i_lock held; drops both before touching the inode.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	bool hashed = !d_unhashed(dentry);

	/* only bump d_seq if lockless lookups could still find us */
	if (hashed)
		raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	if (hashed)
		raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
/*
 * dentry LRU handling.
 *
 * A dentry on its superblock's list_lru has DCACHE_LRU_LIST set.  Once
 * moved to a private shrink list, DCACHE_SHRINK_LIST is set as well and
 * the dentry is linked through d_lru on that private list instead.
 * These helpers keep the two flags, the nr_dentry_unused counter and
 * the list membership consistent; all run with d_lock held.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
/* put an unreferenced dentry on its sb's LRU */
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

/* take a dentry off its sb's LRU (it is being reused or killed) */
static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

/* remove from a private shrink list (not on the list_lru anymore) */
static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

/* add a dentry (currently on neither list) to a private shrink list */
static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * isolate variants: called from the list_lru walk callbacks with the
 * lru lock held, so they use the list_lru_isolate*() primitives rather
 * than list_lru_add/del.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_lru_isolate(lru, &dentry->d_lru);
}

/* move from the list_lru onto a private shrink list during a walk */
static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}

/* add to LRU only if not already there (idempotent for dput) */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
}
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
/*
 * __d_drop - unhash a dentry so lookups no longer find it
 * @dentry: dentry to drop
 *
 * The dentry itself stays allocated until its refcount drops; only
 * the hash linkage is removed.  d_seq is invalidated so in-flight
 * RCU-walk lookups retry instead of using the stale entry.
 * Caller must hold d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/*
		 * Disconnected (IS_ROOT) dentries are hashed on the
		 * superblock's s_anon list rather than the global
		 * hash; both use the same hlist_bl locking.
		 */
		if (unlikely(IS_ROOT(dentry)))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;	/* marks it d_unhashed() */
		hlist_bl_unlock(b);

		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);

/* Locked wrapper around __d_drop(). */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
470
/*
 * Remove a dying dentry from its parent's d_subdirs list, with care
 * for directory cursors.  Called with the dentry's and the parent's
 * d_lock held.
 */
static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/*
	 * Mark the dentry killed first so d_walk() ascending through
	 * it knows to skip; then unlink WITHOUT poisoning the list
	 * pointers - cursors may still follow our d_child.next.
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * Our stale d_child.next may point at a cursor; a walker
	 * resuming through us would then land on dead entries as the
	 * cursor moves on.  Skip our forward pointer past any run of
	 * DCACHE_DENTRY_CURSOR entries so resumption is always safe.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}
508
/*
 * Kill a zero-refcount dentry: unhash it, detach it from parent and
 * inode, and free it (unless a shrinker still has it on a shrink
 * list, in which case the shrinker frees it later via
 * DCACHE_MAY_FREE).  Called with d_lock, the parent's d_lock (if
 * any) and the inode's i_lock (if any) held; all are dropped.
 */
static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/* poison the lockref so lockless grabbers fail from now on */
	lockref_mark_dead(&dentry->d_lockref);

	/* fs gets one last look while the dentry is still intact */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		/* shrink-list entries are owned by the shrinker */
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}

	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);	/* drops d_lock + i_lock */
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/*
	 * Retake d_lock to arbitrate with a shrinker that may have the
	 * dentry on its private list: if so, let IT do the freeing.
	 */
	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
}
554
555
556
557
558
559
560
/*
 * Finish off a zero-refcount dentry if the extra locks (inode i_lock,
 * parent d_lock) can be taken without blocking.  Returns the parent
 * (for the caller to dput in turn) on success, or the dentry itself
 * on trylock failure - in which case d_lock has been dropped and the
 * caller must retry.  Called with d_lock held; always releases it.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto failed;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			if (inode)
				spin_unlock(&inode->i_lock);
			goto failed;
		}
	}

	__dentry_kill(dentry);
	return parent;

failed:
	spin_unlock(&dentry->d_lock);
	return dentry;
}
586
/*
 * Acquire the parent's d_lock given a dentry whose d_lock is held.
 * Returns the locked parent, or NULL for a root dentry or one whose
 * lockref is already dead (count < 0).  Fast path is a trylock; the
 * slow path drops d_lock (under RCU so the dentry can't be freed) and
 * takes the locks in the correct parent-then-child order, retrying if
 * the dentry was reparented meanwhile.  On return with a non-NULL
 * parent, both d_locks are held.
 */
static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (unlikely(dentry->d_lockref.count < 0))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = ACCESS_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * A rename may have moved us under a different parent between
	 * the read and the lock; re-check under the lock and retry.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}
620
621
622
623
624
625
626
627
628
/*
 * Lockless refcount-drop fast path for dput().
 *
 * Returns true if the reference was dropped and nothing more needs
 * doing.  Returns false with d_lock held and the refcount left at 1
 * (NOT decremented) when the caller must run the slow retain/kill
 * logic.  Runs under rcu_read_lock() so the dentry memory stays valid
 * even while we hold no lock and no reference.
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * Filesystems with a d_delete() hook must always be consulted
	 * when the last reference goes away, so take the lock on the
	 * 1->0 transition rather than trying to stay lockless.
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/* try a pure cmpxchg-based decrement */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * Negative return: the lockref is locked or the arch has no
	 * cmpxchg support - fall back to decrementing under d_lock.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return 1;
		}
		return 0;	/* last ref: slow path, d_lock held */
	}

	/* dropped one of several references: done */
	if (ret)
		return 1;

	/*
	 * We dropped the last reference locklessly.  If the dentry
	 * looks fully retained already (referenced, on the LRU, still
	 * hashed, not disconnected), the slow path would be a no-op -
	 * skip it.  The smp_rmb() orders the d_flags read after the
	 * refcount decrement above.
	 */
	smp_rmb();
	d_flags = ACCESS_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return 1;

	/*
	 * Otherwise take d_lock and re-validate: someone may have
	 * re-grabbed the dentry in the meantime.
	 */
	spin_lock(&dentry->d_lock);

	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return 1;
	}

	/*
	 * Hand off to the slow path with the count restored to 1, as
	 * if the decrement had not happened yet; the caller will
	 * either re-decrement (retain) or kill the dentry.
	 */
	dentry->d_lockref.count = 1;
	return 0;
}
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
/*
 * dput - release a dentry reference
 * @dentry: dentry to release (NULL is a no-op)
 *
 * Drops one reference.  When the last reference goes away the dentry
 * is either retained on the LRU (the common case) or killed - the
 * latter if it is unhashed, disconnected, or the filesystem's
 * d_delete() asks for it.  Killing a dentry releases the parent
 * reference too, so the kill loop may walk up the tree.  May sleep.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	might_sleep();

	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}

	/* slow path: fast_dput() returned with d_lock held, count == 1 */
	rcu_read_unlock();

	WARN_ON(d_in_lookup(dentry));

	/* unhashed or disconnected dentries can't be retained */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	/* retain: mark recently used and park on the LRU */
	if (!(dentry->d_flags & DCACHE_REFERENCED))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	/* dentry_kill() returns the parent to drop next, or the same
	 * dentry again if its trylocks failed and we must retry */
	dentry = dentry_kill(dentry);
	if (dentry) {
		cond_resched();
		goto repeat;
	}
}
EXPORT_SYMBOL(dput);
798
799
800
/* Grab a reference; caller already holds this dentry's d_lock. */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

/* Grab a reference without holding d_lock (lockref fast path). */
static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
810
/*
 * dget_parent - return a referenced copy of dentry's current parent
 * @dentry: child dentry
 *
 * Fast path: read d_parent under RCU, try a lockless refcount bump,
 * then confirm the parent did not change (a rename could reparent us
 * between the read and the grab).  Slow path: lock the candidate
 * parent and re-check identity under its d_lock before bumping.
 */
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	rcu_read_lock();
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		/* still the parent? otherwise undo and fall back */
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * RCU keeps the candidate parent's memory alive while we
	 * take its lock and verify it is still our parent.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
/*
 * Pick one alias of @inode and return it with a reference, or NULL.
 * Prefers a connected, hashed alias; a disconnected IS_ROOT alias is
 * remembered as a fallback and only returned if no better one turns
 * up.  For directories, an unhashed alias is acceptable too.  Caller
 * holds i_lock.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
 		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;	/* keep as fallback */
			} else {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		/* re-check: it may have been dropped since we saw it */
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}
898
899struct dentry *d_find_alias(struct inode *inode)
900{
901 struct dentry *de = NULL;
902
903 if (!hlist_empty(&inode->i_dentry)) {
904 spin_lock(&inode->i_lock);
905 de = __d_find_alias(inode);
906 spin_unlock(&inode->i_lock);
907 }
908 return de;
909}
910EXPORT_SYMBOL(d_find_alias);
911
912
913
914
915
/*
 * d_prune_aliases - kill all unreferenced aliases of an inode
 * @inode: inode whose aliases to prune
 *
 * Any alias with a zero refcount is killed on the spot.  Because
 * __dentry_kill() drops i_lock (via dentry_unlink_inode), the list
 * walk restarts from scratch after every kill.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			/* recheck: lock_parent() may drop d_lock briefly */
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
938
/*
 * Kill every dentry on a private shrink list (built by one of the
 * lru-walk isolate callbacks).  Entries whose refcount was raised in
 * the meantime are simply dropped from the list; entries whose extra
 * locks can't be trylocked are re-queued and retried later.  After
 * each kill, unreferenced ancestors are reaped inline rather than
 * being deferred through dput().
 */
static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry, *parent;

	while (!list_empty(list)) {
		struct inode *inode;
		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		parent = lock_parent(dentry);

		/*
		 * The dispose list is isolated and dentries are not
		 * accounted to the LRU here, so drop it off the list
		 * before doing anything that might re-add it.
		 */
		d_shrink_del(dentry);

		/* got re-referenced while on the shrink list: keep it */
		if (dentry->d_lockref.count > 0) {
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		/* already killed elsewhere; free only if flagged for us */
		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}

		inode = dentry->d_inode;
		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
			/* couldn't get i_lock: put back and retry later */
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		__dentry_kill(dentry);

		/*
		 * We need to prune ancestors too.  This is necessary to
		 * prevent quadratic behavior of shrink_dcache_parent(),
		 * but is also expected to be beneficial in reducing
		 * dentry holders.
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
			parent = lock_parent(dentry);
			if (dentry->d_lockref.count != 1) {
				dentry->d_lockref.count--;
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				break;
			}
			inode = dentry->d_inode;	/* can't be NULL */
			if (unlikely(!spin_trylock(&inode->i_lock))) {
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				cpu_relax();
				continue;
			}
			__dentry_kill(dentry);
			dentry = parent;
		}
	}
}
1018
/*
 * list_lru walk callback for prune_dcache_sb(): decide the fate of
 * one LRU dentry.  Referenced-in-use entries leave the LRU entirely;
 * recently-used entries lose DCACHE_REFERENCED and rotate; everything
 * else moves to the caller's dispose list for killing.
 */
static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * We are inverting the usual lock order (lru lock is held,
	 * d_lock is not), so only a trylock is safe; skip contended
	 * entries rather than risking deadlock.
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use: take them off the LRU
	 * altogether; dput() will put them back when the ref drops.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * This gives the dentry a second chance: it stays on
		 * the LRU (rotated to the tail) and is only reclaimed
		 * if it is still unreferenced on the next pass.  Note
		 * the flag was cleared before the lock was dropped, so
		 * a concurrent dput() re-marking it as referenced is
		 * handled correctly on that next pass.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
/*
 * prune_dcache_sb - shrink the dcache for a superblock
 * @sb: superblock to shrink
 * @sc: shrink control (how many entries to scan, which memcg/node)
 *
 * Walks the sb's dentry LRU collecting reclaimable entries onto a
 * private list, then kills them.  Returns the number of entries
 * removed from the LRU.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}
1099
/*
 * list_lru walk callback for shrink_dcache_sb(): unconditionally move
 * every LRU dentry to the dispose list - no second chances, this is
 * the "empty the whole cache" path.
 */
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * Inverted lock order (lru lock held, d_lock not): trylock
	 * only, and skip contended entries.
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1119
1120
1121
1122
1123
1124
1125
1126
1127
/*
 * shrink_dcache_sb - shrink the dcache for a superblock
 * @sb: superblock
 *
 * Empties the superblock's dentry LRU completely, looping because
 * killing entries can put more (their parents) back on the LRU.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, UINT_MAX);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (freed > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
1143
1144
1145
1146
1147
1148
1149
1150
/*
 * Return values for d_walk() callbacks:
 * @D_WALK_CONTINUE: keep walking
 * @D_WALK_QUIT:     stop the walk immediately
 * @D_WALK_NORETRY:  keep walking, but don't restart on seqlock retry
 * @D_WALK_SKIP:     skip this dentry's subtree
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
/*
 * d_walk - walk the dentry tree under @parent
 * @parent: root of the subtree to walk
 * @data:   opaque cookie passed to the callbacks
 * @enter:  called for each dentry (with its d_lock held)
 * @finish: called at the end of a successful full walk (may be NULL)
 *
 * Depth-first walk holding only the current parent's d_lock (plus the
 * child's, nested) at any time.  Consistency against renames is
 * provided by rename_lock: the first pass runs locklessly against the
 * seqcount and, if a rename intervened and @enter never returned
 * D_WALK_NORETRY, the walk restarts taking the seqlock exclusively
 * (seq = 1).
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		/* directory cursors are placeholders, not real entries */
		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			/*
			 * Descend: the child's lock becomes the new
			 * parent lock.  Juggle the lockdep annotations
			 * so the held lock changes nesting level.
			 */
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}

	/*
	 * All done at this level ... ascend and resume the search.
	 * RCU protects the child pointer while we drop its lock and
	 * relock its (possibly already-freed-without-RCU) parent.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;

		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	seq = 1;	/* retake the walk with rename_lock held exclusively */
	goto again;
}
1275
1276
1277
1278
1279
1280
1281
1282static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1283{
1284 int *ret = data;
1285 if (d_mountpoint(dentry)) {
1286 *ret = 1;
1287 return D_WALK_QUIT;
1288 }
1289 return D_WALK_CONTINUE;
1290}
1291
1292
1293
1294
1295
1296
1297
1298
/*
 * have_submounts - check whether a dentry tree contains any mountpoint
 * @parent: root of the subtree to check
 *
 * Returns 1 if anything under @parent (inclusive) is mounted on,
 * 0 otherwise.
 */
int have_submounts(struct dentry *parent)
{
	int ret = 0;

	d_walk(parent, &ret, check_mount, NULL);

	return ret;
}
EXPORT_SYMBOL(have_submounts);
1308
1309
1310
1311
1312
1313
1314
1315
1316
/*
 * d_set_mounted - mark a dentry as a mountpoint
 * @dentry: dentry to mark
 *
 * Under rename_lock, verify that neither the dentry nor any ancestor
 * has been unhashed (racing rmdir/unlink would otherwise let us mount
 * on a dead path), then set DCACHE_MOUNTED.  Returns 0 on success or
 * -ENOENT if the path is no longer valid.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		dentry->d_flags |= DCACHE_MOUNTED;
		ret = 0;
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
/*
 * State for the shrink_dcache_parent() walk: the subtree root (which
 * itself is never collected), the private dispose list being built,
 * and a count of shrinkable dentries seen.
 */
struct select_data {
	struct dentry *start;
	struct list_head dispose;
	int found;
};

/*
 * d_walk() callback: collect unreferenced dentries onto the dispose
 * list.  Dentries already on someone's shrink list are only counted
 * (so the caller knows there is still work pending elsewhere).
 */
static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
1391
1392
1393
1394
1395
1396
1397
/*
 * shrink_dcache_parent - prune the dcache under a given parent
 * @parent: root of the subtree to prune
 *
 * Repeatedly collects and kills unreferenced descendants until a full
 * walk finds nothing left to do.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
1416
/*
 * d_walk() callback for umount: everything should be gone by now, so
 * loudly report any leaf dentry still holding references ("busy
 * inodes after umount" class of bugs).  Never aborts the walk.
 */
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendents; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}
1439
/*
 * Tear down one dentry tree at umount: prune all unreferenced
 * descendants, complain about any survivors, then drop the root
 * itself (the dget reference the caller transferred to us).
 */
static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check, NULL);
	d_drop(dentry);
	dput(dentry);
}
1447
1448
1449
1450
/*
 * shrink_dcache_for_umount - destroy all dentries of a superblock
 * @sb: superblock being unmounted
 *
 * Tears down the root tree, then every remaining anonymous
 * (disconnected, NFS-export style) tree on s_anon.  The superblock
 * must be exclusively locked (s_umount held for write).
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}
1466
/*
 * Walk state for d_invalidate(): the usual shrink collection plus the
 * first mountpoint found (walk stops there so the mount can be
 * detached before continuing).
 */
struct detach_data {
	struct select_data select;
	struct dentry *mountpoint;
};
/* d_walk() callback: grab the first mountpoint, else collect as usual. */
static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
{
	struct detach_data *data = _data;

	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);	/* hold it across detach_mounts() */
		data->mountpoint = dentry;
		return D_WALK_QUIT;
	}

	return select_collect(&data->select, dentry);
}

/*
 * d_walk() finish callback: once a pass finds neither a mountpoint
 * nor anything to shrink, the subtree is quiescent - unhash the root.
 */
static void check_and_drop(void *_data)
{
	struct detach_data *data = _data;

	if (!data->mountpoint && !data->select.found)
		__d_drop(data->select.start);
}
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
/*
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * No-op if already unhashed.  Negative dentries are simply dropped.
 * Otherwise: detach every mount in the subtree, prune all
 * unreferenced descendants, and finally unhash the dentry itself -
 * looping until a pass makes no progress, since shrinking and mount
 * detaching can each expose more work for the other.
 */
void d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		return;
	}

	for (;;) {
		struct detach_data data;

		data.mountpoint = NULL;
		INIT_LIST_HEAD(&data.select.dispose);
		data.select.start = dentry;
		data.select.found = 0;

		d_walk(dentry, &data, detach_and_collect, check_and_drop);

		if (data.select.found)
			shrink_dentry_list(&data.select.dispose);

		if (data.mountpoint) {
			detach_mounts(data.mountpoint);
			dput(data.mountpoint);
		}

		/* quiescent pass: check_and_drop() unhashed it - done */
		if (!data.mountpoint && !data.select.found)
			break;

		cond_resched();
	}
}
EXPORT_SYMBOL(d_invalidate);
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name (NULL means an anonymous "/" name)
 *
 * Allocates a dentry that is NOT attached to a parent (d_parent
 * points at itself) and not hashed.  Returns NULL on allocation
 * failure or if the filesystem's d_init() rejects it.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		static const struct qstr anon = QSTR_INIT("/", 1);
		name = &anon;
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		/* name too long for d_iname: allocate an external_name */
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len,
						  GFP_KERNEL_ACCOUNT);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);
		dname = p->name;
		/*
		 * word-at-a-time compares read whole words past the
		 * NUL; tell KASAN the padding up to the next word
		 * boundary is fair game.
		 */
		if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
			kasan_unpoison_shadow(dname,
				round_up(name->len + 1,	sizeof(unsigned long)));
	} else  {
		dname = dentry->d_iname;
	}	

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry, attaches it under @parent (taking a parent
 * reference) and returns it.  The dentry is not yet hashed and has
 * no inode.  Returns NULL on allocation failure.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	dentry->d_flags |= DCACHE_RCUACCESS;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);
1662
/*
 * Allocate a directory cursor (the placeholder readdir uses to track
 * its position in d_subdirs).  Holds a parent reference but is NOT
 * put on the parent's child list here.
 */
struct dentry *d_alloc_cursor(struct dentry * parent)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
	if (dentry) {
		dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
		dentry->d_parent = dget(parent);
	}
	return dentry;
}
1672
1673
1674
1675
1676
1677
1678
1679
1680
/**
 * d_alloc_pseudo - allocate a dentry for a pseudo filesystem
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For filesystems that will never be mountable: the dentry is
 * parentless and will never be hashed.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
EXPORT_SYMBOL(d_alloc_pseudo);

/* d_alloc() from a NUL-terminated C string: builds the qstr (hash
 * computed with the parent as salt) and delegates. */
struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.hash_len = hashlen_string(parent, name);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
1696
1697void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1698{
1699 WARN_ON_ONCE(dentry->d_op);
1700 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1701 DCACHE_OP_COMPARE |
1702 DCACHE_OP_REVALIDATE |
1703 DCACHE_OP_WEAK_REVALIDATE |
1704 DCACHE_OP_DELETE |
1705 DCACHE_OP_REAL));
1706 dentry->d_op = op;
1707 if (!op)
1708 return;
1709 if (op->d_hash)
1710 dentry->d_flags |= DCACHE_OP_HASH;
1711 if (op->d_compare)
1712 dentry->d_flags |= DCACHE_OP_COMPARE;
1713 if (op->d_revalidate)
1714 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1715 if (op->d_weak_revalidate)
1716 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1717 if (op->d_delete)
1718 dentry->d_flags |= DCACHE_OP_DELETE;
1719 if (op->d_prune)
1720 dentry->d_flags |= DCACHE_OP_PRUNE;
1721 if (op->d_real)
1722 dentry->d_flags |= DCACHE_OP_REAL;
1723
1724}
1725EXPORT_SYMBOL(d_set_d_op);
1726
1727
1728
1729
1730
1731
1732
1733
1734
/**
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry: the dentry to mark
 *
 * Sets DCACHE_FALLTHRU under d_lock.  NOTE(review): presumably used by
 * union/overlay-style lookups to redirect to a lower layer -- confirm
 * with the callers, which are not visible in this file section.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);
1742
1743static unsigned d_flags_for_inode(struct inode *inode)
1744{
1745 unsigned add_flags = DCACHE_REGULAR_TYPE;
1746
1747 if (!inode)
1748 return DCACHE_MISS_TYPE;
1749
1750 if (S_ISDIR(inode->i_mode)) {
1751 add_flags = DCACHE_DIRECTORY_TYPE;
1752 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1753 if (unlikely(!inode->i_op->lookup))
1754 add_flags = DCACHE_AUTODIR_TYPE;
1755 else
1756 inode->i_opflags |= IOP_LOOKUP;
1757 }
1758 goto type_determined;
1759 }
1760
1761 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1762 if (unlikely(inode->i_op->get_link)) {
1763 add_flags = DCACHE_SYMLINK_TYPE;
1764 goto type_determined;
1765 }
1766 inode->i_opflags |= IOP_NOFOLLOW;
1767 }
1768
1769 if (unlikely(!S_ISREG(inode->i_mode)))
1770 add_flags = DCACHE_SPECIAL_TYPE;
1771
1772type_determined:
1773 if (unlikely(IS_AUTOMOUNT(inode)))
1774 add_flags |= DCACHE_NEED_AUTOMOUNT;
1775 return add_flags;
1776}
1777
/*
 * Bind @inode to @dentry: link the dentry onto the inode's alias list and
 * publish d_inode plus the type flags derived from the inode.
 *
 * Caller must hold inode->i_lock (protects i_dentry); the dentry must not
 * be in the in-lookup state.  d_inode and the type bits are changed inside
 * a d_seq write section so lockless readers see them change atomically.
 */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807void d_instantiate(struct dentry *entry, struct inode * inode)
1808{
1809 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1810 if (inode) {
1811 security_d_instantiate(entry, inode);
1812 spin_lock(&inode->i_lock);
1813 __d_instantiate(entry, inode);
1814 spin_unlock(&inode->i_lock);
1815 }
1816}
1817EXPORT_SYMBOL(d_instantiate);
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1829{
1830 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1831
1832 security_d_instantiate(entry, inode);
1833 spin_lock(&inode->i_lock);
1834 if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1835 spin_unlock(&inode->i_lock);
1836 iput(inode);
1837 return -EBUSY;
1838 }
1839 __d_instantiate(entry, inode);
1840 spin_unlock(&inode->i_lock);
1841
1842 return 0;
1843}
1844EXPORT_SYMBOL(d_instantiate_no_diralias);
1845
1846struct dentry *d_make_root(struct inode *root_inode)
1847{
1848 struct dentry *res = NULL;
1849
1850 if (root_inode) {
1851 res = __d_alloc(root_inode->i_sb, NULL);
1852 if (res)
1853 d_instantiate(res, root_inode);
1854 else
1855 iput(root_inode);
1856 }
1857 return res;
1858}
1859EXPORT_SYMBOL(d_make_root);
1860
1861static struct dentry * __d_find_any_alias(struct inode *inode)
1862{
1863 struct dentry *alias;
1864
1865 if (hlist_empty(&inode->i_dentry))
1866 return NULL;
1867 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1868 __dget(alias);
1869 return alias;
1870}
1871
1872
1873
1874
1875
1876
1877
1878
1879struct dentry *d_find_any_alias(struct inode *inode)
1880{
1881 struct dentry *de;
1882
1883 spin_lock(&inode->i_lock);
1884 de = __d_find_any_alias(inode);
1885 spin_unlock(&inode->i_lock);
1886 return de;
1887}
1888EXPORT_SYMBOL(d_find_any_alias);
1889
/*
 * Find or create a dentry for @inode, for use by d_obtain_alias() and
 * d_obtain_root().  @disconnected controls whether a newly created dentry
 * gets DCACHE_DISCONNECTED.
 *
 * Consumes the caller's inode reference: it is transferred into the
 * dentry on the allocation path, and dropped via iput() on every other
 * path (existing alias found, error, or lost race).
 */
static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
{
	struct dentry *tmp;
	struct dentry *res;
	unsigned add_flags;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	/* fast path: an alias already exists */
	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, NULL);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	security_d_instantiate(tmp, inode);
	spin_lock(&inode->i_lock);
	/* re-check under i_lock: someone may have attached an alias meanwhile */
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&tmp->d_lock);
	__d_set_inode_and_type(tmp, inode, add_flags);
	hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
	/* anonymous dentries live on the per-sb s_anon list, not d_hash */
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);

	return tmp;

 out_iput:
	iput(inode);
	return res;
}
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Same as __d_obtain_alias() with @disconnected == 1: a newly created
 * dentry is marked DCACHE_DISCONNECTED.  Consumes the inode reference;
 * returns an ERR_PTR on failure (-ESTALE for a NULL inode).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, 1);
}
EXPORT_SYMBOL(d_obtain_alias);
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Same as __d_obtain_alias() with @disconnected == 0: a newly created
 * dentry is NOT marked DCACHE_DISCONNECTED (it is expected to act as a
 * root).  Consumes the inode reference; returns an ERR_PTR on failure.
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, 0);
}
EXPORT_SYMBOL(d_obtain_root);
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
/*
 * d_add_ci - lookup or allocate a new dentry with a case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode:  the inode case-insensitive lookup has found
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * Consumes the inode reference on every path that does not hand it to
 * d_splice_alias().  Returns the found/allocated dentry, or an ERR_PTR.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		/*
		 * we are in a parallel lookup: join (or start) an in-lookup
		 * entry for the exact name, reusing the original waitqueue
		 */
		found = d_alloc_parallel(dentry->d_parent, name,
					dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* splice hands our inode reference to the dcache */
	res = d_splice_alias(inode, found);
	if (res) {
		dput(found);
		return res;
	}
	return found;
}
EXPORT_SYMBOL(d_add_ci);
2039
2040
2041static inline bool d_same_name(const struct dentry *dentry,
2042 const struct dentry *parent,
2043 const struct qstr *name)
2044{
2045 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2046 if (dentry->d_name.len != name->len)
2047 return false;
2048 return dentry_cmp(dentry, name->name, name->len) == 0;
2049 }
2050 return parent->d_op->d_compare(dentry,
2051 dentry->d_name.len, dentry->d_name.name,
2052 name) == 0;
2053}
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
/*
 * __d_lookup_rcu - lockless, RCU-walk hash lookup of @name under @parent.
 *
 * Returns the matching dentry (NOT referenced) and stores the d_seq
 * sample that validated it in *@seqp, or returns NULL.  Caller must be
 * in an rcu_read_lock() section and must recheck *@seqp (and take a
 * reference) before trusting the result.
 *
 * The per-entry protocol: sample d_seq first (raw_seqcount_begin), then
 * read the fields; any concurrent rename/move bumps d_seq and either the
 * retry here or the caller's recheck catches it.  Do not reorder these
 * reads relative to the seqcount operations.
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/* walk the hash chain under RCU; entries may be moving under us */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * seq must be sampled before d_parent/d_name/d_hash are
		 * read; raw_seqcount_begin gives us an even (stable) seq
		 * or forces a retry later.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			int tlen;
			const char *tname;
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			tlen = dentry->d_name.len;
			tname = dentry->d_name.name;
			/* the name snapshot is only valid if d_seq is unchanged */
			if (read_seqcount_retry(&dentry->d_seq, seq)) {
				cpu_relax();
				goto seqretry;
			}
			if (parent->d_op->d_compare(dentry,
						    tlen, tname, name) != 0)
				continue;
		} else {
			/* hash_len compares hash and length in one go */
			if (dentry->d_name.hash_len != hashlen)
				continue;
			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
				continue;
		}
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2180{
2181 struct dentry *dentry;
2182 unsigned seq;
2183
2184 do {
2185 seq = read_seqbegin(&rename_lock);
2186 dentry = __d_lookup(parent, name);
2187 if (dentry)
2188 break;
2189 } while (read_seqretry(&rename_lock, seq));
2190 return dentry;
2191}
2192EXPORT_SYMBOL(d_lookup);
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
/*
 * __d_lookup - hash-chain lookup of @name under @parent, without
 * rename_lock protection.
 *
 * The chain is walked under rcu_read_lock() (so a dentry being freed
 * stays readable), but each candidate is then checked under its d_lock,
 * and the reference is taken with the lock held -- so a returned dentry
 * is genuinely alive.  Because rename_lock is NOT taken, a concurrent
 * rename can cause a false negative; d_lookup() wraps this with the
 * seqlock retry to close that hole.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = d_hash(hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		/* cheap reject before taking the lock */
		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		if (!d_same_name(dentry, parent, name))
			goto next;

		/* d_lock held: safe to bump the count directly */
		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
 	}
 	rcu_read_unlock();

 	return found;
}
2264
2265
2266
2267
2268
2269
2270
2271
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On a hit, the per-dentry refcount is incremented and the dentry is
 * returned; NULL on a miss; ERR_PTR if the filesystem's d_hash op fails.
 * The qstr's hash field is (re)computed in place as a side effect.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(dir, name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise remove
 * it from the hash queues so it can be deleted later.
 *
 * Lock ordering note: i_lock nests outside d_lock, but here d_lock is
 * already held, so i_lock is taken with trylock and the whole operation
 * is restarted on contention.  The dentry must be positive (d_inode is
 * dereferenced unconditionally).
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode;
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
again:
	spin_lock(&dentry->d_lock);
	inode = dentry->d_inode;
	isdir = S_ISDIR(inode->i_mode);
	if (dentry->d_lockref.count == 1) {
		if (!spin_trylock(&inode->i_lock)) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();
			goto again;
		}
		/* sole user: drop the inode and make the dentry negative */
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	/* other users exist: just unhash so later dput() can kill it */
	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);

	fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);
2341
/*
 * Insert @entry into its hash chain (bucket derived from d_name.hash).
 * The entry must currently be unhashed (BUG otherwise); caller holds
 * entry->d_lock.  RCU-safe list insertion so lockless lookups may see
 * the entry immediately.
 */
static void __d_rehash(struct dentry *entry)
{
	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
	BUG_ON(!d_unhashed(entry));
	hlist_bl_lock(b);
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}
2350
2351
2352
2353
2354
2355
2356
2357
/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.  Takes d_lock
 * around __d_rehash(); the entry must be unhashed on entry.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	__d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
2365
/*
 * Open a "directory add" section on @dir by making i_dir_seq odd.
 * Spins until it observes an even value and wins the cmpxchg, so at
 * most one adder is inside at a time.  Returns the even value that was
 * seen, to be passed to end_dir_add().  Paired with the i_dir_seq
 * sampling in d_alloc_parallel().
 */
static inline unsigned start_dir_add(struct inode *dir)
{

	for (;;) {
		unsigned n = dir->i_dir_seq;
		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
			return n;
		cpu_relax();
	}
}
2376
/*
 * Close the section opened by start_dir_add(): store the next even
 * value with release semantics so all additions made inside the
 * section are visible before the sequence count advances.
 */
static inline void end_dir_add(struct inode *dir, unsigned n)
{
	smp_store_release(&dir->i_dir_seq, n + 2);
}
2381
/*
 * Sleep (uninterruptibly) until @dentry leaves the in-lookup state.
 * Called and returns with dentry->d_lock held; the lock is dropped
 * around each schedule() and retaken to re-test d_in_lookup().
 * The waker is __d_lookup_done(), via the dentry's d_wait queue.
 */
static void d_wait_lookup(struct dentry *dentry)
{
	if (d_in_lookup(dentry)) {
		DECLARE_WAITQUEUE(wait, current);
		add_wait_queue(dentry->d_wait, &wait);
		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&dentry->d_lock);
			schedule();
			spin_lock(&dentry->d_lock);
		} while (d_in_lookup(dentry));
	}
}
2395
/*
 * d_alloc_parallel - allocate a dentry for a lookup that may race with
 * other lookups of the same name.
 *
 * Returns either a preexisting dentry for (@parent, @name) -- referenced,
 * possibly after waiting for a concurrent lookup of it to finish -- or a
 * brand-new dentry placed on the in-lookup hash with DCACHE_PAR_LOOKUP set
 * and @wq recorded as its waitqueue.  Callers distinguish the two cases
 * with d_in_lookup().  Returns ERR_PTR(-ENOMEM) on allocation failure.
 *
 * Correctness relies on the exact ordering of the i_dir_seq sample,
 * the rename_lock sample, the RCU lookup, and the in-lookup-hash scan
 * under the bucket lock; do not reorder.
 */
struct dentry *d_alloc_parallel(struct dentry *parent,
				const struct qstr *name,
				wait_queue_head_t *wq)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *new = d_alloc(parent, name);
	struct dentry *dentry;
	unsigned seq, r_seq, d_seq;

	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

retry:
	rcu_read_lock();
	/* even i_dir_seq value: no directory addition in progress */
	seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
	r_seq = read_seqbegin(&rename_lock);
	dentry = __d_lookup_rcu(parent, name, &d_seq);
	if (unlikely(dentry)) {
		/* already in the dcache: validate and return it */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}
		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
			rcu_read_unlock();
			dput(dentry);
			goto retry;
		}
		rcu_read_unlock();
		dput(new);
		return dentry;
	}
	/* negative RCU lookup is only trustworthy if no rename ran */
	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
		rcu_read_unlock();
		goto retry;
	}
	hlist_bl_lock(b);
	/* ... and if no directory addition completed since our sample */
	if (unlikely(parent->d_inode->i_dir_seq != seq)) {
		hlist_bl_unlock(b);
		rcu_read_unlock();
		goto retry;
	}
	/*
	 * No changes for the parent since the beginning of d_lookup().
	 * Since all removals from the chain happen with hlist_bl_lock(),
	 * any cursors staying on the chain is fine - a concurrent lookup
	 * of the same name may end up on it, but that is handled below.
	 */
	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (!d_same_name(dentry, parent, name))
			continue;
		hlist_bl_unlock(b);
		/* somebody is likely to be still doing lookup for it */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}
		/* found one: wait for its lookup to finish */
		rcu_read_unlock();
		/*
		 * d_lookup_done() drops the in-lookup hash entry before
		 * waking waiters, so once d_wait_lookup() returns the
		 * dentry is in its final (hashed or dropped) state.
		 */
		spin_lock(&dentry->d_lock);
		d_wait_lookup(dentry);
		/*
		 * the dentry may have changed identity while we slept:
		 * re-verify everything under d_lock
		 */
		if (unlikely(dentry->d_name.hash != hash))
			goto mismatch;
		if (unlikely(dentry->d_parent != parent))
			goto mismatch;
		if (unlikely(d_unhashed(dentry)))
			goto mismatch;
		if (unlikely(!d_same_name(dentry, parent, name)))
			goto mismatch;
		/* OK, it *is* a hashed match; return it */
		spin_unlock(&dentry->d_lock);
		dput(new);
		return dentry;
	}
	rcu_read_unlock();
	/* we can't take ->d_lock here; it's OK, though. */
	new->d_flags |= DCACHE_PAR_LOOKUP;
	new->d_wait = wq;
	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
	hlist_bl_unlock(b);
	return new;
mismatch:
	spin_unlock(&dentry->d_lock);
	dput(dentry);
	goto retry;
}
EXPORT_SYMBOL(d_alloc_parallel);
2499
/*
 * End the in-lookup state of @dentry: clear DCACHE_PAR_LOOKUP, unlink it
 * from the in-lookup hash and wake everyone sleeping in d_wait_lookup().
 * All of that happens under the in-lookup bucket lock so waiters and
 * d_alloc_parallel() scanners see a consistent transition.  Afterwards
 * d_u/d_lru are reinitialized for normal (alias/LRU) use.
 * Caller holds dentry->d_lock.
 */
void __d_lookup_done(struct dentry *dentry)
{
	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
						 dentry->d_name.hash);
	hlist_bl_lock(b);
	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
	wake_up_all(dentry->d_wait);
	dentry->d_wait = NULL;
	hlist_bl_unlock(b);
	/* d_u was the in-lookup hash link; repurpose it as the alias link */
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_lru);
}
EXPORT_SYMBOL(__d_lookup_done);
2514
2515
2516
/* inode->i_lock held if inode is non-NULL */

/*
 * Attach @inode (may be NULL) to @dentry and hash the dentry.  If the
 * dentry was in a parallel lookup, that state is ended inside a
 * start_dir_add()/end_dir_add() section so d_alloc_parallel() callers
 * can detect the addition via i_dir_seq.  Consumes (unlocks) the
 * caller-held inode->i_lock when @inode is non-NULL.
 */
static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = NULL;
	unsigned n;
	spin_lock(&dentry->d_lock);
	if (unlikely(d_in_lookup(dentry))) {
		dir = dentry->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(dentry);
	}
	if (inode) {
		unsigned add_flags = d_flags_for_inode(inode);
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
		/* publish d_inode + type flags atomically wrt d_seq readers */
		raw_write_seqcount_begin(&dentry->d_seq);
		__d_set_inode_and_type(dentry, inode, add_flags);
		raw_write_seqcount_end(&dentry->d_seq);
		fsnotify_update_flags(dentry);
	}
	__d_rehash(dentry);
	if (dir)
		end_dir_add(dir, n);
	spin_unlock(&dentry->d_lock);
	if (inode)
		spin_unlock(&inode->i_lock);
}
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 * i_lock is taken here and released inside __d_add().
 */
void d_add(struct dentry *entry, struct inode *inode)
{
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
	}
	__d_add(entry, inode);
}
EXPORT_SYMBOL(d_add);
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
/**
 * d_exact_alias - find and hash an exact unhashed alias
 * @entry: dentry to copy
 * @inode: The inode to go with this dentry
 *
 * If an unhashed dentry with the same name/parent and desired inode
 * already exists, hash and return it.  Otherwise, return NULL.
 *
 * Parent directory should be locked.
 */
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
	struct dentry *alias;
	unsigned int hash = entry->d_name.hash;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
			continue;
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			/* already hashed: not the case we handle */
			spin_unlock(&alias->d_lock);
			alias = NULL;
		} else {
			/* take a ref and put it back on the hash */
			__dget_dlock(alias);
			__d_rehash(alias);
			spin_unlock(&alias->d_lock);
		}
		spin_unlock(&inode->i_lock);
		return alias;
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}
EXPORT_SYMBOL(d_exact_alias);
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Updates the name in place, byte-for-byte; the lengths MUST match
 * (BUG otherwise) and the parent inode must be held locked.  The copy
 * is done inside a d_seq write section so lockless readers never see a
 * torn name.  Note: this rewrites the name bytes but not the hash, so
 * it is only valid when the hash is unchanged (case-only updates).
 */
void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
{
	BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
2634
/*
 * Exchange the names of @dentry and @target (used by __d_move for the
 * exchange case).  Four cases depending on which side has an external
 * (heap-allocated) name vs an inline d_iname, since inline names cannot
 * simply have their pointers swapped.  Both d_locks are held by the
 * caller (dentry_lock_for_move), inside d_seq write sections.
 */
static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.  Swap the inline buffers
			 * word-by-word; DNAME_INLINE_LEN is asserted to be
			 * a whole number of longs.
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
			kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
2679
/*
 * Make @dentry's name a copy of @target's (non-exchange __d_move case).
 * External names are refcounted, so an external target name is shared by
 * bumping its count rather than copied; inline names are memcpy'd.  The
 * old external name, if any, is freed RCU-delayed once its count drops,
 * so lockless readers still holding the pointer stay safe.
 */
static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;
	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		kfree_rcu(old_name, u.head);
}
2697
/*
 * Take the four locks needed by __d_move: both parents' d_locks (unless
 * shared), then both dentries' d_locks.  Lock ordering: when one parent
 * is an ancestor of the other, the ancestor side is locked first;
 * otherwise target's parent first.  The two dentries themselves are
 * ordered by address to avoid ABBA, with lockdep nesting levels 2/3
 * (parents use 0 and DENTRY_D_LOCK_NESTED).
 * Caller holds rename_lock for writing, which stabilizes d_parent.
 */
static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
		spin_lock(&target->d_parent->d_lock);
	else {
		if (d_ancestor(dentry->d_parent, target->d_parent)) {
			spin_lock(&dentry->d_parent->d_lock);
			spin_lock_nested(&target->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&target->d_parent->d_lock);
			spin_lock_nested(&dentry->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		}
	}
	if (target < dentry) {
		spin_lock_nested(&target->d_lock, 2);
		spin_lock_nested(&dentry->d_lock, 3);
	} else {
		spin_lock_nested(&dentry->d_lock, 2);
		spin_lock_nested(&target->d_lock, 3);
	}
}
2724
/*
 * Undo dentry_lock_for_move().  Called after __d_move has updated the
 * d_parent pointers, hence the self/shared-parent checks: a parent lock
 * is only dropped if it is a distinct lock that was actually taken.
 */
static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
{
	if (target->d_parent != dentry->d_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (target->d_parent != target)
		spin_unlock(&target->d_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries rather than displacing @target
 *
 * Update the dcache to reflect the move of a file name.  Negative
 * dcache entries should not be moved in this way (a warning is printed).
 * Caller must hold rename_lock; all the unhash/rename/rehash steps happen
 * inside d_seq write sections on both dentries so lockless walkers retry.
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	struct inode *dir = NULL;
	unsigned n;
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	BUG_ON(d_ancestor(dentry, target));
	BUG_ON(d_ancestor(target, dentry));

	dentry_lock_for_move(dentry, target);
	/* moving onto an in-lookup target ends its parallel lookup */
	if (unlikely(d_in_lookup(target))) {
		dir = target->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(target);
	}

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* unhash both */
	__d_drop(dentry);
	__d_drop(target);

	/* Switch the names.. */
	if (exchange)
		swap_names(dentry, target);
	else
		copy_name(dentry, target);

	/* rehash in new place(s) -- target stays unhashed unless exchanging */
	__d_rehash(dentry);
	if (exchange)
		__d_rehash(target);

	/* ... and switch them in the tree */
	if (IS_ROOT(dentry)) {
		/* splicing a tree: dentry was a root, gets target's parent */
		dentry->d_flags |= DCACHE_RCUACCESS;
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		list_del_init(&target->d_child);
		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	} else {
		/* swapping two dentries */
		swap(dentry->d_parent, target->d_parent);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
		if (exchange)
			fsnotify_update_flags(target);
		fsnotify_update_flags(dentry);
	}

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	if (dir)
		end_dir_add(dir, n);
	dentry_unlock_for_move(dentry, target);
}
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move; rename_lock is taken here.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
2839
2840
2841
2842
2843
2844
/*
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 *
 * Both dentries are expected to be positive and non-root (WARNed on,
 * not enforced).  Performs __d_move in exchange mode under rename_lock.
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2868{
2869 struct dentry *p;
2870
2871 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2872 if (p->d_parent == p1)
2873 return p;
2874 }
2875 return NULL;
2876}
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 *
 * Moves @alias into @dentry's place.  When the two have different
 * parents, s_vfs_rename_mutex and the alias's parent-dir lock are
 * needed, but only trylocked here (the caller holds locks that would
 * make blocking deadlock-prone); -ESTALE is returned on contention so
 * the caller can give up.
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL;
	struct rw_semaphore *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!inode_trylock_shared(alias->d_parent->d_inode))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	if (m2)
		up_read(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative, unhashed dentry to splice @inode into
 *
 * If @inode is a directory and already has a dentry (directories may have
 * only one alias), that existing alias is moved into @dentry's place and
 * returned; -ELOOP is returned if doing so would create a directory loop,
 * and -ESTALE if the cross-parent move could not take its locks.  Otherwise
 * @dentry is instantiated with @inode (NULL @inode just hashes the negative
 * dentry) and NULL is returned.  Consumes the inode reference whenever a
 * non-NULL result is returned.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode)
		goto out;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			/* The reference to new ensures it remains an alias */
			spin_unlock(&inode->i_lock);
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				/* moving new here would create a loop */
				write_sequnlock(&rename_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				/* connected alias elsewhere: move it here */
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
			} else {
				/* disconnected root alias: just splice it in */
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
			}
			iput(inode);
			return new;
		}
	}
out:
	__d_add(dentry, inode);
	return NULL;
}
EXPORT_SYMBOL(d_splice_alias);
2987
/*
 * Copy @namelen bytes of @str immediately before *@buffer, moving both
 * the buffer pointer and the remaining-length counter backwards.
 *
 * Note: *@buflen is consumed even when the copy does not fit; callers
 * only rely on the cursor when 0 is returned.
 */
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	char *dst;

	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;

	dst = *buffer - namelen;
	memcpy(dst, str, namelen);
	*buffer = dst;
	return 0;
}
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
/**
 * prepend_name - prepend a pathname in front of current buffer pointer
 * @buffer: buffer pointer
 * @buflen: allocated length of the buffer
 * @name:   name string and length qstr structure
 *
 * Prepends "/<name>".  Designed to run locklessly against a concurrent
 * rename: name/len are each read once (ACCESS_ONCE) and the byte copy
 * stops at an embedded NUL, so a name that is shortened under us cannot
 * overrun -- the caller detects the race via rename_lock afterwards.
 */
static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
{
	const char *dname = ACCESS_ONCE(name->name);
	u32 dlen = ACCESS_ONCE(name->len);
	char *p;

	/* pairs with the publishing store of the name pointer */
	smp_read_barrier_depends();

	*buflen -= dlen + 1;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	p = *buffer -= dlen + 1;
	*p++ = '/';
	while (dlen--) {
		char c = *dname++;
		if (!c)
			break;
		*p++ = c;
	}
	return 0;
}
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * Walks from @path up to @root, prepending each component, crossing
 * mount points via mnt_mountpoint.  Runs locklessly first (RCU plus
 * the rename_lock and mount_lock sequence counts) and falls back to
 * taking the locks on contention (read_seqbegin_or_lock pattern; an
 * odd seq value means "locked pass").
 *
 * Returns 0 on success, >0 if the path was truncated at an unreachable
 * root (1/2/3 distinguish mounted/unmounted/escaped cases), or
 * -ENAMETOOLONG.
 */
static int prepend_path(const struct path *path,
			const struct path *root,
			char **buffer, int *buflen)
{
	struct dentry *dentry;
	struct vfsmount *vfsmnt;
	struct mount *mnt;
	int error = 0;
	unsigned seq, m_seq = 0;
	char *bptr;
	int blen;

	rcu_read_lock();
restart_mnt:
	read_seqbegin_or_lock(&mount_lock, &m_seq);
	seq = 0;
	rcu_read_lock();
restart:
	/* reset the output cursor: each retry rebuilds the whole path */
	bptr = *buffer;
	blen = *buflen;
	error = 0;
	dentry = path->dentry;
	vfsmnt = path->mnt;
	mnt = real_mount(vfsmnt);
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (dentry != root->dentry || vfsmnt != root->mnt) {
		struct dentry * parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
			/* Escaped? (root of something that is not our mount) */
			if (dentry != vfsmnt->mnt_root) {
				bptr = *buffer;
				blen = *buflen;
				error = 3;
				break;
			}
			/* Global root? */
			if (mnt != parent) {
				/* cross to the mountpoint in the parent mount */
				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
				mnt = parent;
				vfsmnt = &mnt->mnt;
				continue;
			}
			if (!error)
				error = is_mounted(vfsmnt) ? 1 : 2;
			break;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		error = prepend_name(&bptr, &blen, &dentry->d_name);
		if (error)
			break;

		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);

	if (!(m_seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&mount_lock, m_seq)) {
		m_seq = 1;
		goto restart_mnt;
	}
	done_seqretry(&mount_lock, m_seq);

	/* nothing was prepended (path == root): emit a lone "/" */
	if (error >= 0 && bptr == *buffer) {
		if (--blen < 0)
			error = -ENAMETOOLONG;
		else
			*--bptr = '/';
	}
	*buffer = bptr;
	*buflen = blen;
	return error;
}
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154char *__d_path(const struct path *path,
3155 const struct path *root,
3156 char *buf, int buflen)
3157{
3158 char *res = buf + buflen;
3159 int error;
3160
3161 prepend(&res, &buflen, "\0", 1);
3162 error = prepend_path(path, root, &res, &buflen);
3163
3164 if (error < 0)
3165 return ERR_PTR(error);
3166 if (error > 0)
3167 return NULL;
3168 return res;
3169}
3170
3171char *d_absolute_path(const struct path *path,
3172 char *buf, int buflen)
3173{
3174 struct path root = {};
3175 char *res = buf + buflen;
3176 int error;
3177
3178 prepend(&res, &buflen, "\0", 1);
3179 error = prepend_path(path, &root, &res, &buflen);
3180
3181 if (error > 1)
3182 error = -EINVAL;
3183 if (error < 0)
3184 return ERR_PTR(error);
3185 return res;
3186}
3187
3188
3189
3190
/*
 * same as __d_path but appends "(deleted)" for unlinked files.
 * Prepends the NUL terminator, then " (deleted)" if the dentry is
 * unlinked, then the path itself; returns prepend_path()'s result.
 */
static int path_with_deleted(const struct path *path,
			     const struct path *root,
			     char **buf, int *buflen)
{
	prepend(buf, buflen, "\0", 1);
	if (d_unlinked(path->dentry)) {
		int error = prepend(buf, buflen, " (deleted)", 10);
		if (error)
			return error;
	}

	return prepend_path(path, root, buf, buflen);
}
3204
/* prepend the literal "(unreachable)" marker (13 bytes, no NUL) */
static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}
3209
/*
 * Snapshot fs->root into *root without taking fs->lock: retry the copy
 * until fs->seq is stable around it.  The result is a consistent copy
 * but carries no reference -- only safe to use under RCU.
 */
static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
	} while (read_seqcount_retry(&fs->seq, seq));
}
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long. Note: Callers should use the returned pointer, not the passed
 * in buffer, to use the name! The implementation often starts at an offset
 * into the buffer, and may leave 0 bytes at the start.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	int error;

	/*
	 * We have various synthetic filesystems that never get mounted.  On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed.  They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/.  The little hack
	 * below allows us to generate a name for these objects on demand:
	 *
	 * Some pseudo inodes are mountable.  When they are mounted
	 * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
	 * and instead have d_path() build the path from the root.
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	rcu_read_lock();
	get_fs_root_rcu(current->fs, &root);
	error = path_with_deleted(path, &root, &res, &buflen);
	rcu_read_unlock();

	if (error < 0)
		res = ERR_PTR(error);
	return res;
}
EXPORT_SYMBOL(d_path);
3267
3268
3269
3270
3271char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3272 const char *fmt, ...)
3273{
3274 va_list args;
3275 char temp[64];
3276 int sz;
3277
3278 va_start(args, fmt);
3279 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3280 va_end(args);
3281
3282 if (sz > sizeof(temp) || sz > buflen)
3283 return ERR_PTR(-ENAMETOOLONG);
3284
3285 buffer += buflen - sz;
3286 return memcpy(buffer, temp, sz);
3287}
3288
3289char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3290{
3291 char *end = buffer + buflen;
3292
3293 if (prepend(&end, &buflen, " (deleted)", 11) ||
3294 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3295 prepend(&end, &buflen, "/", 1))
3296 end = ERR_PTR(-ENAMETOOLONG);
3297 return end;
3298}
3299EXPORT_SYMBOL(simple_dname);
3300
3301
3302
3303
/*
 * Write the full pathname of @d (relative to its filesystem root, ignoring
 * vfsmounts) into the end of @buf.  Returns a pointer to the start of the
 * generated name inside @buf, or ERR_PTR(-ENAMETOOLONG).
 */
static char *__dentry_path(struct dentry *d, char *buf, int buflen)
{
	struct dentry *dentry;
	char *end, *retval;
	int len, seq = 0;
	int error = 0;

	/* need room for at least "/" plus the terminating NUL */
	if (buflen < 2)
		goto Elong;

	rcu_read_lock();
restart:
	dentry = d;
	end = buf + buflen;
	len = buflen;
	prepend(&end, &len, "\0", 1);
	/* Get '/' right: for the root dentry the result is just "/" */
	retval = end-1;
	*retval = '/';
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		error = prepend_name(&end, &len, &dentry->d_name);
		if (error)
			break;

		retval = end;
		dentry = parent;
	}
	/*
	 * First pass (even seq) walked locklessly under RCU; drop RCU before
	 * deciding on a retry.  The retry pass (seq == 1) takes rename_lock
	 * so concurrent renames cannot perturb the walk again.
	 */
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);
	if (error)
		goto Elong;
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
3348
/*
 * dentry_path_raw - return the path of a dentry relative to its own
 * filesystem root, ignoring vfsmounts and without any " (deleted)" /
 * "//deleted" annotation (contrast dentry_path()).
 */
char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
	return __dentry_path(dentry, buf, buflen);
}
EXPORT_SYMBOL(dentry_path_raw);
3354
/*
 * Like dentry_path_raw(), but appends "//deleted" when the dentry has been
 * unlinked.  A doubled slash cannot occur in a genuine path, so the marker
 * is unambiguous.
 */
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	if (d_unlinked(dentry)) {
		p = buf + buflen;
		/* length 10 also copies the NUL of "//deleted" */
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		/*
		 * __dentry_path() will write its own NUL exactly where the
		 * first '/' of "//deleted" now sits (at *p); hand that byte
		 * back to it and patch the '/' in again afterwards.
		 */
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	if (!IS_ERR(retval) && p)
		*p = '/';	/* restore the first '/' of "//deleted" */
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
3373
/*
 * Snapshot a mutually-consistent pair of fs->root and fs->pwd under the
 * fs->seq seqcount, retrying if either changed mid-copy.  The caller
 * (sys_getcwd()) holds rcu_read_lock() across the use of the snapshot.
 */
static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
				    struct path *pwd)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
		*pwd = fs->pwd;
	} while (read_seqcount_retry(&fs->seq, seq));
}
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
/*
 * NOTE! The user-level library version returns a character pointer.
 * The kernel system call just returns the length of the buffer filled
 * (which includes the ending '\0' character), or a negative error
 * value.  So libc would do something like
 *
 *	char *getcwd(char *buf, size_t size)
 *	{
 *		int retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = __getname();	/* PATH_MAX scratch buffer */

	if (!page)
		return -ENOMEM;

	rcu_read_lock();
	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);

	error = -ENOENT;
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		char *cwd = page + PATH_MAX;
		int buflen = PATH_MAX;

		/* build the path backwards from the end of the page */
		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &root, &cwd, &buflen);
		rcu_read_unlock();

		if (error < 0)
			goto out;

		/* prepend_path() returned 1: pwd not reachable from root */
		if (error > 0) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PATH_MAX + page - cwd;
		if (len <= size) {
			error = len;	/* success: length incl. the NUL */
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		rcu_read_unlock();
	}

out:
	__putname(page);
	return error;
}
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3469{
3470 bool result;
3471 unsigned seq;
3472
3473 if (new_dentry == old_dentry)
3474 return true;
3475
3476 do {
3477
3478 seq = read_seqbegin(&rename_lock);
3479
3480
3481
3482
3483 rcu_read_lock();
3484 if (d_ancestor(old_dentry, new_dentry))
3485 result = true;
3486 else
3487 result = false;
3488 rcu_read_unlock();
3489 } while (read_seqretry(&rename_lock, seq));
3490
3491 return result;
3492}
3493
/*
 * d_walk() callback for d_genocide(): drop one reference from every hashed,
 * positive dentry below the root, exactly once per dentry — the
 * DCACHE_GENOCIDE flag marks those already processed.
 */
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		/* skip unhashed or negative dentries (and their subtrees) */
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}
3508
/*
 * d_genocide - walk the tree under @parent and drop one reference from
 * each hashed, positive dentry (see d_genocide_kill() above).
 */
void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill, NULL);
}
3513
/*
 * d_tmpfile - instantiate a dentry for a tmpfile-style inode
 *
 * Gives the dentry a synthetic "#<ino>" name and binds it to @inode.
 * The inode's link count is decremented first, so the file is already
 * "deleted" and disappears on last close.
 */
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	/* must be a fresh, unhashed dentry still using its inline name */
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	/* parent's d_lock first, then child's — standard dcache nesting */
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	/* fits in d_iname: "#" + at most 20 digits of a u64 ino */
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
3529
/* Boot-time override for the dentry hashtable size: "dhash_entries=N" */
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;	/* option not consumed */
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;	/* consumed; see dcache_init_early()/dcache_init() */
}
__setup("dhash_entries=", set_dhash_entries);
3539
/*
 * Early-boot allocation of the dentry hashtable (HASH_EARLY); skipped when
 * hashdist is set, in which case dcache_init() allocates it later.
 */
static void __init dcache_init_early(void)
{
	unsigned int loop;

	/*
	 * If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,	/* log2 of bucket count per MB scale */
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	/* every bucket starts out as an empty bit-locked hlist */
	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
3564
/*
 * Set up the dentry slab cache, and (in the hashdist case only) allocate
 * the dentry hashtable that dcache_init_early() skipped.
 */
static void __init dcache_init(void)
{
	unsigned int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					0,	/* no HASH_EARLY: normal allocator now */
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	/* every bucket starts out as an empty bit-locked hlist */
	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
3595
3596
/* SLAB cache for __getname() consumers (allocated in vfs_caches_init()) */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);
3601
/*
 * Early VFS cache setup: allocate the dentry and inode hashtables while
 * early (HASH_EARLY) allocation is still possible.
 */
void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}
3607
/*
 * Main VFS cache/subsystem initialisation, called once at boot after the
 * slab allocator is up.  Order matters: mnt_init() and the device caches
 * rely on the dentry and inode caches set up before them.
 */
void __init vfs_caches_init(void)
{
	/* PATH_MAX-sized buffers handed out by __getname() */
	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}
3621