1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/syscalls.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/fsnotify.h>
22#include <linux/slab.h>
23#include <linux/init.h>
24#include <linux/hash.h>
25#include <linux/cache.h>
26#include <linux/export.h>
27#include <linux/mount.h>
28#include <linux/file.h>
29#include <asm/uaccess.h>
30#include <linux/security.h>
31#include <linux/seqlock.h>
32#include <linux/swap.h>
33#include <linux/bootmem.h>
34#include <linux/fs_struct.h>
35#include <linux/hardirq.h>
36#include <linux/bit_spinlock.h>
37#include <linux/rculist_bl.h>
38#include <linux/prefetch.h>
39#include <linux/ratelimit.h>
40#include <linux/list_lru.h>
41#include "internal.h"
42#include "mount.h"
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
/*
 * Sysctl tunable (vm.vfs_cache_pressure): relative willingness of the VM
 * to reclaim dentries/inodes versus page cache.  100 is the neutral default.
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
84
/*
 * Global seqlock serializing renames; lockless walkers of d_parent/d_name
 * chains sample it and retry (or take it exclusively) if a rename raced.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

/* Slab cache backing all struct dentry allocations. */
static struct kmem_cache *dentry_cache __read_mostly;
90
91
92
93
94
95
96
97
98
99
100
101static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
102{
103 if (!(*seq & 1))
104 *seq = read_seqbegin(lock);
105 else
106 read_seqlock_excl(lock);
107}
108
109static inline int need_seqretry(seqlock_t *lock, int seq)
110{
111 return !(seq & 1) && read_seqretry(lock, seq);
112}
113
114static inline void done_seqretry(seqlock_t *lock, int seq)
115{
116 if (seq & 1)
117 read_sequnlock_excl(lock);
118}
119
120
121
122
123
124
125
126
127
/* Shorthands for the boot-time computed hash table geometry below. */
#define D_HASHBITS d_hash_shift
#define D_HASHMASK d_hash_mask

/* Geometry of the global dentry hash table, fixed at boot. */
static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

/* The single global hash table of all hashed dentries. */
static struct hlist_bl_head *dentry_hashtable __read_mostly;

/*
 * Map (parent, name-hash) to the hash bucket holding the child.
 * The parent pointer is folded in (scaled down by the cache-line size,
 * since dentries are at least that aligned) so that identically named
 * children of different directories land in different buckets.
 */
static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	hash = hash + (hash >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}
143
144
/* Statistics gathering, exposed via /proc/sys/fs/dentry-state. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

/* Per-cpu counts: total allocated dentries, and dentries on LRU/shrink lists. */
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
151
152#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
153
154
155
156
157
158
159
160
161
162
163
164
165
166static long get_nr_dentry(void)
167{
168 int i;
169 long sum = 0;
170 for_each_possible_cpu(i)
171 sum += per_cpu(nr_dentry, i);
172 return sum < 0 ? 0 : sum;
173}
174
175static long get_nr_dentry_unused(void)
176{
177 int i;
178 long sum = 0;
179 for_each_possible_cpu(i)
180 sum += per_cpu(nr_dentry_unused, i);
181 return sum < 0 ? 0 : sum;
182}
183
/*
 * Sysctl handler for /proc/sys/fs/dentry-state: refresh the aggregate
 * counters from the per-cpu ones, then defer to the generic proc helper.
 */
int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
191#endif
192
193
194
195
196
197#ifdef CONFIG_DCACHE_WORD_ACCESS
198
199#include <asm/word-at-a-time.h>
200
201
202
203
204
205
206
207
208
/*
 * Compare two names word-at-a-time.  @cs points at a dentry name and is
 * word-aligned; @ct (the candidate name) may not be, hence
 * load_unaligned_zeropad(), which tolerates running off the end of the
 * source mapping by zero-padding.  Returns 0 on match, non-zero otherwise.
 * NOTE: a full final word is read from both sides even when fewer than
 * sizeof(long) bytes remain; the excess is masked off below.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	/* Partial final word: compare only the low tcount*8 bits. */
	mask = ~(~0ul << tcount*8);
	return unlikely(!!((a ^ b) & mask));
}
229
230#else
231
/*
 * Byte-at-a-time name comparison (fallback when word-at-a-time access
 * is not available).  Returns 0 if the first @tcount bytes match,
 * 1 on the first mismatch.  @tcount must be non-zero.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	for (;;) {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		if (!--tcount)
			return 0;
	}
}
243
244#endif
245
/*
 * Compare a dentry's name against (ct, tcount).
 * Usable under RCU: the name pointer is fetched once with ACCESS_ONCE()
 * because d_move() can switch it (inline <-> external) under us, and
 * smp_read_barrier_depends() pairs with the publishing smp_wmb() in
 * __d_alloc() so a newly installed name is seen fully initialized.
 * A racing rename can at worst produce a false mismatch, which callers
 * resolve by retrying under rename_lock.
 */
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs;
	cs = ACCESS_ONCE(dentry->d_name.name);
	smp_read_barrier_depends();
	return dentry_string_cmp(cs, ct, tcount);
}
269
/*
 * Final teardown of a dentry, run directly or as an RCU callback:
 * free an externally-allocated name, then return the dentry to its slab.
 */
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!hlist_unhashed(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}
279
280
281
282
/*
 * Release a dead dentry (refcount must be <= 0).  Calls the filesystem's
 * d_release() hook, then frees either immediately or after a grace
 * period, depending on whether RCU-walk lookups may still see it.
 * No locks may be held by the caller.
 */
static void d_free(struct dentry *dentry)
{
	BUG_ON((int)dentry->d_lockref.count > 0);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* If the dentry was never visible to RCU lookup, free it now. */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
296
297
298
299
300
301
302
303
/*
 * Bump d_seq (caller holds d_lock) so that any lockless RCU-walk lookup
 * in flight on this dentry is forced to retry or drop to ref-walk.
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier in the sequence counter */
	write_seqcount_barrier(&dentry->d_seq);
}
310
311
312
313
314
315
/*
 * Release the dentry's inode (if any), using the filesystem's d_iput()
 * operation if defined.  Called with d_lock and (when there is an inode)
 * i_lock held; both are dropped here, before the potentially sleeping
 * iput()/d_iput() calls.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		hlist_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}
336
337
338
339
340
/*
 * Like dentry_iput(), but for a dentry that is known to have an inode and
 * that remains visible afterwards: it additionally issues an rcuwalk
 * barrier so lockless lookups notice the d_inode change.  Drops both
 * d_lock and i_lock.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	dentry->d_inode = NULL;
	hlist_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
/*
 * The LRU/shrink-list helpers below keep three things in sync: the
 * DCACHE_LRU_LIST/DCACHE_SHRINK_LIST flags, list membership, and the
 * nr_dentry_unused accounting.  D_FLAG_VERIFY checks the flags match the
 * expected state @x on entry.  All are called with d_lock held.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
/* Put a dentry (currently on no list) on its superblock's LRU. */
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
381
/* Remove a dentry from its superblock's LRU and drop the unused count. */
static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
389
/*
 * Remove a dentry from a private shrink list.  The dentry still counts
 * as "unused" while on a shrink list, so the counter is dropped here.
 */
static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}
397
/* Put a dentry (currently on no list) directly onto a private shrink list. */
static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}
405
406
407
408
409
410
411
/*
 * Variant of d_lru_del() for use inside a list_lru walk callback: the
 * walker already holds the LRU-internal lock, so the entry is unlinked
 * with a plain list_del_init() instead of list_lru_del().
 */
static void d_lru_isolate(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_del_init(&dentry->d_lru);
}
419
/*
 * From within a list_lru walk callback: move a dentry off the LRU onto a
 * private shrink list.  DCACHE_LRU_LIST stays set and nr_dentry_unused is
 * NOT decremented here — d_shrink_del() does that when the dentry finally
 * leaves the shrink list.
 */
static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_move_tail(&dentry->d_lru, list);
}
426
427
428
429
/*
 * Add a dentry to the LRU if it is not already on an LRU or shrink list.
 * Caller holds d_lock.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
}
435
436
437
438
439
440
441
442
/*
 * Remove a dentry from whichever list it is on (superblock LRU or a
 * private shrink list), if any.  Caller holds d_lock.
 */
static void dentry_lru_del(struct dentry *dentry)
{
	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (dentry->d_flags & DCACHE_SHRINK_LIST)
			return d_shrink_del(dentry);
		d_lru_del(dentry);
	}
}
451
452
453
454
455
456
457
458
459
460
461
462
463
/*
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill (unhashed, refcount 0)
 * @parent: parent dentry, or NULL for an IS_ROOT dentry
 *
 * Called with dentry->d_lock, parent->d_lock (if @parent) and the
 * inode's i_lock (if any) held; all are released here.  Returns the
 * parent so the caller can continue killing upwards.
 */
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
	__releases(parent->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	list_del(&dentry->d_u.d_child);
	/*
	 * Mark the dentry killed so a concurrent d_walk()/try_to_ascend()
	 * notices and does not ascend through it after we free it.
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput() released the remaining locks, so the dentry itself
	 * can now be freed.
	 */
	d_free(dentry);
	return parent;
}
485
486
487
488
489
490
/*
 * Unhash a dentry without an rcuwalk barrier — for use when the dentry
 * is being torn down anyway.  Picks the right chain: the superblock's
 * s_anon list for disconnected dentries, the global hash otherwise.
 * Caller holds d_lock.
 */
static void __d_shrink(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;	/* marks the dentry unhashed */
		hlist_bl_unlock(b);
	}
}
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
/*
 * __d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * Unhash the dentry so that future lookups miss it, and force in-flight
 * RCU-walk lookups to retry.  Does not free the dentry or drop any
 * reference.  Caller must hold d_lock; d_drop() is the locking wrapper.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		__d_shrink(dentry);
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);
530
/* Locked wrapper around __d_drop(). */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
538
539
540
541
542
543
544
/*
 * Finish off a zero-refcount dentry.  Entered with only d_lock held;
 * the inode's i_lock and the parent's d_lock are taken with trylocks to
 * respect lock ordering.  If a trylock fails the dentry itself is
 * returned (with d_lock dropped when @unlock_on_failure) so the caller
 * can retry; on success all teardown locks are released via d_kill()
 * and the parent is returned.
 */
static struct dentry *
dentry_kill(struct dentry *dentry, int unlock_on_failure)
	__releases(dentry->d_lock)
{
	struct inode *inode;
	struct dentry *parent;

	inode = dentry->d_inode;
	if (inode && !spin_trylock(&inode->i_lock)) {
relock:
		if (unlock_on_failure) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();
		}
		return dentry;	/* caller retries */
	}
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	if (parent && !spin_trylock(&parent->d_lock)) {
		if (inode)
			spin_unlock(&inode->i_lock);
		goto relock;
	}

	/*
	 * The dentry is now unrecoverable: no lockref_get_not_dead() can
	 * succeed against it from here on.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * Tell the filesystem (d_prune) that this dentry is about to be
	 * unhashed and destroyed, while it is still hashed.
	 */
	if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
		dentry->d_op->d_prune(dentry);

	dentry_lru_del(dentry);
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	return d_kill(dentry, parent);
}
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
/*
 * dput - release a dentry reference
 * @dentry: dentry to release (NULL is a no-op)
 *
 * Fast path: lockref_put_or_lock() drops the count without touching
 * d_lock when the count stays positive.  Otherwise we hold d_lock with
 * count == 1 and decide: kill the dentry if it is unhashed or the fs's
 * d_delete() says so; else mark it referenced, park it on the LRU and
 * drop the last count.  Killing a dentry may cascade to its ancestors,
 * hence the repeat loop.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	if (lockref_put_or_lock(&dentry->d_lockref))
		return;

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	/* Keep it cached: mark recently used and make sure it's on the LRU. */
	if (!(dentry->d_flags & DCACHE_REFERENCED))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry, 1);
	if (dentry)
		goto repeat;	/* parent (or retry of the same dentry) */
}
EXPORT_SYMBOL(dput);
647
648
649
650
651
652
653
654
655
656
657
658
659
/*
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to unhash the dentry so that future lookups miss it.  Returns 0 on
 * success (including when already unhashed), -EBUSY if the dentry is a
 * directory or mountpoint still in active use.
 */
int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return 0;
	}
	/*
	 * Prune unused children first, so the busy check below isn't
	 * defeated by cached-but-unused descendants.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dentry->d_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dentry->d_lock);
	}

	/*
	 * Somebody else still using it?  Refuse for busy directories and
	 * mountpoints: unhashing a live directory could let a second,
	 * disconnected alias of it be instantiated.  Plain files may be
	 * unhashed while open.
	 */
	if (dentry->d_lockref.count > 1 && dentry->d_inode) {
		if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);
704
705
/* Take a reference; caller already holds dentry->d_lock. */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}
710
/* Take a reference on a dentry known to be live (count > 0). */
static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
715
/*
 * dget_parent - take a reference on the dentry's parent
 * @dentry: child dentry
 *
 * Returns the parent with an elevated refcount.  Fast path: sample
 * d_parent under RCU, take a lockref reference, and confirm the parent
 * did not change meanwhile.  Slow path: lock the candidate parent and
 * recheck before bumping the count.
 */
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Optimistic, lock-free attempt.  lockref_get_not_zero() only
	 * succeeds on a live dentry, and the recheck below catches a
	 * concurrent reparenting.
	 */
	rcu_read_lock();
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);	/* raced with rename: drop the stale parent */
	}

repeat:
	/*
	 * No rcu_dereference needed: correctness is re-established by the
	 * recheck under the parent's d_lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
/*
 * __d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: if set, only return an IS_ROOT, DCACHE_DISCONNECTED alias
 *
 * If the inode has a hashed alias, or is a directory and has any alias,
 * take a reference on it and return it; otherwise return NULL.  A
 * connected alias is preferred over a disconnected one unless
 * @want_discon is set.  Caller must hold inode->i_lock.
 */
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
 		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;	/* remember; keep looking */
			} else if (!want_discon) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		/* Re-verify under the lock — state may have changed. */
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}
808
/*
 * d_find_alias - locking wrapper for __d_find_alias()
 * @inode: inode in question
 *
 * Returns a referenced alias of @inode, preferring a connected one,
 * or NULL if there is none.
 */
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
821
822
823
824
825
/*
 * Try to kill all unused dentries associated with this inode.
 * WARNING: the caller must own a reference to the inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			/*
			 * Tell the fs this dentry is about to be unhashed,
			 * while it is still hashed.
			 */
			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
			    !d_unhashed(dentry))
				dentry->d_op->d_prune(dentry);

			/*
			 * Pin it, unhash it, then let dput() do the actual
			 * killing outside i_lock; restart since the list
			 * may have changed.
			 */
			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
854
855
856
857
858
859
860
861
/*
 * Try to throw away a dentry - free the inode, dput the parent.
 * Requires dentry->d_lock to be held and the refcount to be zero;
 * releases dentry->d_lock.
 *
 * This may fail if the needed trylocks cannot be acquired — no problem,
 * the caller just tries again later.
 */
static struct dentry * try_prune_one_dentry(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct dentry *parent;

	parent = dentry_kill(dentry, 0);
	/*
	 * Three possible outcomes of dentry_kill():
	 * - NULL: the dentry (an IS_ROOT one) was killed; nothing to do.
	 * - the same dentry: trylocks failed; return it (still locked) so
	 *   the caller can put it back on the shrink list and move on.
	 * - the parent: the dentry was killed; now prune unreferenced
	 *   ancestors.
	 */
	if (!parent)
		return NULL;
	if (parent == dentry)
		return dentry;

	/* Prune ancestors. */
	dentry = parent;
	while (dentry) {
		if (lockref_put_or_lock(&dentry->d_lockref))
			return NULL;
		dentry = dentry_kill(dentry, 1);
	}
	return NULL;
}
892
/*
 * Kill all dentries on a private shrink list.  The list may shrink
 * concurrently (dentries can be freed under us once we drop d_lock),
 * which is why the tail is re-sampled under RCU each iteration and
 * re-verified under d_lock.
 */
static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

	rcu_read_lock();
	for (;;) {
		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
		if (&dentry->d_lru == list)
			break; /* empty */

		/*
		 * Get the dentry lock, and re-verify that the dentry is
		 * this on the shrink list, and is still at the tail.
		 */
		spin_lock(&dentry->d_lock);
		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}

		/*
		 * The dispose list is isolated and dentries are not taken
		 * from it anymore, so it is safe to drop the shrink-list
		 * state here.
		 */
		d_shrink_del(dentry);

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free it.
		 */
		if (dentry->d_lockref.count) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		rcu_read_unlock();

		/*
		 * Prune it; on trylock failure try_prune_one_dentry()
		 * returns the dentry still locked, and we put it back on
		 * the shrink list to retry later.
		 */
		dentry = try_prune_one_dentry(dentry);

		rcu_read_lock();
		if (dentry) {
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
		}
	}
	rcu_read_unlock();
}
950
/*
 * list_lru walk callback used by prune_dcache_sb(): decide the fate of
 * one LRU entry.  Referenced or recently-used dentries stay; freeable
 * ones are moved to the caller's dispose list.
 */
static enum lru_status
dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * We are inverting the lru lock/dentry->d_lock here, so use a
	 * trylock.  If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use: if they have been
	 * reaccessed and still on the LRU, remove them lazily here.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move counts as a rotation: clearing
		 * DCACHE_REFERENCED and moving the dentry to the tail gives
		 * it a second trip around the LRU before it can be
		 * reclaimed.  The flag may be set again without the dentry
		 * being looked up precisely between this check and the
		 * rotation, but that is harmless: it only means an extra
		 * round trip, and the big win is avoiding d_lock/LRU churn
		 * on every dput() of a frequently used dentry.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
/*
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @nr_to_scan: number of LRU entries to scan
 * @nid: which node's LRU to scan
 *
 * Attempt to shrink the superblock's dcache LRU by up to @nr_to_scan
 * entries; called from the superblock shrinker under memory pressure.
 * May free nothing if all candidate dentries are in use.  Returns the
 * number of dentries moved off the LRU for disposal.
 */
long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
		     int nid)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
				   &dispose, &nr_to_scan);
	shrink_dentry_list(&dispose);
	return freed;
}
1033
/*
 * list_lru walk callback used by shrink_dcache_sb(): unconditionally
 * move every LRU entry we can lock onto the caller's dispose list.
 */
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
						 spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * We are inverting the lru lock/dentry->d_lock here, so use a
	 * trylock.  If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062void shrink_dcache_sb(struct super_block *sb)
1063{
1064 long freed;
1065
1066 do {
1067 LIST_HEAD(dispose);
1068
1069 freed = list_lru_walk(&sb->s_dentry_lru,
1070 dentry_lru_isolate_shrink, &dispose, UINT_MAX);
1071
1072 this_cpu_sub(nr_dentry_unused, freed);
1073 shrink_dentry_list(&dispose);
1074 } while (freed > 0);
1075}
1076EXPORT_SYMBOL(shrink_dcache_sb);
1077
1078
1079
1080
1081
1082
/*
 * Destroy a single subtree of dentries for unmount.  The filesystem is
 * no longer reachable, so no locking of d_lock/i_lock is needed; any
 * still-referenced dentry at this point is a bug and triggers BUG().
 * Walks depth-first: descend to a leaf, kill it, back up to the parent,
 * and repeat.
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;

	BUG_ON(!IS_ROOT(dentry));

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs))
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			/*
			 * Tell the fs this dentry is about to be unhashed,
			 * while it is still hashed.
			 */
			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
			    !d_unhashed(dentry))
				dentry->d_op->d_prune(dentry);

			dentry_lru_del(dentry);
			__d_shrink(dentry);

			if (dentry->d_lockref.count != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       dentry->d_lockref.count,
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			if (IS_ROOT(dentry)) {
				parent = NULL;
				list_del(&dentry->d_u.d_child);
			} else {
				parent = dentry->d_parent;
				parent->d_lockref.count--;
				list_del(&dentry->d_u.d_child);
			}

			/* drop the inode directly — no locking needed here */
			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				hlist_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				return;
			dentry = parent;
		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
}
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
/*
 * Destroy the dentries attached to a superblock on unmounting: the root
 * subtree first, then every anonymous (disconnected) subtree on s_anon.
 * s_umount must be held exclusively by the caller (checked with a
 * trylock that must fail).
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	dentry->d_lockref.count--;	/* drop the s_root reference */
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}
1187
1188
1189
1190
1191
1192
1193
/*
 * Ascend one level of parenthood during a tree walk.  We can race with
 * renaming, so after swapping the locks we re-check the parent pointer,
 * that the child was not killed under us, and that the rename seqcount
 * still matches.  Returns the locked parent, or NULL if the walk must
 * be restarted.
 */
static struct dentry *try_to_ascend(struct dentry *old, unsigned seq)
{
	struct dentry *new = old->d_parent;

	rcu_read_lock();
	spin_unlock(&old->d_lock);
	spin_lock(&new->d_lock);

	/*
	 * might go back up the wrong parent if we have had a rename
	 * or deletion
	 */
	if (new != old->d_parent ||
		 (old->d_flags & DCACHE_DENTRY_KILLED) ||
		 need_seqretry(&rename_lock, seq)) {
		spin_unlock(&new->d_lock);
		new = NULL;
	}
	rcu_read_unlock();
	return new;
}
1215
1216
1217
1218
1219
1220
1221
1222
/*
 * Return values for the d_walk() enter() callback:
 * CONTINUE - keep walking into this dentry's children;
 * QUIT     - abandon the walk (finish() is not called);
 * NORETRY  - keep walking but do not restart on a rename race;
 * SKIP     - skip this dentry's subtree.
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
/*
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering a dentry (called with its d_lock held)
 * @finish:	callback when the walk completed without racing a rename
 *
 * Depth-first walk protected by rename_lock: the first pass is lockless
 * (seqbegin); if a rename races, the walk restarts with the seqlock held
 * exclusively (seq forced odd) unless @enter asked for no retry.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			/*
			 * Hand-over-hand descent: promote the child's lock
			 * from "nested" to "parent" for lockdep's benefit.
			 */
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq)) {
		spin_unlock(&this_parent->d_lock);
		goto rename_retry;
	}
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	if (!retry)
		return;
	seq = 1;	/* odd: take rename_lock exclusively next time */
	goto again;
}
1328
1329
1330
1331
1332
1333
1334
1335static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1336{
1337 int *ret = data;
1338 if (d_mountpoint(dentry)) {
1339 *ret = 1;
1340 return D_WALK_QUIT;
1341 }
1342 return D_WALK_CONTINUE;
1343}
1344
1345
1346
1347
1348
1349
1350
1351
/*
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check
 *
 * Return true (1) if the parent or its subdirectories contain a mount
 * point in the current namespace, 0 otherwise.
 */
int have_submounts(struct dentry *parent)
{
	int ret = 0;

	d_walk(parent, &ret, check_mount, NULL);

	return ret;
}
EXPORT_SYMBOL(have_submounts);
1361
1362
1363
1364
1365
1366
1367
1368
1369
/*
 * Called by the mount code to mark a dentry as a mountpoint, after
 * verifying under rename_lock that the dentry and all of its ancestors
 * are still hashed (i.e. the would-be mountpoint is reachable).
 * Returns 0 on success, -ENOENT if any component has been unlinked.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. check_submounts_and_drop() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		dentry->d_flags |= DCACHE_MOUNTED;
		ret = 0;
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
/*
 * State shared between shrink_dcache_parent()/check_submounts_and_drop()
 * and their d_walk() callbacks: the walk root, the shrink list being
 * built, and how many dentries were collected (or a negative errno).
 */
struct select_data {
	struct dentry *start;
	struct list_head dispose;
	int found;
};
1415
/*
 * d_walk() callback: collect unreferenced descendants of data->start
 * onto data->dispose for later shrinking; drop referenced ones from
 * the LRU so laziness doesn't keep them looking reclaimable.
 */
static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	/*
	 * move only zero ref count dentries to the dispose list.
	 *
	 * Those which are presently on the shrink list, being processed
	 * by shrink_dentry_list(), shouldn't be moved.  Otherwise the
	 * loop in shrink_dcache_parent() might not make any progress
	 * and loop forever.
	 */
	if (dentry->d_lockref.count) {
		dentry_lru_del(dentry);
	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
		/*
		 * We can't use d_lru_shrink_move() because we
		 * need to get the global LRU lock and do the
		 * LRU accounting.
		 */
		d_lru_del(dentry);
		d_shrink_add(dentry, &data->dispose);
		data->found++;
		ret = D_WALK_NORETRY;
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (data->found && need_resched())
		ret = D_WALK_QUIT;
out:
	return ret;
}
1455
1456
1457
1458
1459
1460
1461
/*
 * shrink_dcache_parent - prune the dcache below a parent
 * @parent: parent of entries to prune
 *
 * Repeatedly collect and free unused descendants of @parent until a
 * full walk finds nothing left to dispose of.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
1480
/*
 * d_walk() callback for check_submounts_and_drop(): like
 * select_collect(), but abort with -EBUSY if any mountpoint is found
 * in the subtree.
 */
static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;

	if (d_mountpoint(dentry)) {
		data->found = -EBUSY;
		return D_WALK_QUIT;
	}

	return select_collect(_data, dentry);
}
1492
/*
 * d_walk() finish() callback: after a clean (non-racing) walk, re-check
 * the start dentry for a mount and, if the subtree is fully collected,
 * unhash the start dentry while its d_lock is still held by d_walk().
 */
static void check_and_drop(void *_data)
{
	struct select_data *data = _data;

	if (d_mountpoint(data->start))
		data->found = -EBUSY;
	if (!data->found)
		__d_drop(data->start);
}
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
/*
 * check_submounts_and_drop - prune dcache, check for submounts and drop
 * @dentry: dentry to prune and drop
 *
 * Try to unhash @dentry after pruning its unused descendants.  Returns
 * 0 on success, -EBUSY if the subtree contains a mountpoint.  A
 * negative-inode dentry can be dropped directly without walking.
 */
int check_submounts_and_drop(struct dentry *dentry)
{
	int ret = 0;

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		goto out;
	}

	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = dentry;
		data.found = 0;

		d_walk(dentry, &data, check_and_collect, check_and_drop);
		ret = data.found;	/* >0: more to do; 0: dropped; <0: busy */

		if (!list_empty(&data.dispose))
			shrink_dentry_list(&data.dispose);

		if (ret <= 0)
			break;

		cond_resched();
	}

out:
	return ret;
}
EXPORT_SYMBOL(check_submounts_and_drop);
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
/*
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry with no parent set (d_parent points at itself).
 * Returns NULL if there is insufficient memory.  The name is copied, so
 * the caller's buffer may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character.
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (name->len > DNAME_INLINE_LEN-1) {
		/* Name too long for the inline buffer: allocate externally. */
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/*
	 * Make sure we always see the terminating NUL character before the
	 * name pointer is published; pairs with the read barrier in
	 * dentry_cmp().
	 */
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
/*
 * d_alloc - allocate a dcache entry
 * @parent: parent of the entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry, links it into @parent's child list and pins the
 * parent with an extra reference.  Returns NULL on allocation failure.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);
1640
/*
 * Allocate a parentless dentry for pseudo filesystems, marked
 * disconnected so path walking code treats it accordingly.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(sb, name);
	if (dentry)
		dentry->d_flags |= DCACHE_DISCONNECTED;
	return dentry;
}
EXPORT_SYMBOL(d_alloc_pseudo);
1649
/* Convenience wrapper: build the qstr (incl. hash) from a C string. */
struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
1660
/*
 * Install the dentry_operations for a dentry and cache the presence of
 * each method as a DCACHE_OP_* flag, so hot paths can test a flag
 * instead of chasing the d_op pointer.  Must only be done once, before
 * the dentry is visible (hence the WARN_ON_ONCE checks).
 */
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE ));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;

}
EXPORT_SYMBOL(d_set_d_op);
1687
/*
 * Attach @inode (may be NULL, for a negative dentry) to @dentry under
 * d_lock, add the dentry to the inode's alias list, and issue an
 * rcuwalk barrier so lockless lookups see the change.  Caller holds
 * i_lock when @inode is non-NULL.
 */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	spin_lock(&dentry->d_lock);
	if (inode) {
		if (unlikely(IS_AUTOMOUNT(inode)))
			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
		hlist_add_head(&dentry->d_alias, &inode->i_dentry);
	}
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
/*
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry (NULL leaves it negative)
 *
 * The entry must not already have an inode (alias list unhashed).
 * This also notifies the security layer of the binding.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
/*
 * Like __d_instantiate(), but first search @inode's aliases for an
 * existing dentry with the same parent and name.  If one is found it is
 * returned with an extra reference and @entry is left alone; otherwise
 * @entry is instantiated and NULL is returned.  Caller holds i_lock
 * when @inode is non-NULL.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (alias->d_name.len != len)
			continue;
		if (dentry_cmp(alias, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}
1779
/*
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * On success (no pre-existing alias) instantiates @entry and returns
 * NULL.  If an equivalent alias already exists, returns it with a
 * reference; the caller's reference to @inode is consumed (iput) in
 * that case since @entry was not bound to it.
 */
struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!hlist_unhashed(&entry->d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);	/* the alias already holds the inode */
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);
1803
1804struct dentry *d_make_root(struct inode *root_inode)
1805{
1806 struct dentry *res = NULL;
1807
1808 if (root_inode) {
1809 static const struct qstr name = QSTR_INIT("/", 1);
1810
1811 res = __d_alloc(root_inode->i_sb, &name);
1812 if (res)
1813 d_instantiate(res, root_inode);
1814 else
1815 iput(root_inode);
1816 }
1817 return res;
1818}
1819EXPORT_SYMBOL(d_make_root);
1820
1821static struct dentry * __d_find_any_alias(struct inode *inode)
1822{
1823 struct dentry *alias;
1824
1825 if (hlist_empty(&inode->i_dentry))
1826 return NULL;
1827 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1828 __dget(alias);
1829 return alias;
1830}
1831
1832
1833
1834
1835
1836
1837
1838
1839struct dentry *d_find_any_alias(struct inode *inode)
1840{
1841 struct dentry *de;
1842
1843 spin_lock(&inode->i_lock);
1844 de = __d_find_any_alias(inode);
1845 spin_unlock(&inode->i_lock);
1846 return de;
1847}
1848EXPORT_SYMBOL(d_find_any_alias);
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open-by-handle operations.  If the inode already has a dentry,
 * that is returned; otherwise a new, DCACHE_DISCONNECTED dentry is created
 * and put on the per-superblock s_anon list for later reconnection.
 *
 * On successful return the reference to the inode has been consumed;
 * on error the inode reference is dropped and an ERR_PTR is returned.
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = QSTR_INIT("/", 1);
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	/* recheck under i_lock: someone may have raced us and added one */
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry and hash it on sb->s_anon */
	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	hlist_add_head(&tmp->d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

 out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a disconnected alias, d_move that
 * alias in place of the given dentry and return it; otherwise simply
 * attach the inode to @dentry and return NULL.  A non-NULL dentry is
 * only ever returned for directories, which keeps exportable
 * filesystems (knfsd) from creating directory aliases.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&inode->i_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* already holding inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else {
		d_instantiate(dentry, inode);
		if (d_unhashed(dentry))
			d_rehash(dentry);
	}
	return new;
}
EXPORT_SYMBOL(d_splice_alias);
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
/**
 * d_add_ci - lookup or allocate a new dentry with a case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode:  the inode the case-insensitive lookup has found
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * Avoids filling the dcache with multiple case-variant names for the
 * same inode: only the actual correct case is stored.  Consumes the
 * inode reference in all cases; returns the dentry to use or an
 * ERR_PTR.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (unlikely(IS_ERR(found)))
		goto err_out;
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			found = ERR_PTR(-ENOMEM);
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists and is positive, use it; drop the
	 * inode reference taken by the caller's earlier iget().
	 *
	 * NOTE(review): mismatching inodes are presumed only possible
	 * for bad inodes (which are never hashed) — hence the BUG_ONs.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {

			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * Negative dentry: instantiate it unless the inode is a directory
	 * that already has an alias (d_splice_alias() handles that case).
	 */
	new = d_splice_alias(inode, found);
	if (new) {
		dput(found);
		found = new;
	}
	return found;

err_out:
	iput(inode);
	return found;
}
EXPORT_SYMBOL(d_add_ci);
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
/*
 * Tri-state result of the ->d_compare() slow path used by RCU-walk
 * lookup: match, no match, or "d_seq changed under us — retry".
 */
enum slow_d_compare {
	D_COMP_OK,
	D_COMP_NOMATCH,
	D_COMP_SEQRETRY,
};

/*
 * Call the filesystem's ->d_compare() during RCU-walk.  The candidate's
 * name pointer/length are sampled and then validated against d_seq, so
 * the callback never operates on a torn name from a concurrent rename.
 */
static noinline enum slow_d_compare slow_dentry_cmp(
		const struct dentry *parent,
		struct dentry *dentry,
		unsigned int seq,
		const struct qstr *name)
{
	int tlen = dentry->d_name.len;
	const char *tname = dentry->d_name.name;

	if (read_seqcount_retry(&dentry->d_seq, seq)) {
		cpu_relax();
		return D_COMP_SEQRETRY;
	}
	if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
		return D_COMP_NOMATCH;
	return D_COMP_OK;
}
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * Dcache lookup for rcu-walk name resolution (store-free path walking).
 * Must be called under rcu_read_lock().  The returned dentry must not
 * be used without re-validating d_seq against *seqp.  Not for use
 * outside core VFS.
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * The hash chain is protected by RCU.  Concurrent renames can
	 * move entries between chains, so a miss here may be a false
	 * negative; d_lookup() guards against that with rename_lock.
	 * Each candidate is validated via its d_seq so we never act on
	 * a torn parent/name pair from d_move().
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * Sample d_seq before looking at parent/name.
		 *
		 * NOTE: raw_seqcount_begin() does not wait for an odd
		 * (in-progress) count to settle.  If the count is
		 * unstable we only keep retrying in the slow
		 * ->d_compare() path; a successful lookup with an odd
		 * seq simply forces the caller out of RCU mode, which
		 * is what we want anyway.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			*seqp = seq;
			switch (slow_dentry_cmp(parent, dentry, seq, name)) {
			case D_COMP_OK:
				return dentry;
			case D_COMP_NOMATCH:
				continue;
			default:
				goto seqretry;
			}
		}

		/* fast path: compare hash and length in one 64-bit go */
		if (dentry->d_name.hash_len != hashlen)
			continue;
		*seqp = seq;
		if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
			return dentry;
	}
	return NULL;
}
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2204{
2205 struct dentry *dentry;
2206 unsigned seq;
2207
2208 do {
2209 seq = read_seqbegin(&rename_lock);
2210 dentry = __d_lookup(parent, name);
2211 if (dentry)
2212 break;
2213 } while (read_seqretry(&rename_lock, seq));
2214 return dentry;
2215}
2216EXPORT_SYMBOL(d_lookup);
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * Like d_lookup() but skips the rename_lock read seqlock, so it may
 * (rarely) return a false negative due to concurrent rename activity.
 * Callers must cope with that, e.g. by falling back to d_lookup().
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * The hash chain is protected by RCU; each candidate is checked
	 * under its d_lock, which keeps parent and name stable against
	 * d_move().  Keep this in sync with __d_lookup_rcu() — the
	 * duplication is deliberate (seqcount barriers are costly on
	 * some architectures).
	 */
	rcu_read_lock();
	
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		/*
		 * Safe to compare names here: d_move() cannot change the
		 * qstr while we hold d_lock.
		 */
		if (parent->d_flags & DCACHE_OP_COMPARE) {
			int tlen = dentry->d_name.len;
			const char *tname = dentry->d_name.name;
			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
				goto next;
		} else {
			if (dentry->d_name.len != len)
				goto next;
			if (dentry_cmp(dentry, str, len))
				goto next;
		}

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
 	}
 	rcu_read_unlock();

 	return found;
}
2303
2304
2305
2306
2307
2308
2309
2310
2311struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2312{
2313
2314
2315
2316
2317
2318 name->hash = full_name_hash(name->name, name->len);
2319 if (dir->d_flags & DCACHE_OP_HASH) {
2320 int err = dir->d_op->d_hash(dir, name);
2321 if (unlikely(err < 0))
2322 return ERR_PTR(err);
2323 }
2324 return d_lookup(dir, name);
2325}
2326EXPORT_SYMBOL(d_hash_and_lookup);
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339int d_validate(struct dentry *dentry, struct dentry *dparent)
2340{
2341 struct dentry *child;
2342
2343 spin_lock(&dparent->d_lock);
2344 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2345 if (dentry == child) {
2346 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2347 __dget_dlock(dentry);
2348 spin_unlock(&dentry->d_lock);
2349 spin_unlock(&dparent->d_lock);
2350 return 1;
2351 }
2352 }
2353 spin_unlock(&dparent->d_lock);
2354
2355 return 0;
2356}
2357EXPORT_SYMBOL(d_validate);
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later.
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode;
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
again:
	spin_lock(&dentry->d_lock);
	inode = dentry->d_inode;
	isdir = S_ISDIR(inode->i_mode);
	if (dentry->d_lockref.count == 1) {
		/* trylock to respect lock order (i_lock before d_lock) */
		if (!spin_trylock(&inode->i_lock)) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();
			goto again;
		}
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		/* NOTE(review): dentry_unlink_inode() presumably drops
		 * both locks taken above — defined elsewhere; confirm. */
		dentry_unlink_inode(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	/* other users exist: just unhash so it dies on last dput */
	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);

	fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);
2411
/*
 * Insert @entry into hash bucket @b.  The entry must be unhashed and
 * its d_lock held.  DCACHE_RCUACCESS is set because, once visible to
 * lockless lookup, the dentry must be freed via an RCU grace period.
 */
static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
	BUG_ON(!d_unhashed(entry));
	hlist_bl_lock(b);
	entry->d_flags |= DCACHE_RCUACCESS;
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}

/* Hash @entry into the bucket selected by its parent and name hash. */
static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}
2425
2426
2427
2428
2429
2430
2431
2432
/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
/**
 * dentry_update_name_case - update case-insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Overwrite the stored name with a different case of the same name.
 * Old and new lengths must match (no length-changing d_compare).
 * The parent inode's i_mutex must be held across the preceding lookup
 * and this call, to keep renames and concurrent inserts away; d_seq is
 * bumped so RCU-walk readers retry.
 */
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
	BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
	BUG_ON(dentry->d_name.len != name->len);

	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
2467
/*
 * Exchange the names of @dentry and @target, coping with each name
 * living either in the fixed inline array (d_iname) or in an external
 * allocation.  Caller holds both d_locks and both d_seq write sides
 * (see __d_move()/__d_materialise_dentry()).
 */
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: just swap the pointers.
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry internal, target external: copy dentry's
			 * inline name into target's inline array and hand
			 * target's external storage to dentry.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry external, target internal: mirror image of
			 * the case above.
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both internal: only a one-way copy is done, so
			 * target keeps its old inline name.  NOTE(review):
			 * this relies on callers treating target as the
			 * dentry being replaced — confirm if new callers
			 * are added.
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			dentry->d_name.len = target->d_name.len;
			return;
		}
	}
	swap(dentry->d_name.len, target->d_name.len);
}
2508
/*
 * Acquire the locks needed to move @dentry under @target's name:
 * parent d_locks first (ancestor before descendant, with
 * DENTRY_D_LOCK_NESTED for lockdep), then the two dentry locks
 * ordered by address so concurrent moves cannot deadlock.
 */
static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
	/* same parent (or dentry is root): only one parent lock needed */
	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
		spin_lock(&target->d_parent->d_lock);
	else {
		/* lock the ancestor parent first */
		if (d_ancestor(dentry->d_parent, target->d_parent)) {
			spin_lock(&dentry->d_parent->d_lock);
			spin_lock_nested(&target->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&target->d_parent->d_lock);
			spin_lock_nested(&dentry->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		}
	}
	/* dentry locks by address order, nesting levels 2 and 3 */
	if (target < dentry) {
		spin_lock_nested(&target->d_lock, 2);
		spin_lock_nested(&dentry->d_lock, 3);
	} else {
		spin_lock_nested(&dentry->d_lock, 2);
		spin_lock_nested(&target->d_lock, 3);
	}
}
2535
/*
 * Release the parent locks taken by dentry_lock_for_move().  Only one
 * parent lock was taken when both share a parent; a root target is its
 * own parent, and that lock is released by the caller as the target's
 * own d_lock.
 */
static void dentry_unlock_parents_for_move(struct dentry *dentry,
					struct dentry *target)
{
	if (target->d_parent != dentry->d_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (target->d_parent != target)
		spin_unlock(&target->d_parent->d_lock);
}
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name.  Negative
 * dcache entries should not be moved this way.  Caller must hold
 * rename_lock; the usual VFS rename locking (parent i_mutexes,
 * s_vfs_rename_mutex) is assumed to be in place.
 */
static void __d_move(struct dentry * dentry, struct dentry * target)
{
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	BUG_ON(d_ancestor(dentry, target));
	BUG_ON(d_ancestor(target, dentry));

	dentry_lock_for_move(dentry, target);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin(&target->d_seq);

	/*
	 * Move dentry to the target's hash bucket; don't bother
	 * checking whether it is already the same bucket.
	 */
	__d_drop(dentry);
	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	swap(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		swap(dentry->d_parent, target->d_parent);

		/* re-add target to its (new) parent's child list */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	dentry_unlock_parents_for_move(dentry, target);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
}
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name.  Takes
 * rename_lock's write side so seqlock readers (path walkers) retry.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2647{
2648 struct dentry *p;
2649
2650 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2651 if (p->d_parent == p1)
2652 return p;
2653 }
2654 return NULL;
2655}
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
/*
 * Cope with a remotely-renamed directory: move @alias into @dentry's
 * place.  Caller holds dentry->d_parent->d_inode->i_mutex, rename_lock
 * and inode->i_lock; i_lock is dropped here on all paths.
 *
 * The trylocks below mirror the lock_rename() ordering — if that ever
 * changes, update this too.
 */
static struct dentry *__d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret = ERR_PTR(-EBUSY);

	/* If alias and dentry share a parent, no extra locks are needed */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename(): s_vfs_rename_mutex, then the parent's i_mutex */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	/* never move a mountpoint out from under a mount */
	if (likely(!d_mountpoint(alias))) {
		__d_move(alias, dentry);
		ret = alias;
	}
out_err:
	spin_unlock(&inode->i_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
2696
2697
2698
2699
2700
2701
/*
 * Prepare an anonymous dentry for life in the dentry tree as a named
 * dentry in place of @dentry: the two swap names and @anon takes over
 * @dentry's parent slot, while @dentry is detached (becomes its own
 * parent).  Returns with anon->d_lock still held.
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent;

	dentry_lock_for_move(anon, dentry);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin(&anon->d_seq);

	dparent = dentry->d_parent;

	switch_names(dentry, anon);
	swap(dentry->d_name.hash, anon->d_name.hash);

	/* detach dentry; splice anon into dentry's old position */
	dentry->d_parent = dentry;
	list_del_init(&dentry->d_u.d_child);
	anon->d_parent = dparent;
	list_move(&anon->d_u.d_child, &dparent->d_subdirs);

	write_seqcount_end(&dentry->d_seq);
	write_seqcount_end(&anon->d_seq);

	dentry_unlock_parents_for_move(anon, dentry);
	spin_unlock(&dentry->d_lock);

	/* anon->d_lock still locked, returns locked */
	anon->d_flags &= ~DCACHE_DISCONNECTED;
}
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant
 * disconnected root directory alias in its place if there is one.
 * Caller must hold the i_mutex of the parent directory.  Returns NULL
 * when @dentry itself was used, the (referenced) alias that was used
 * instead, or an ERR_PTR; the inode reference is consumed unless
 * NULL is returned.
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	if (!inode) {
		/* negative dentry: just instantiate and hash it */
		actual = dentry;
		__d_instantiate(dentry, NULL);
		d_rehash(actual);
		goto out_nolock;
	}

	spin_lock(&inode->i_lock);

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			write_seqlock(&rename_lock);

			if (d_ancestor(alias, dentry)) {
				/* Check for loops */
				actual = ERR_PTR(-ELOOP);
				spin_unlock(&inode->i_lock);
			} else if (IS_ROOT(alias)) {
				/* Is this an anonymous mountpoint that we
				 * could splice into our tree? */
				__d_materialise_dentry(dentry, alias);
				write_sequnlock(&rename_lock);
				__d_drop(alias);
				goto found;
			} else {
				/* Nope, but we must(!) avoid directory
				 * aliasing; __d_unalias drops i_lock */
				actual = __d_unalias(inode, dentry, alias);
			}
			write_sequnlock(&rename_lock);
			if (IS_ERR(actual)) {
				if (PTR_ERR(actual) == -ELOOP)
					pr_warn_ratelimited(
						"VFS: Lookup of '%s' in %s %s"
						" would have caused loop\n",
						dentry->d_name.name,
						inode->i_sb->s_type->name,
						inode->i_sb->s_id);
				dput(alias);
			}
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else
		BUG_ON(!d_unhashed(actual));

	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&inode->i_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	iput(inode);
	return actual;
}
EXPORT_SYMBOL_GPL(d_materialise_unique);
2817
/*
 * Copy @namelen bytes of @str in front of *buffer, moving the buffer
 * pointer back.  *buflen is consumed even when the name does not fit
 * (the remaining machinery relies on the running negative count);
 * the buffer pointer itself is only moved on success.
 */
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	int remaining = *buflen - namelen;

	*buflen = remaining;
	if (remaining < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2844{
2845 const char *dname = ACCESS_ONCE(name->name);
2846 u32 dlen = ACCESS_ONCE(name->len);
2847 char *p;
2848
2849 if (*buflen < dlen + 1)
2850 return -ENAMETOOLONG;
2851 *buflen -= dlen + 1;
2852 p = *buffer -= dlen + 1;
2853 *p++ = '/';
2854 while (dlen--) {
2855 char c = *dname++;
2856 if (!c)
2857 break;
2858 *p++ = c;
2859 }
2860 return 0;
2861}
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880static int prepend_path(const struct path *path,
2881 const struct path *root,
2882 char **buffer, int *buflen)
2883{
2884 struct dentry *dentry = path->dentry;
2885 struct vfsmount *vfsmnt = path->mnt;
2886 struct mount *mnt = real_mount(vfsmnt);
2887 int error = 0;
2888 unsigned seq = 0;
2889 char *bptr;
2890 int blen;
2891
2892 rcu_read_lock();
2893restart:
2894 bptr = *buffer;
2895 blen = *buflen;
2896 read_seqbegin_or_lock(&rename_lock, &seq);
2897 while (dentry != root->dentry || vfsmnt != root->mnt) {
2898 struct dentry * parent;
2899
2900 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2901
2902 if (mnt_has_parent(mnt)) {
2903 dentry = mnt->mnt_mountpoint;
2904 mnt = mnt->mnt_parent;
2905 vfsmnt = &mnt->mnt;
2906 continue;
2907 }
2908
2909
2910
2911
2912 if (IS_ROOT(dentry) &&
2913 (dentry->d_name.len != 1 ||
2914 dentry->d_name.name[0] != '/')) {
2915 WARN(1, "Root dentry has weird name <%.*s>\n",
2916 (int) dentry->d_name.len,
2917 dentry->d_name.name);
2918 }
2919 if (!error)
2920 error = is_mounted(vfsmnt) ? 1 : 2;
2921 break;
2922 }
2923 parent = dentry->d_parent;
2924 prefetch(parent);
2925 error = prepend_name(&bptr, &blen, &dentry->d_name);
2926 if (error)
2927 break;
2928
2929 dentry = parent;
2930 }
2931 if (!(seq & 1))
2932 rcu_read_unlock();
2933 if (need_seqretry(&rename_lock, seq)) {
2934 seq = 1;
2935 goto restart;
2936 }
2937 done_seqretry(&rename_lock, seq);
2938
2939 if (error >= 0 && bptr == *buffer) {
2940 if (--blen < 0)
2941 error = -ENAMETOOLONG;
2942 else
2943 *--bptr = '/';
2944 }
2945 *buffer = bptr;
2946 *buflen = blen;
2947 return error;
2948}
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.  Returns a pointer into
 * the buffer, NULL if the path is not reachable from the supplied
 * root, or ERR_PTR(-ENAMETOOLONG).  "buflen" should be positive.
 */
char *__d_path(const struct path *path,
	       const struct path *root,
	       char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);	/* reserve the trailing NUL */
	br_read_lock(&vfsmount_lock);
	error = prepend_path(path, root, &res, &buflen);
	br_read_unlock(&vfsmount_lock);

	if (error < 0)
		return ERR_PTR(error);
	if (error > 0)		/* hit the global root: unreachable */
		return NULL;
	return res;
}
2984
/*
 * Like __d_path() but with an empty root, i.e. the path is always
 * resolved all the way to the global root.  A path on an unmounted
 * (lazily detached) mount yields -EINVAL.
 */
char *d_absolute_path(const struct path *path,
	       char *buf, int buflen)
{
	struct path root = {};
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);	/* reserve the trailing NUL */
	br_read_lock(&vfsmount_lock);
	error = prepend_path(path, &root, &res, &buflen);
	br_read_unlock(&vfsmount_lock);

	if (error > 1)
		error = -EINVAL;
	if (error < 0)
		return ERR_PTR(error);
	return res;
}
3003
3004
3005
3006
/*
 * Same as __d_path() but appends " (deleted)" for unlinked files.
 * The 10-byte length does not include a NUL; the terminator was
 * already placed by the leading prepend of "\0".
 */
static int path_with_deleted(const struct path *path,
			     const struct path *root,
			     char **buf, int *buflen)
{
	prepend(buf, buflen, "\0", 1);
	if (d_unlinked(path->dentry)) {
		int error = prepend(buf, buflen, " (deleted)", 10);
		if (error)
			return error;
	}

	return prepend_path(path, root, buf, buflen);
}
3020
/* Prefix "(unreachable)" for paths outside the caller's root. */
static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}
3025
/*
 * Sample fs->root under fs->seq so we get a consistent snapshot even
 * against a concurrent chroot.  Caller must be inside an RCU read
 * section to keep the sampled path structures alive.
 */
static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
	} while (read_seqcount_retry(&fs->seq, seq));
}
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.  If the entry has been
 * deleted the string " (deleted)" is appended (note that this is
 * ambiguous).  Returns a pointer into the buffer or an error code if
 * the path was too long.  Callers must use the returned pointer, not
 * the passed-in buffer: the name usually starts at an offset into the
 * buffer.  "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	int error;

	/*
	 * Synthetic filesystems that are never mounted don't hash their
	 * dentries and have no names until someone looks at
	 * /proc/<pid>/fd/; ->d_dname() generates a name on demand for
	 * those objects.
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	rcu_read_lock();
	get_fs_root_rcu(current->fs, &root);
	br_read_lock(&vfsmount_lock);
	error = path_with_deleted(path, &root, &res, &buflen);
	br_read_unlock(&vfsmount_lock);
	rcu_read_unlock();

	if (error < 0)
		res = ERR_PTR(error);
	return res;
}
EXPORT_SYMBOL(d_path);
3080
3081
3082
3083
/*
 * Helper for dentry_operations.d_dname() members: format into a small
 * on-stack buffer, then copy the result to the very end of @buffer
 * (d_dname callers expect the name at the buffer's tail).
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;	/* + NUL */
	va_end(args);

	/* reject truncated output (vsnprintf returned >= sizeof(temp)) */
	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}
3101
/*
 * ->d_dname() for simple pseudo filesystems: "/<name> (deleted)".
 * The 11-byte length of " (deleted)" includes the terminating NUL.
 */
char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
{
	char *end = buffer + buflen;
	/* NOTE(review): d_name is read without d_lock — presumably these
	 * dentries are never renamed; confirm for new users. */
	if (prepend(&end, &buflen, " (deleted)", 11) ||
	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
	    prepend(&end, &buflen, "/", 1))
		end = ERR_PTR(-ENAMETOOLONG);
	return end;
}
3112
3113
3114
3115
3116static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
3117{
3118 char *end, *retval;
3119 int len, seq = 0;
3120 int error = 0;
3121
3122 rcu_read_lock();
3123restart:
3124 end = buf + buflen;
3125 len = buflen;
3126 prepend(&end, &len, "\0", 1);
3127 if (buflen < 1)
3128 goto Elong;
3129
3130 retval = end-1;
3131 *retval = '/';
3132 read_seqbegin_or_lock(&rename_lock, &seq);
3133 while (!IS_ROOT(dentry)) {
3134 struct dentry *parent = dentry->d_parent;
3135 int error;
3136
3137 prefetch(parent);
3138 error = prepend_name(&end, &len, &dentry->d_name);
3139 if (error)
3140 break;
3141
3142 retval = end;
3143 dentry = parent;
3144 }
3145 if (!(seq & 1))
3146 rcu_read_unlock();
3147 if (need_seqretry(&rename_lock, seq)) {
3148 seq = 1;
3149 goto restart;
3150 }
3151 done_seqretry(&rename_lock, seq);
3152 if (error)
3153 goto Elong;
3154 return retval;
3155Elong:
3156 return ERR_PTR(-ENAMETOOLONG);
3157}
3158
/* Like dentry_path() but without the "//deleted" marker. */
char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
	return __dentry_path(dentry, buf, buflen);
}
EXPORT_SYMBOL(dentry_path_raw);
3164
/*
 * Return the path of @dentry relative to its filesystem root, with
 * "//deleted" appended when the dentry has been unlinked.
 */
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	if (d_unlinked(dentry)) {
		p = buf + buflen;
		/* 10 = "//deleted" + NUL; p ends up at the first '/' */
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		/* let __dentry_path()'s NUL land on the first '/' */
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	if (!IS_ERR(retval) && p)
		*p = '/';	/* restore the slash the NUL overwrote */
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
3183
/*
 * Sample fs->root and fs->pwd together under fs->seq so both paths
 * form a consistent snapshot against concurrent chdir()/chroot().
 * Caller must be inside an RCU read section.
 */
static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
				    struct path *pwd)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
		*pwd = fs->pwd;
	} while (read_seqcount_retry(&fs->seq, seq));
}
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
/*
 * getcwd(2): copy the current working directory's path into the user
 * buffer.  Unlike the libc wrapper, the syscall returns the number of
 * bytes copied (including the trailing '\0') or a negative errno:
 * -ENOENT when the cwd has been unlinked, -ERANGE when the buffer is
 * too small.
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = __getname();

	if (!page)
		return -ENOMEM;

	rcu_read_lock();
	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);

	error = -ENOENT;
	br_read_lock(&vfsmount_lock);
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		char *cwd = page + PATH_MAX;
		int buflen = PATH_MAX;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &root, &cwd, &buflen);
		br_read_unlock(&vfsmount_lock);
		rcu_read_unlock();

		if (error < 0)
			goto out;

		/* Unreachable from current root */
		if (error > 0) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PATH_MAX + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		br_read_unlock(&vfsmount_lock);
		rcu_read_unlock();
	}

out:
	__putname(page);
	return error;
}
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3282{
3283 int result;
3284 unsigned seq;
3285
3286 if (new_dentry == old_dentry)
3287 return 1;
3288
3289 do {
3290
3291 seq = read_seqbegin(&rename_lock);
3292
3293
3294
3295
3296 rcu_read_lock();
3297 if (d_ancestor(old_dentry, new_dentry))
3298 result = 1;
3299 else
3300 result = 0;
3301 rcu_read_unlock();
3302 } while (read_seqretry(&rename_lock, seq));
3303
3304 return result;
3305}
3306
/*
 * d_walk() callback for d_genocide(): drop one reference from every
 * hashed, positive dentry below the root — at most once per dentry,
 * tracked via DCACHE_GENOCIDE.  Unhashed or negative subtrees are
 * skipped entirely.
 */
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}
3321
/* Drop one reference from every live dentry in the tree under @parent. */
void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill, NULL);
}
3326
/*
 * Give an O_TMPFILE inode a synthetic "#<ino>" name and instantiate
 * @dentry with it.  The link count is dropped first so the file is
 * unlinked from the start; the dentry must be fresh (inline name,
 * no alias, unhashed).
 */
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_alias) ||
		!d_unlinked(dentry));
	/* rename the dentry in place under parent + own d_lock */
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
3342
/* "dhash_entries=" boot option: override the dentry hash table size. */
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
3352
/*
 * Early (boot-time) allocation of the dentry hash table, before the
 * page allocator is fully up.
 */
static void __init dcache_init_early(void)
{
	unsigned int loop;

	/*
	 * If hashes are distributed across NUMA nodes, defer hash
	 * allocation until vmalloc space is available (dcache_init()).
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
3377
/*
 * Set up the dentry slab cache and, when the early path was skipped
 * (hashdist), the dentry hash table.
 */
static void __init dcache_init(void)
{
	unsigned int loop;

	/*
	 * Dentries are dynamically allocated/freed; SLAB_RECLAIM_ACCOUNT
	 * marks the cache as reclaimable for the VM.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/* Hash may already have been set up in dcache_init_early() */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
3408
3409
3410struct kmem_cache *names_cachep __read_mostly;
3411EXPORT_SYMBOL(names_cachep);
3412
3413EXPORT_SYMBOL(d_genocide);
3414
/* Boot-time (pre-SMP) initialization of the dcache and icache hashes. */
void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}
3420
/*
 * Initialize all VFS caches.  @mempages is the number of pages
 * available; a reserve is subtracted before sizing the per-subsystem
 * tables.
 */
void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */
	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}
3441