1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/syscalls.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/fsnotify.h>
22#include <linux/slab.h>
23#include <linux/init.h>
24#include <linux/hash.h>
25#include <linux/cache.h>
26#include <linux/export.h>
27#include <linux/mount.h>
28#include <linux/file.h>
29#include <asm/uaccess.h>
30#include <linux/security.h>
31#include <linux/seqlock.h>
32#include <linux/swap.h>
33#include <linux/bootmem.h>
34#include <linux/fs_struct.h>
35#include <linux/hardirq.h>
36#include <linux/bit_spinlock.h>
37#include <linux/rculist_bl.h>
38#include <linux/prefetch.h>
39#include <linux/ratelimit.h>
40#include "internal.h"
41#include "mount.h"
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81int sysctl_vfs_cache_pressure __read_mostly = 100;
82EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
83
84static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
85__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
86
87EXPORT_SYMBOL(rename_lock);
88
89static struct kmem_cache *dentry_cache __read_mostly;
90
91
92
93
94
95
96
97
98
99#define D_HASHBITS d_hash_shift
100#define D_HASHMASK d_hash_mask
101
102static unsigned int d_hash_mask __read_mostly;
103static unsigned int d_hash_shift __read_mostly;
104
105static struct hlist_bl_head *dentry_hashtable __read_mostly;
106
107static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
108 unsigned int hash)
109{
110 hash += (unsigned long) parent / L1_CACHE_BYTES;
111 hash = hash + (hash >> D_HASHBITS);
112 return dentry_hashtable + (hash & D_HASHMASK);
113}
114
115
116struct dentry_stat_t dentry_stat = {
117 .age_limit = 45,
118};
119
120static DEFINE_PER_CPU(unsigned int, nr_dentry);
121
122#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
123static int get_nr_dentry(void)
124{
125 int i;
126 int sum = 0;
127 for_each_possible_cpu(i)
128 sum += per_cpu(nr_dentry, i);
129 return sum < 0 ? 0 : sum;
130}
131
132int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
133 size_t *lenp, loff_t *ppos)
134{
135 dentry_stat.nr_dentry = get_nr_dentry();
136 return proc_dointvec(table, write, buffer, lenp, ppos);
137}
138#endif
139
140
141
142
143
144#ifdef CONFIG_DCACHE_WORD_ACCESS
145
146#include <asm/word-at-a-time.h>
147
148
149
150
151
152
153
154
155
156static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
157{
158 unsigned long a,b,mask;
159
160 for (;;) {
161 a = *(unsigned long *)cs;
162 b = load_unaligned_zeropad(ct);
163 if (tcount < sizeof(unsigned long))
164 break;
165 if (unlikely(a != b))
166 return 1;
167 cs += sizeof(unsigned long);
168 ct += sizeof(unsigned long);
169 tcount -= sizeof(unsigned long);
170 if (!tcount)
171 return 0;
172 }
173 mask = ~(~0ul << tcount*8);
174 return unlikely(!!((a ^ b) & mask));
175}
176
177#else
178
179static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
180{
181 do {
182 if (*cs != *ct)
183 return 1;
184 cs++;
185 ct++;
186 tcount--;
187 } while (tcount);
188 return 0;
189}
190
191#endif
192
193static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
194{
195 const unsigned char *cs;
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212 cs = ACCESS_ONCE(dentry->d_name.name);
213 smp_read_barrier_depends();
214 return dentry_string_cmp(cs, ct, tcount);
215}
216
217static void __d_free(struct rcu_head *head)
218{
219 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
220
221 WARN_ON(!hlist_unhashed(&dentry->d_alias));
222 if (dname_external(dentry))
223 kfree(dentry->d_name.name);
224 kmem_cache_free(dentry_cache, dentry);
225}
226
227
228
229
230static void d_free(struct dentry *dentry)
231{
232 BUG_ON(dentry->d_count);
233 this_cpu_dec(nr_dentry);
234 if (dentry->d_op && dentry->d_op->d_release)
235 dentry->d_op->d_release(dentry);
236
237
238 if (!(dentry->d_flags & DCACHE_RCUACCESS))
239 __d_free(&dentry->d_u.d_rcu);
240 else
241 call_rcu(&dentry->d_u.d_rcu, __d_free);
242}
243
244
245
246
247
248
249
250
251static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
252{
253 assert_spin_locked(&dentry->d_lock);
254
255 write_seqcount_barrier(&dentry->d_seq);
256}
257
258
259
260
261
262
263static void dentry_iput(struct dentry * dentry)
264 __releases(dentry->d_lock)
265 __releases(dentry->d_inode->i_lock)
266{
267 struct inode *inode = dentry->d_inode;
268 if (inode) {
269 dentry->d_inode = NULL;
270 hlist_del_init(&dentry->d_alias);
271 spin_unlock(&dentry->d_lock);
272 spin_unlock(&inode->i_lock);
273 if (!inode->i_nlink)
274 fsnotify_inoderemove(inode);
275 if (dentry->d_op && dentry->d_op->d_iput)
276 dentry->d_op->d_iput(dentry, inode);
277 else
278 iput(inode);
279 } else {
280 spin_unlock(&dentry->d_lock);
281 }
282}
283
284
285
286
287
288static void dentry_unlink_inode(struct dentry * dentry)
289 __releases(dentry->d_lock)
290 __releases(dentry->d_inode->i_lock)
291{
292 struct inode *inode = dentry->d_inode;
293 dentry->d_inode = NULL;
294 hlist_del_init(&dentry->d_alias);
295 dentry_rcuwalk_barrier(dentry);
296 spin_unlock(&dentry->d_lock);
297 spin_unlock(&inode->i_lock);
298 if (!inode->i_nlink)
299 fsnotify_inoderemove(inode);
300 if (dentry->d_op && dentry->d_op->d_iput)
301 dentry->d_op->d_iput(dentry, inode);
302 else
303 iput(inode);
304}
305
306
307
308
309static void dentry_lru_add(struct dentry *dentry)
310{
311 if (list_empty(&dentry->d_lru)) {
312 spin_lock(&dcache_lru_lock);
313 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
314 dentry->d_sb->s_nr_dentry_unused++;
315 dentry_stat.nr_unused++;
316 spin_unlock(&dcache_lru_lock);
317 }
318}
319
320static void __dentry_lru_del(struct dentry *dentry)
321{
322 list_del_init(&dentry->d_lru);
323 dentry->d_flags &= ~DCACHE_SHRINK_LIST;
324 dentry->d_sb->s_nr_dentry_unused--;
325 dentry_stat.nr_unused--;
326}
327
328
329
330
331static void dentry_lru_del(struct dentry *dentry)
332{
333 if (!list_empty(&dentry->d_lru)) {
334 spin_lock(&dcache_lru_lock);
335 __dentry_lru_del(dentry);
336 spin_unlock(&dcache_lru_lock);
337 }
338}
339
340static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
341{
342 spin_lock(&dcache_lru_lock);
343 if (list_empty(&dentry->d_lru)) {
344 list_add_tail(&dentry->d_lru, list);
345 dentry->d_sb->s_nr_dentry_unused++;
346 dentry_stat.nr_unused++;
347 } else {
348 list_move_tail(&dentry->d_lru, list);
349 }
350 spin_unlock(&dcache_lru_lock);
351}
352
353
354
355
356
357
358
359
360
361
362
363
364
365static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
366 __releases(dentry->d_lock)
367 __releases(parent->d_lock)
368 __releases(dentry->d_inode->i_lock)
369{
370 list_del(&dentry->d_u.d_child);
371
372
373
374
375 dentry->d_flags |= DCACHE_DENTRY_KILLED;
376 if (parent)
377 spin_unlock(&parent->d_lock);
378 dentry_iput(dentry);
379
380
381
382
383 d_free(dentry);
384 return parent;
385}
386
387
388
389
390
391
392static void __d_shrink(struct dentry *dentry)
393{
394 if (!d_unhashed(dentry)) {
395 struct hlist_bl_head *b;
396 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
397 b = &dentry->d_sb->s_anon;
398 else
399 b = d_hash(dentry->d_parent, dentry->d_name.hash);
400
401 hlist_bl_lock(b);
402 __hlist_bl_del(&dentry->d_hash);
403 dentry->d_hash.pprev = NULL;
404 hlist_bl_unlock(b);
405 }
406}
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423void __d_drop(struct dentry *dentry)
424{
425 if (!d_unhashed(dentry)) {
426 __d_shrink(dentry);
427 dentry_rcuwalk_barrier(dentry);
428 }
429}
430EXPORT_SYMBOL(__d_drop);
431
432void d_drop(struct dentry *dentry)
433{
434 spin_lock(&dentry->d_lock);
435 __d_drop(dentry);
436 spin_unlock(&dentry->d_lock);
437}
438EXPORT_SYMBOL(d_drop);
439
440
441
442
443
444
445
446static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
447 __releases(dentry->d_lock)
448{
449 struct inode *inode;
450 struct dentry *parent;
451
452 inode = dentry->d_inode;
453 if (inode && !spin_trylock(&inode->i_lock)) {
454relock:
455 spin_unlock(&dentry->d_lock);
456 cpu_relax();
457 return dentry;
458 }
459 if (IS_ROOT(dentry))
460 parent = NULL;
461 else
462 parent = dentry->d_parent;
463 if (parent && !spin_trylock(&parent->d_lock)) {
464 if (inode)
465 spin_unlock(&inode->i_lock);
466 goto relock;
467 }
468
469 if (ref)
470 dentry->d_count--;
471
472
473
474
475 if (dentry->d_flags & DCACHE_OP_PRUNE)
476 dentry->d_op->d_prune(dentry);
477
478 dentry_lru_del(dentry);
479
480 __d_drop(dentry);
481 return d_kill(dentry, parent);
482}
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510void dput(struct dentry *dentry)
511{
512 if (!dentry)
513 return;
514
515repeat:
516 if (dentry->d_count == 1)
517 might_sleep();
518 spin_lock(&dentry->d_lock);
519 BUG_ON(!dentry->d_count);
520 if (dentry->d_count > 1) {
521 dentry->d_count--;
522 spin_unlock(&dentry->d_lock);
523 return;
524 }
525
526 if (dentry->d_flags & DCACHE_OP_DELETE) {
527 if (dentry->d_op->d_delete(dentry))
528 goto kill_it;
529 }
530
531
532 if (d_unhashed(dentry))
533 goto kill_it;
534
535 dentry->d_flags |= DCACHE_REFERENCED;
536 dentry_lru_add(dentry);
537
538 dentry->d_count--;
539 spin_unlock(&dentry->d_lock);
540 return;
541
542kill_it:
543 dentry = dentry_kill(dentry, 1);
544 if (dentry)
545 goto repeat;
546}
547EXPORT_SYMBOL(dput);
548
549
550
551
552
553
554
555
556
557
558
559
560
561int d_invalidate(struct dentry * dentry)
562{
563
564
565
566 spin_lock(&dentry->d_lock);
567 if (d_unhashed(dentry)) {
568 spin_unlock(&dentry->d_lock);
569 return 0;
570 }
571
572
573
574
575 if (!list_empty(&dentry->d_subdirs)) {
576 spin_unlock(&dentry->d_lock);
577 shrink_dcache_parent(dentry);
578 spin_lock(&dentry->d_lock);
579 }
580
581
582
583
584
585
586
587
588
589
590
591
592
593 if (dentry->d_count > 1 && dentry->d_inode) {
594 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
595 spin_unlock(&dentry->d_lock);
596 return -EBUSY;
597 }
598 }
599
600 __d_drop(dentry);
601 spin_unlock(&dentry->d_lock);
602 return 0;
603}
604EXPORT_SYMBOL(d_invalidate);
605
606
607static inline void __dget_dlock(struct dentry *dentry)
608{
609 dentry->d_count++;
610}
611
612static inline void __dget(struct dentry *dentry)
613{
614 spin_lock(&dentry->d_lock);
615 __dget_dlock(dentry);
616 spin_unlock(&dentry->d_lock);
617}
618
619struct dentry *dget_parent(struct dentry *dentry)
620{
621 struct dentry *ret;
622
623repeat:
624
625
626
627
628 rcu_read_lock();
629 ret = dentry->d_parent;
630 spin_lock(&ret->d_lock);
631 if (unlikely(ret != dentry->d_parent)) {
632 spin_unlock(&ret->d_lock);
633 rcu_read_unlock();
634 goto repeat;
635 }
636 rcu_read_unlock();
637 BUG_ON(!ret->d_count);
638 ret->d_count++;
639 spin_unlock(&ret->d_lock);
640 return ret;
641}
642EXPORT_SYMBOL(dget_parent);
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
661{
662 struct dentry *alias, *discon_alias;
663
664again:
665 discon_alias = NULL;
666 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
667 spin_lock(&alias->d_lock);
668 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
669 if (IS_ROOT(alias) &&
670 (alias->d_flags & DCACHE_DISCONNECTED)) {
671 discon_alias = alias;
672 } else if (!want_discon) {
673 __dget_dlock(alias);
674 spin_unlock(&alias->d_lock);
675 return alias;
676 }
677 }
678 spin_unlock(&alias->d_lock);
679 }
680 if (discon_alias) {
681 alias = discon_alias;
682 spin_lock(&alias->d_lock);
683 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
684 if (IS_ROOT(alias) &&
685 (alias->d_flags & DCACHE_DISCONNECTED)) {
686 __dget_dlock(alias);
687 spin_unlock(&alias->d_lock);
688 return alias;
689 }
690 }
691 spin_unlock(&alias->d_lock);
692 goto again;
693 }
694 return NULL;
695}
696
697struct dentry *d_find_alias(struct inode *inode)
698{
699 struct dentry *de = NULL;
700
701 if (!hlist_empty(&inode->i_dentry)) {
702 spin_lock(&inode->i_lock);
703 de = __d_find_alias(inode, 0);
704 spin_unlock(&inode->i_lock);
705 }
706 return de;
707}
708EXPORT_SYMBOL(d_find_alias);
709
710
711
712
713
714void d_prune_aliases(struct inode *inode)
715{
716 struct dentry *dentry;
717restart:
718 spin_lock(&inode->i_lock);
719 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
720 spin_lock(&dentry->d_lock);
721 if (!dentry->d_count) {
722 __dget_dlock(dentry);
723 __d_drop(dentry);
724 spin_unlock(&dentry->d_lock);
725 spin_unlock(&inode->i_lock);
726 dput(dentry);
727 goto restart;
728 }
729 spin_unlock(&dentry->d_lock);
730 }
731 spin_unlock(&inode->i_lock);
732}
733EXPORT_SYMBOL(d_prune_aliases);
734
735
736
737
738
739
740
741
742static void try_prune_one_dentry(struct dentry *dentry)
743 __releases(dentry->d_lock)
744{
745 struct dentry *parent;
746
747 parent = dentry_kill(dentry, 0);
748
749
750
751
752
753
754
755
756
757
758 if (!parent)
759 return;
760 if (parent == dentry)
761 return;
762
763
764 dentry = parent;
765 while (dentry) {
766 spin_lock(&dentry->d_lock);
767 if (dentry->d_count > 1) {
768 dentry->d_count--;
769 spin_unlock(&dentry->d_lock);
770 return;
771 }
772 dentry = dentry_kill(dentry, 1);
773 }
774}
775
776static void shrink_dentry_list(struct list_head *list)
777{
778 struct dentry *dentry;
779
780 rcu_read_lock();
781 for (;;) {
782 dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
783 if (&dentry->d_lru == list)
784 break;
785 spin_lock(&dentry->d_lock);
786 if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
787 spin_unlock(&dentry->d_lock);
788 continue;
789 }
790
791
792
793
794
795
796 if (dentry->d_count) {
797 dentry_lru_del(dentry);
798 spin_unlock(&dentry->d_lock);
799 continue;
800 }
801
802 rcu_read_unlock();
803
804 try_prune_one_dentry(dentry);
805
806 rcu_read_lock();
807 }
808 rcu_read_unlock();
809}
810
811
812
813
814
815
816
817
818
819
820
821
822
823void prune_dcache_sb(struct super_block *sb, int count)
824{
825 struct dentry *dentry;
826 LIST_HEAD(referenced);
827 LIST_HEAD(tmp);
828
829relock:
830 spin_lock(&dcache_lru_lock);
831 while (!list_empty(&sb->s_dentry_lru)) {
832 dentry = list_entry(sb->s_dentry_lru.prev,
833 struct dentry, d_lru);
834 BUG_ON(dentry->d_sb != sb);
835
836 if (!spin_trylock(&dentry->d_lock)) {
837 spin_unlock(&dcache_lru_lock);
838 cpu_relax();
839 goto relock;
840 }
841
842 if (dentry->d_flags & DCACHE_REFERENCED) {
843 dentry->d_flags &= ~DCACHE_REFERENCED;
844 list_move(&dentry->d_lru, &referenced);
845 spin_unlock(&dentry->d_lock);
846 } else {
847 list_move_tail(&dentry->d_lru, &tmp);
848 dentry->d_flags |= DCACHE_SHRINK_LIST;
849 spin_unlock(&dentry->d_lock);
850 if (!--count)
851 break;
852 }
853 cond_resched_lock(&dcache_lru_lock);
854 }
855 if (!list_empty(&referenced))
856 list_splice(&referenced, &sb->s_dentry_lru);
857 spin_unlock(&dcache_lru_lock);
858
859 shrink_dentry_list(&tmp);
860}
861
862
863
864
865
866
867
868
869void shrink_dcache_sb(struct super_block *sb)
870{
871 LIST_HEAD(tmp);
872
873 spin_lock(&dcache_lru_lock);
874 while (!list_empty(&sb->s_dentry_lru)) {
875 list_splice_init(&sb->s_dentry_lru, &tmp);
876 spin_unlock(&dcache_lru_lock);
877 shrink_dentry_list(&tmp);
878 spin_lock(&dcache_lru_lock);
879 }
880 spin_unlock(&dcache_lru_lock);
881}
882EXPORT_SYMBOL(shrink_dcache_sb);
883
884
885
886
887
888
889static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
890{
891 struct dentry *parent;
892
893 BUG_ON(!IS_ROOT(dentry));
894
895 for (;;) {
896
897 while (!list_empty(&dentry->d_subdirs))
898 dentry = list_entry(dentry->d_subdirs.next,
899 struct dentry, d_u.d_child);
900
901
902
903 do {
904 struct inode *inode;
905
906
907
908
909
910 if (dentry->d_flags & DCACHE_OP_PRUNE)
911 dentry->d_op->d_prune(dentry);
912
913 dentry_lru_del(dentry);
914 __d_shrink(dentry);
915
916 if (dentry->d_count != 0) {
917 printk(KERN_ERR
918 "BUG: Dentry %p{i=%lx,n=%s}"
919 " still in use (%d)"
920 " [unmount of %s %s]\n",
921 dentry,
922 dentry->d_inode ?
923 dentry->d_inode->i_ino : 0UL,
924 dentry->d_name.name,
925 dentry->d_count,
926 dentry->d_sb->s_type->name,
927 dentry->d_sb->s_id);
928 BUG();
929 }
930
931 if (IS_ROOT(dentry)) {
932 parent = NULL;
933 list_del(&dentry->d_u.d_child);
934 } else {
935 parent = dentry->d_parent;
936 parent->d_count--;
937 list_del(&dentry->d_u.d_child);
938 }
939
940 inode = dentry->d_inode;
941 if (inode) {
942 dentry->d_inode = NULL;
943 hlist_del_init(&dentry->d_alias);
944 if (dentry->d_op && dentry->d_op->d_iput)
945 dentry->d_op->d_iput(dentry, inode);
946 else
947 iput(inode);
948 }
949
950 d_free(dentry);
951
952
953
954
955 if (!parent)
956 return;
957 dentry = parent;
958 } while (list_empty(&dentry->d_subdirs));
959
960 dentry = list_entry(dentry->d_subdirs.next,
961 struct dentry, d_u.d_child);
962 }
963}
964
965
966
967
968
969
970
971
972
973
974
975void shrink_dcache_for_umount(struct super_block *sb)
976{
977 struct dentry *dentry;
978
979 if (down_read_trylock(&sb->s_umount))
980 BUG();
981
982 dentry = sb->s_root;
983 sb->s_root = NULL;
984 dentry->d_count--;
985 shrink_dcache_for_umount_subtree(dentry);
986
987 while (!hlist_bl_empty(&sb->s_anon)) {
988 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
989 shrink_dcache_for_umount_subtree(dentry);
990 }
991}
992
993
994
995
996
997
998
999static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
1000{
1001 struct dentry *new = old->d_parent;
1002
1003 rcu_read_lock();
1004 spin_unlock(&old->d_lock);
1005 spin_lock(&new->d_lock);
1006
1007
1008
1009
1010
1011 if (new != old->d_parent ||
1012 (old->d_flags & DCACHE_DENTRY_KILLED) ||
1013 (!locked && read_seqretry(&rename_lock, seq))) {
1014 spin_unlock(&new->d_lock);
1015 new = NULL;
1016 }
1017 rcu_read_unlock();
1018 return new;
1019}
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035int have_submounts(struct dentry *parent)
1036{
1037 struct dentry *this_parent;
1038 struct list_head *next;
1039 unsigned seq;
1040 int locked = 0;
1041
1042 seq = read_seqbegin(&rename_lock);
1043again:
1044 this_parent = parent;
1045
1046 if (d_mountpoint(parent))
1047 goto positive;
1048 spin_lock(&this_parent->d_lock);
1049repeat:
1050 next = this_parent->d_subdirs.next;
1051resume:
1052 while (next != &this_parent->d_subdirs) {
1053 struct list_head *tmp = next;
1054 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1055 next = tmp->next;
1056
1057 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1058
1059 if (d_mountpoint(dentry)) {
1060 spin_unlock(&dentry->d_lock);
1061 spin_unlock(&this_parent->d_lock);
1062 goto positive;
1063 }
1064 if (!list_empty(&dentry->d_subdirs)) {
1065 spin_unlock(&this_parent->d_lock);
1066 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1067 this_parent = dentry;
1068 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1069 goto repeat;
1070 }
1071 spin_unlock(&dentry->d_lock);
1072 }
1073
1074
1075
1076 if (this_parent != parent) {
1077 struct dentry *child = this_parent;
1078 this_parent = try_to_ascend(this_parent, locked, seq);
1079 if (!this_parent)
1080 goto rename_retry;
1081 next = child->d_u.d_child.next;
1082 goto resume;
1083 }
1084 spin_unlock(&this_parent->d_lock);
1085 if (!locked && read_seqretry(&rename_lock, seq))
1086 goto rename_retry;
1087 if (locked)
1088 write_sequnlock(&rename_lock);
1089 return 0;
1090positive:
1091 if (!locked && read_seqretry(&rename_lock, seq))
1092 goto rename_retry;
1093 if (locked)
1094 write_sequnlock(&rename_lock);
1095 return 1;
1096
1097rename_retry:
1098 if (locked)
1099 goto again;
1100 locked = 1;
1101 write_seqlock(&rename_lock);
1102 goto again;
1103}
1104EXPORT_SYMBOL(have_submounts);
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120static int select_parent(struct dentry *parent, struct list_head *dispose)
1121{
1122 struct dentry *this_parent;
1123 struct list_head *next;
1124 unsigned seq;
1125 int found = 0;
1126 int locked = 0;
1127
1128 seq = read_seqbegin(&rename_lock);
1129again:
1130 this_parent = parent;
1131 spin_lock(&this_parent->d_lock);
1132repeat:
1133 next = this_parent->d_subdirs.next;
1134resume:
1135 while (next != &this_parent->d_subdirs) {
1136 struct list_head *tmp = next;
1137 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1138 next = tmp->next;
1139
1140 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150 if (dentry->d_count) {
1151 dentry_lru_del(dentry);
1152 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1153 dentry_lru_move_list(dentry, dispose);
1154 dentry->d_flags |= DCACHE_SHRINK_LIST;
1155 found++;
1156 }
1157
1158
1159
1160
1161
1162 if (found && need_resched()) {
1163 spin_unlock(&dentry->d_lock);
1164 goto out;
1165 }
1166
1167
1168
1169
1170 if (!list_empty(&dentry->d_subdirs)) {
1171 spin_unlock(&this_parent->d_lock);
1172 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1173 this_parent = dentry;
1174 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1175 goto repeat;
1176 }
1177
1178 spin_unlock(&dentry->d_lock);
1179 }
1180
1181
1182
1183 if (this_parent != parent) {
1184 struct dentry *child = this_parent;
1185 this_parent = try_to_ascend(this_parent, locked, seq);
1186 if (!this_parent)
1187 goto rename_retry;
1188 next = child->d_u.d_child.next;
1189 goto resume;
1190 }
1191out:
1192 spin_unlock(&this_parent->d_lock);
1193 if (!locked && read_seqretry(&rename_lock, seq))
1194 goto rename_retry;
1195 if (locked)
1196 write_sequnlock(&rename_lock);
1197 return found;
1198
1199rename_retry:
1200 if (found)
1201 return found;
1202 if (locked)
1203 goto again;
1204 locked = 1;
1205 write_seqlock(&rename_lock);
1206 goto again;
1207}
1208
1209
1210
1211
1212
1213
1214
1215void shrink_dcache_parent(struct dentry * parent)
1216{
1217 LIST_HEAD(dispose);
1218 int found;
1219
1220 while ((found = select_parent(parent, &dispose)) != 0) {
1221 shrink_dentry_list(&dispose);
1222 cond_resched();
1223 }
1224}
1225EXPORT_SYMBOL(shrink_dcache_parent);
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1238{
1239 struct dentry *dentry;
1240 char *dname;
1241
1242 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1243 if (!dentry)
1244 return NULL;
1245
1246
1247
1248
1249
1250
1251
1252 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1253 if (name->len > DNAME_INLINE_LEN-1) {
1254 dname = kmalloc(name->len + 1, GFP_KERNEL);
1255 if (!dname) {
1256 kmem_cache_free(dentry_cache, dentry);
1257 return NULL;
1258 }
1259 } else {
1260 dname = dentry->d_iname;
1261 }
1262
1263 dentry->d_name.len = name->len;
1264 dentry->d_name.hash = name->hash;
1265 memcpy(dname, name->name, name->len);
1266 dname[name->len] = 0;
1267
1268
1269 smp_wmb();
1270 dentry->d_name.name = dname;
1271
1272 dentry->d_count = 1;
1273 dentry->d_flags = 0;
1274 spin_lock_init(&dentry->d_lock);
1275 seqcount_init(&dentry->d_seq);
1276 dentry->d_inode = NULL;
1277 dentry->d_parent = dentry;
1278 dentry->d_sb = sb;
1279 dentry->d_op = NULL;
1280 dentry->d_fsdata = NULL;
1281 INIT_HLIST_BL_NODE(&dentry->d_hash);
1282 INIT_LIST_HEAD(&dentry->d_lru);
1283 INIT_LIST_HEAD(&dentry->d_subdirs);
1284 INIT_HLIST_NODE(&dentry->d_alias);
1285 INIT_LIST_HEAD(&dentry->d_u.d_child);
1286 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1287
1288 this_cpu_inc(nr_dentry);
1289
1290 return dentry;
1291}
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1303{
1304 struct dentry *dentry = __d_alloc(parent->d_sb, name);
1305 if (!dentry)
1306 return NULL;
1307
1308 spin_lock(&parent->d_lock);
1309
1310
1311
1312
1313 __dget_dlock(parent);
1314 dentry->d_parent = parent;
1315 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1316 spin_unlock(&parent->d_lock);
1317
1318 return dentry;
1319}
1320EXPORT_SYMBOL(d_alloc);
1321
1322struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1323{
1324 struct dentry *dentry = __d_alloc(sb, name);
1325 if (dentry)
1326 dentry->d_flags |= DCACHE_DISCONNECTED;
1327 return dentry;
1328}
1329EXPORT_SYMBOL(d_alloc_pseudo);
1330
1331struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1332{
1333 struct qstr q;
1334
1335 q.name = name;
1336 q.len = strlen(name);
1337 q.hash = full_name_hash(q.name, q.len);
1338 return d_alloc(parent, &q);
1339}
1340EXPORT_SYMBOL(d_alloc_name);
1341
1342void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1343{
1344 WARN_ON_ONCE(dentry->d_op);
1345 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1346 DCACHE_OP_COMPARE |
1347 DCACHE_OP_REVALIDATE |
1348 DCACHE_OP_WEAK_REVALIDATE |
1349 DCACHE_OP_DELETE ));
1350 dentry->d_op = op;
1351 if (!op)
1352 return;
1353 if (op->d_hash)
1354 dentry->d_flags |= DCACHE_OP_HASH;
1355 if (op->d_compare)
1356 dentry->d_flags |= DCACHE_OP_COMPARE;
1357 if (op->d_revalidate)
1358 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1359 if (op->d_weak_revalidate)
1360 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1361 if (op->d_delete)
1362 dentry->d_flags |= DCACHE_OP_DELETE;
1363 if (op->d_prune)
1364 dentry->d_flags |= DCACHE_OP_PRUNE;
1365
1366}
1367EXPORT_SYMBOL(d_set_d_op);
1368
1369static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1370{
1371 spin_lock(&dentry->d_lock);
1372 if (inode) {
1373 if (unlikely(IS_AUTOMOUNT(inode)))
1374 dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
1375 hlist_add_head(&dentry->d_alias, &inode->i_dentry);
1376 }
1377 dentry->d_inode = inode;
1378 dentry_rcuwalk_barrier(dentry);
1379 spin_unlock(&dentry->d_lock);
1380 fsnotify_d_instantiate(dentry, inode);
1381}
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398void d_instantiate(struct dentry *entry, struct inode * inode)
1399{
1400 BUG_ON(!hlist_unhashed(&entry->d_alias));
1401 if (inode)
1402 spin_lock(&inode->i_lock);
1403 __d_instantiate(entry, inode);
1404 if (inode)
1405 spin_unlock(&inode->i_lock);
1406 security_d_instantiate(entry, inode);
1407}
1408EXPORT_SYMBOL(d_instantiate);
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426static struct dentry *__d_instantiate_unique(struct dentry *entry,
1427 struct inode *inode)
1428{
1429 struct dentry *alias;
1430 int len = entry->d_name.len;
1431 const char *name = entry->d_name.name;
1432 unsigned int hash = entry->d_name.hash;
1433
1434 if (!inode) {
1435 __d_instantiate(entry, NULL);
1436 return NULL;
1437 }
1438
1439 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1440
1441
1442
1443
1444
1445 if (alias->d_name.hash != hash)
1446 continue;
1447 if (alias->d_parent != entry->d_parent)
1448 continue;
1449 if (alias->d_name.len != len)
1450 continue;
1451 if (dentry_cmp(alias, name, len))
1452 continue;
1453 __dget(alias);
1454 return alias;
1455 }
1456
1457 __d_instantiate(entry, inode);
1458 return NULL;
1459}
1460
1461struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1462{
1463 struct dentry *result;
1464
1465 BUG_ON(!hlist_unhashed(&entry->d_alias));
1466
1467 if (inode)
1468 spin_lock(&inode->i_lock);
1469 result = __d_instantiate_unique(entry, inode);
1470 if (inode)
1471 spin_unlock(&inode->i_lock);
1472
1473 if (!result) {
1474 security_d_instantiate(entry, inode);
1475 return NULL;
1476 }
1477
1478 BUG_ON(!d_unhashed(result));
1479 iput(inode);
1480 return result;
1481}
1482
1483EXPORT_SYMBOL(d_instantiate_unique);
1484
1485struct dentry *d_make_root(struct inode *root_inode)
1486{
1487 struct dentry *res = NULL;
1488
1489 if (root_inode) {
1490 static const struct qstr name = QSTR_INIT("/", 1);
1491
1492 res = __d_alloc(root_inode->i_sb, &name);
1493 if (res)
1494 d_instantiate(res, root_inode);
1495 else
1496 iput(root_inode);
1497 }
1498 return res;
1499}
1500EXPORT_SYMBOL(d_make_root);
1501
1502static struct dentry * __d_find_any_alias(struct inode *inode)
1503{
1504 struct dentry *alias;
1505
1506 if (hlist_empty(&inode->i_dentry))
1507 return NULL;
1508 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1509 __dget(alias);
1510 return alias;
1511}
1512
1513
1514
1515
1516
1517
1518
1519
1520struct dentry *d_find_any_alias(struct inode *inode)
1521{
1522 struct dentry *de;
1523
1524 spin_lock(&inode->i_lock);
1525 de = __d_find_any_alias(inode);
1526 spin_unlock(&inode->i_lock);
1527 return de;
1528}
1529EXPORT_SYMBOL(d_find_any_alias);
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549struct dentry *d_obtain_alias(struct inode *inode)
1550{
1551 static const struct qstr anonstring = QSTR_INIT("/", 1);
1552 struct dentry *tmp;
1553 struct dentry *res;
1554
1555 if (!inode)
1556 return ERR_PTR(-ESTALE);
1557 if (IS_ERR(inode))
1558 return ERR_CAST(inode);
1559
1560 res = d_find_any_alias(inode);
1561 if (res)
1562 goto out_iput;
1563
1564 tmp = __d_alloc(inode->i_sb, &anonstring);
1565 if (!tmp) {
1566 res = ERR_PTR(-ENOMEM);
1567 goto out_iput;
1568 }
1569
1570 spin_lock(&inode->i_lock);
1571 res = __d_find_any_alias(inode);
1572 if (res) {
1573 spin_unlock(&inode->i_lock);
1574 dput(tmp);
1575 goto out_iput;
1576 }
1577
1578
1579 spin_lock(&tmp->d_lock);
1580 tmp->d_inode = inode;
1581 tmp->d_flags |= DCACHE_DISCONNECTED;
1582 hlist_add_head(&tmp->d_alias, &inode->i_dentry);
1583 hlist_bl_lock(&tmp->d_sb->s_anon);
1584 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1585 hlist_bl_unlock(&tmp->d_sb->s_anon);
1586 spin_unlock(&tmp->d_lock);
1587 spin_unlock(&inode->i_lock);
1588 security_d_instantiate(tmp, inode);
1589
1590 return tmp;
1591
1592 out_iput:
1593 if (res && !IS_ERR(res))
1594 security_d_instantiate(res, inode);
1595 iput(inode);
1596 return res;
1597}
1598EXPORT_SYMBOL(d_obtain_alias);
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1617{
1618 struct dentry *new = NULL;
1619
1620 if (IS_ERR(inode))
1621 return ERR_CAST(inode);
1622
1623 if (inode && S_ISDIR(inode->i_mode)) {
1624 spin_lock(&inode->i_lock);
1625 new = __d_find_alias(inode, 1);
1626 if (new) {
1627 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1628 spin_unlock(&inode->i_lock);
1629 security_d_instantiate(new, inode);
1630 d_move(new, dentry);
1631 iput(inode);
1632 } else {
1633
1634 __d_instantiate(dentry, inode);
1635 spin_unlock(&inode->i_lock);
1636 security_d_instantiate(dentry, inode);
1637 d_rehash(dentry);
1638 }
1639 } else
1640 d_add(dentry, inode);
1641 return new;
1642}
1643EXPORT_SYMBOL(d_splice_alias);
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1662 struct qstr *name)
1663{
1664 struct dentry *found;
1665 struct dentry *new;
1666
1667
1668
1669
1670
1671 found = d_hash_and_lookup(dentry->d_parent, name);
1672 if (unlikely(IS_ERR(found)))
1673 goto err_out;
1674 if (!found) {
1675 new = d_alloc(dentry->d_parent, name);
1676 if (!new) {
1677 found = ERR_PTR(-ENOMEM);
1678 goto err_out;
1679 }
1680
1681 found = d_splice_alias(inode, new);
1682 if (found) {
1683 dput(new);
1684 return found;
1685 }
1686 return new;
1687 }
1688
1689
1690
1691
1692
1693
1694
1695 if (found->d_inode) {
1696 if (unlikely(found->d_inode != inode)) {
1697
1698 BUG_ON(!is_bad_inode(inode));
1699 BUG_ON(!is_bad_inode(found->d_inode));
1700 }
1701 iput(inode);
1702 return found;
1703 }
1704
1705
1706
1707
1708
1709 new = d_splice_alias(inode, found);
1710 if (new) {
1711 dput(found);
1712 found = new;
1713 }
1714 return found;
1715
1716err_out:
1717 iput(inode);
1718 return found;
1719}
1720EXPORT_SYMBOL(d_add_ci);
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736enum slow_d_compare {
1737 D_COMP_OK,
1738 D_COMP_NOMATCH,
1739 D_COMP_SEQRETRY,
1740};
1741
1742static noinline enum slow_d_compare slow_dentry_cmp(
1743 const struct dentry *parent,
1744 struct inode *inode,
1745 struct dentry *dentry,
1746 unsigned int seq,
1747 const struct qstr *name)
1748{
1749 int tlen = dentry->d_name.len;
1750 const char *tname = dentry->d_name.name;
1751 struct inode *i = dentry->d_inode;
1752
1753 if (read_seqcount_retry(&dentry->d_seq, seq)) {
1754 cpu_relax();
1755 return D_COMP_SEQRETRY;
1756 }
1757 if (parent->d_op->d_compare(parent, inode,
1758 dentry, i,
1759 tlen, tname, name))
1760 return D_COMP_NOMATCH;
1761 return D_COMP_OK;
1762}
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794struct dentry *__d_lookup_rcu(const struct dentry *parent,
1795 const struct qstr *name,
1796 unsigned *seqp, struct inode *inode)
1797{
1798 u64 hashlen = name->hash_len;
1799 const unsigned char *str = name->name;
1800 struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
1801 struct hlist_bl_node *node;
1802 struct dentry *dentry;
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
1825 unsigned seq;
1826
1827seqretry:
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843 seq = raw_seqcount_begin(&dentry->d_seq);
1844 if (dentry->d_parent != parent)
1845 continue;
1846 if (d_unhashed(dentry))
1847 continue;
1848 *seqp = seq;
1849
1850 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
1851 if (dentry->d_name.hash != hashlen_hash(hashlen))
1852 continue;
1853 switch (slow_dentry_cmp(parent, inode, dentry, seq, name)) {
1854 case D_COMP_OK:
1855 return dentry;
1856 case D_COMP_NOMATCH:
1857 continue;
1858 default:
1859 goto seqretry;
1860 }
1861 }
1862
1863 if (dentry->d_name.hash_len != hashlen)
1864 continue;
1865 if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
1866 return dentry;
1867 }
1868 return NULL;
1869}
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
1883{
1884 struct dentry *dentry;
1885 unsigned seq;
1886
1887 do {
1888 seq = read_seqbegin(&rename_lock);
1889 dentry = __d_lookup(parent, name);
1890 if (dentry)
1891 break;
1892 } while (read_seqretry(&rename_lock, seq));
1893 return dentry;
1894}
1895EXPORT_SYMBOL(d_lookup);
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
1913{
1914 unsigned int len = name->len;
1915 unsigned int hash = name->hash;
1916 const unsigned char *str = name->name;
1917 struct hlist_bl_head *b = d_hash(parent, hash);
1918 struct hlist_bl_node *node;
1919 struct dentry *found = NULL;
1920 struct dentry *dentry;
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942 rcu_read_lock();
1943
1944 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
1945
1946 if (dentry->d_name.hash != hash)
1947 continue;
1948
1949 spin_lock(&dentry->d_lock);
1950 if (dentry->d_parent != parent)
1951 goto next;
1952 if (d_unhashed(dentry))
1953 goto next;
1954
1955
1956
1957
1958
1959 if (parent->d_flags & DCACHE_OP_COMPARE) {
1960 int tlen = dentry->d_name.len;
1961 const char *tname = dentry->d_name.name;
1962 if (parent->d_op->d_compare(parent, parent->d_inode,
1963 dentry, dentry->d_inode,
1964 tlen, tname, name))
1965 goto next;
1966 } else {
1967 if (dentry->d_name.len != len)
1968 goto next;
1969 if (dentry_cmp(dentry, str, len))
1970 goto next;
1971 }
1972
1973 dentry->d_count++;
1974 found = dentry;
1975 spin_unlock(&dentry->d_lock);
1976 break;
1977next:
1978 spin_unlock(&dentry->d_lock);
1979 }
1980 rcu_read_unlock();
1981
1982 return found;
1983}
1984
1985
1986
1987
1988
1989
1990
1991
1992struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
1993{
1994
1995
1996
1997
1998
1999 name->hash = full_name_hash(name->name, name->len);
2000 if (dir->d_flags & DCACHE_OP_HASH) {
2001 int err = dir->d_op->d_hash(dir, dir->d_inode, name);
2002 if (unlikely(err < 0))
2003 return ERR_PTR(err);
2004 }
2005 return d_lookup(dir, name);
2006}
2007EXPORT_SYMBOL(d_hash_and_lookup);
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020int d_validate(struct dentry *dentry, struct dentry *dparent)
2021{
2022 struct dentry *child;
2023
2024 spin_lock(&dparent->d_lock);
2025 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2026 if (dentry == child) {
2027 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2028 __dget_dlock(dentry);
2029 spin_unlock(&dentry->d_lock);
2030 spin_unlock(&dparent->d_lock);
2031 return 1;
2032 }
2033 }
2034 spin_unlock(&dparent->d_lock);
2035
2036 return 0;
2037}
2038EXPORT_SYMBOL(d_validate);
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061void d_delete(struct dentry * dentry)
2062{
2063 struct inode *inode;
2064 int isdir = 0;
2065
2066
2067
2068again:
2069 spin_lock(&dentry->d_lock);
2070 inode = dentry->d_inode;
2071 isdir = S_ISDIR(inode->i_mode);
2072 if (dentry->d_count == 1) {
2073 if (!spin_trylock(&inode->i_lock)) {
2074 spin_unlock(&dentry->d_lock);
2075 cpu_relax();
2076 goto again;
2077 }
2078 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2079 dentry_unlink_inode(dentry);
2080 fsnotify_nameremove(dentry, isdir);
2081 return;
2082 }
2083
2084 if (!d_unhashed(dentry))
2085 __d_drop(dentry);
2086
2087 spin_unlock(&dentry->d_lock);
2088
2089 fsnotify_nameremove(dentry, isdir);
2090}
2091EXPORT_SYMBOL(d_delete);
2092
2093static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2094{
2095 BUG_ON(!d_unhashed(entry));
2096 hlist_bl_lock(b);
2097 entry->d_flags |= DCACHE_RCUACCESS;
2098 hlist_bl_add_head_rcu(&entry->d_hash, b);
2099 hlist_bl_unlock(b);
2100}
2101
2102static void _d_rehash(struct dentry * entry)
2103{
2104 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2105}
2106
2107
2108
2109
2110
2111
2112
2113
2114void d_rehash(struct dentry * entry)
2115{
2116 spin_lock(&entry->d_lock);
2117 _d_rehash(entry);
2118 spin_unlock(&entry->d_lock);
2119}
2120EXPORT_SYMBOL(d_rehash);
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2137{
2138 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2139 BUG_ON(dentry->d_name.len != name->len);
2140
2141 spin_lock(&dentry->d_lock);
2142 write_seqcount_begin(&dentry->d_seq);
2143 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2144 write_seqcount_end(&dentry->d_seq);
2145 spin_unlock(&dentry->d_lock);
2146}
2147EXPORT_SYMBOL(dentry_update_name_case);
2148
2149static void switch_names(struct dentry *dentry, struct dentry *target)
2150{
2151 if (dname_external(target)) {
2152 if (dname_external(dentry)) {
2153
2154
2155
2156 swap(target->d_name.name, dentry->d_name.name);
2157 } else {
2158
2159
2160
2161
2162 memcpy(target->d_iname, dentry->d_name.name,
2163 dentry->d_name.len + 1);
2164 dentry->d_name.name = target->d_name.name;
2165 target->d_name.name = target->d_iname;
2166 }
2167 } else {
2168 if (dname_external(dentry)) {
2169
2170
2171
2172
2173 memcpy(dentry->d_iname, target->d_name.name,
2174 target->d_name.len + 1);
2175 target->d_name.name = dentry->d_name.name;
2176 dentry->d_name.name = dentry->d_iname;
2177 } else {
2178
2179
2180
2181 memcpy(dentry->d_iname, target->d_name.name,
2182 target->d_name.len + 1);
2183 dentry->d_name.len = target->d_name.len;
2184 return;
2185 }
2186 }
2187 swap(dentry->d_name.len, target->d_name.len);
2188}
2189
2190static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2191{
2192
2193
2194
2195 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2196 spin_lock(&target->d_parent->d_lock);
2197 else {
2198 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2199 spin_lock(&dentry->d_parent->d_lock);
2200 spin_lock_nested(&target->d_parent->d_lock,
2201 DENTRY_D_LOCK_NESTED);
2202 } else {
2203 spin_lock(&target->d_parent->d_lock);
2204 spin_lock_nested(&dentry->d_parent->d_lock,
2205 DENTRY_D_LOCK_NESTED);
2206 }
2207 }
2208 if (target < dentry) {
2209 spin_lock_nested(&target->d_lock, 2);
2210 spin_lock_nested(&dentry->d_lock, 3);
2211 } else {
2212 spin_lock_nested(&dentry->d_lock, 2);
2213 spin_lock_nested(&target->d_lock, 3);
2214 }
2215}
2216
2217static void dentry_unlock_parents_for_move(struct dentry *dentry,
2218 struct dentry *target)
2219{
2220 if (target->d_parent != dentry->d_parent)
2221 spin_unlock(&dentry->d_parent->d_lock);
2222 if (target->d_parent != target)
2223 spin_unlock(&target->d_parent->d_lock);
2224}
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247static void __d_move(struct dentry * dentry, struct dentry * target)
2248{
2249 if (!dentry->d_inode)
2250 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2251
2252 BUG_ON(d_ancestor(dentry, target));
2253 BUG_ON(d_ancestor(target, dentry));
2254
2255 dentry_lock_for_move(dentry, target);
2256
2257 write_seqcount_begin(&dentry->d_seq);
2258 write_seqcount_begin(&target->d_seq);
2259
2260
2261
2262
2263
2264
2265
2266 __d_drop(dentry);
2267 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2268
2269
2270 __d_drop(target);
2271
2272 list_del(&dentry->d_u.d_child);
2273 list_del(&target->d_u.d_child);
2274
2275
2276 switch_names(dentry, target);
2277 swap(dentry->d_name.hash, target->d_name.hash);
2278
2279
2280 if (IS_ROOT(dentry)) {
2281 dentry->d_parent = target->d_parent;
2282 target->d_parent = target;
2283 INIT_LIST_HEAD(&target->d_u.d_child);
2284 } else {
2285 swap(dentry->d_parent, target->d_parent);
2286
2287
2288 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2289 }
2290
2291 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2292
2293 write_seqcount_end(&target->d_seq);
2294 write_seqcount_end(&dentry->d_seq);
2295
2296 dentry_unlock_parents_for_move(dentry, target);
2297 spin_unlock(&target->d_lock);
2298 fsnotify_d_move(dentry);
2299 spin_unlock(&dentry->d_lock);
2300}
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311void d_move(struct dentry *dentry, struct dentry *target)
2312{
2313 write_seqlock(&rename_lock);
2314 __d_move(dentry, target);
2315 write_sequnlock(&rename_lock);
2316}
2317EXPORT_SYMBOL(d_move);
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2328{
2329 struct dentry *p;
2330
2331 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2332 if (p->d_parent == p1)
2333 return p;
2334 }
2335 return NULL;
2336}
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347static struct dentry *__d_unalias(struct inode *inode,
2348 struct dentry *dentry, struct dentry *alias)
2349{
2350 struct mutex *m1 = NULL, *m2 = NULL;
2351 struct dentry *ret = ERR_PTR(-EBUSY);
2352
2353
2354 if (alias->d_parent == dentry->d_parent)
2355 goto out_unalias;
2356
2357
2358 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2359 goto out_err;
2360 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2361 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2362 goto out_err;
2363 m2 = &alias->d_parent->d_inode->i_mutex;
2364out_unalias:
2365 if (likely(!d_mountpoint(alias))) {
2366 __d_move(alias, dentry);
2367 ret = alias;
2368 }
2369out_err:
2370 spin_unlock(&inode->i_lock);
2371 if (m2)
2372 mutex_unlock(m2);
2373 if (m1)
2374 mutex_unlock(m1);
2375 return ret;
2376}
2377
2378
2379
2380
2381
2382
2383static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2384{
2385 struct dentry *dparent;
2386
2387 dentry_lock_for_move(anon, dentry);
2388
2389 write_seqcount_begin(&dentry->d_seq);
2390 write_seqcount_begin(&anon->d_seq);
2391
2392 dparent = dentry->d_parent;
2393
2394 switch_names(dentry, anon);
2395 swap(dentry->d_name.hash, anon->d_name.hash);
2396
2397 dentry->d_parent = dentry;
2398 list_del_init(&dentry->d_u.d_child);
2399 anon->d_parent = dparent;
2400 list_move(&anon->d_u.d_child, &dparent->d_subdirs);
2401
2402 write_seqcount_end(&dentry->d_seq);
2403 write_seqcount_end(&anon->d_seq);
2404
2405 dentry_unlock_parents_for_move(anon, dentry);
2406 spin_unlock(&dentry->d_lock);
2407
2408
2409 anon->d_flags &= ~DCACHE_DISCONNECTED;
2410}
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2422{
2423 struct dentry *actual;
2424
2425 BUG_ON(!d_unhashed(dentry));
2426
2427 if (!inode) {
2428 actual = dentry;
2429 __d_instantiate(dentry, NULL);
2430 d_rehash(actual);
2431 goto out_nolock;
2432 }
2433
2434 spin_lock(&inode->i_lock);
2435
2436 if (S_ISDIR(inode->i_mode)) {
2437 struct dentry *alias;
2438
2439
2440 alias = __d_find_alias(inode, 0);
2441 if (alias) {
2442 actual = alias;
2443 write_seqlock(&rename_lock);
2444
2445 if (d_ancestor(alias, dentry)) {
2446
2447 actual = ERR_PTR(-ELOOP);
2448 spin_unlock(&inode->i_lock);
2449 } else if (IS_ROOT(alias)) {
2450
2451
2452 __d_materialise_dentry(dentry, alias);
2453 write_sequnlock(&rename_lock);
2454 __d_drop(alias);
2455 goto found;
2456 } else {
2457
2458
2459 actual = __d_unalias(inode, dentry, alias);
2460 }
2461 write_sequnlock(&rename_lock);
2462 if (IS_ERR(actual)) {
2463 if (PTR_ERR(actual) == -ELOOP)
2464 pr_warn_ratelimited(
2465 "VFS: Lookup of '%s' in %s %s"
2466 " would have caused loop\n",
2467 dentry->d_name.name,
2468 inode->i_sb->s_type->name,
2469 inode->i_sb->s_id);
2470 dput(alias);
2471 }
2472 goto out_nolock;
2473 }
2474 }
2475
2476
2477 actual = __d_instantiate_unique(dentry, inode);
2478 if (!actual)
2479 actual = dentry;
2480 else
2481 BUG_ON(!d_unhashed(actual));
2482
2483 spin_lock(&actual->d_lock);
2484found:
2485 _d_rehash(actual);
2486 spin_unlock(&actual->d_lock);
2487 spin_unlock(&inode->i_lock);
2488out_nolock:
2489 if (actual == dentry) {
2490 security_d_instantiate(dentry, inode);
2491 return NULL;
2492 }
2493
2494 iput(inode);
2495 return actual;
2496}
2497EXPORT_SYMBOL_GPL(d_materialise_unique);
2498
2499static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2500{
2501 *buflen -= namelen;
2502 if (*buflen < 0)
2503 return -ENAMETOOLONG;
2504 *buffer -= namelen;
2505 memcpy(*buffer, str, namelen);
2506 return 0;
2507}
2508
2509static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2510{
2511 return prepend(buffer, buflen, name->name, name->len);
2512}
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523static int prepend_path(const struct path *path,
2524 const struct path *root,
2525 char **buffer, int *buflen)
2526{
2527 struct dentry *dentry = path->dentry;
2528 struct vfsmount *vfsmnt = path->mnt;
2529 struct mount *mnt = real_mount(vfsmnt);
2530 bool slash = false;
2531 int error = 0;
2532
2533 while (dentry != root->dentry || vfsmnt != root->mnt) {
2534 struct dentry * parent;
2535
2536 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2537
2538 if (!mnt_has_parent(mnt))
2539 goto global_root;
2540 dentry = mnt->mnt_mountpoint;
2541 mnt = mnt->mnt_parent;
2542 vfsmnt = &mnt->mnt;
2543 continue;
2544 }
2545 parent = dentry->d_parent;
2546 prefetch(parent);
2547 spin_lock(&dentry->d_lock);
2548 error = prepend_name(buffer, buflen, &dentry->d_name);
2549 spin_unlock(&dentry->d_lock);
2550 if (!error)
2551 error = prepend(buffer, buflen, "/", 1);
2552 if (error)
2553 break;
2554
2555 slash = true;
2556 dentry = parent;
2557 }
2558
2559 if (!error && !slash)
2560 error = prepend(buffer, buflen, "/", 1);
2561
2562 return error;
2563
2564global_root:
2565
2566
2567
2568
2569 if (IS_ROOT(dentry) &&
2570 (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
2571 WARN(1, "Root dentry has weird name <%.*s>\n",
2572 (int) dentry->d_name.len, dentry->d_name.name);
2573 }
2574 if (!slash)
2575 error = prepend(buffer, buflen, "/", 1);
2576 if (!error)
2577 error = is_mounted(vfsmnt) ? 1 : 2;
2578 return error;
2579}
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597char *__d_path(const struct path *path,
2598 const struct path *root,
2599 char *buf, int buflen)
2600{
2601 char *res = buf + buflen;
2602 int error;
2603
2604 prepend(&res, &buflen, "\0", 1);
2605 br_read_lock(&vfsmount_lock);
2606 write_seqlock(&rename_lock);
2607 error = prepend_path(path, root, &res, &buflen);
2608 write_sequnlock(&rename_lock);
2609 br_read_unlock(&vfsmount_lock);
2610
2611 if (error < 0)
2612 return ERR_PTR(error);
2613 if (error > 0)
2614 return NULL;
2615 return res;
2616}
2617
2618char *d_absolute_path(const struct path *path,
2619 char *buf, int buflen)
2620{
2621 struct path root = {};
2622 char *res = buf + buflen;
2623 int error;
2624
2625 prepend(&res, &buflen, "\0", 1);
2626 br_read_lock(&vfsmount_lock);
2627 write_seqlock(&rename_lock);
2628 error = prepend_path(path, &root, &res, &buflen);
2629 write_sequnlock(&rename_lock);
2630 br_read_unlock(&vfsmount_lock);
2631
2632 if (error > 1)
2633 error = -EINVAL;
2634 if (error < 0)
2635 return ERR_PTR(error);
2636 return res;
2637}
2638
2639
2640
2641
2642static int path_with_deleted(const struct path *path,
2643 const struct path *root,
2644 char **buf, int *buflen)
2645{
2646 prepend(buf, buflen, "\0", 1);
2647 if (d_unlinked(path->dentry)) {
2648 int error = prepend(buf, buflen, " (deleted)", 10);
2649 if (error)
2650 return error;
2651 }
2652
2653 return prepend_path(path, root, buf, buflen);
2654}
2655
2656static int prepend_unreachable(char **buffer, int *buflen)
2657{
2658 return prepend(buffer, buflen, "(unreachable)", 13);
2659}
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677char *d_path(const struct path *path, char *buf, int buflen)
2678{
2679 char *res = buf + buflen;
2680 struct path root;
2681 int error;
2682
2683
2684
2685
2686
2687
2688
2689
2690 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2691 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2692
2693 get_fs_root(current->fs, &root);
2694 br_read_lock(&vfsmount_lock);
2695 write_seqlock(&rename_lock);
2696 error = path_with_deleted(path, &root, &res, &buflen);
2697 write_sequnlock(&rename_lock);
2698 br_read_unlock(&vfsmount_lock);
2699 if (error < 0)
2700 res = ERR_PTR(error);
2701 path_put(&root);
2702 return res;
2703}
2704EXPORT_SYMBOL(d_path);
2705
2706
2707
2708
2709char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2710 const char *fmt, ...)
2711{
2712 va_list args;
2713 char temp[64];
2714 int sz;
2715
2716 va_start(args, fmt);
2717 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
2718 va_end(args);
2719
2720 if (sz > sizeof(temp) || sz > buflen)
2721 return ERR_PTR(-ENAMETOOLONG);
2722
2723 buffer += buflen - sz;
2724 return memcpy(buffer, temp, sz);
2725}
2726
2727
2728
2729
2730static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
2731{
2732 char *end = buf + buflen;
2733 char *retval;
2734
2735 prepend(&end, &buflen, "\0", 1);
2736 if (buflen < 1)
2737 goto Elong;
2738
2739 retval = end-1;
2740 *retval = '/';
2741
2742 while (!IS_ROOT(dentry)) {
2743 struct dentry *parent = dentry->d_parent;
2744 int error;
2745
2746 prefetch(parent);
2747 spin_lock(&dentry->d_lock);
2748 error = prepend_name(&end, &buflen, &dentry->d_name);
2749 spin_unlock(&dentry->d_lock);
2750 if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
2751 goto Elong;
2752
2753 retval = end;
2754 dentry = parent;
2755 }
2756 return retval;
2757Elong:
2758 return ERR_PTR(-ENAMETOOLONG);
2759}
2760
2761char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
2762{
2763 char *retval;
2764
2765 write_seqlock(&rename_lock);
2766 retval = __dentry_path(dentry, buf, buflen);
2767 write_sequnlock(&rename_lock);
2768
2769 return retval;
2770}
2771EXPORT_SYMBOL(dentry_path_raw);
2772
2773char *dentry_path(struct dentry *dentry, char *buf, int buflen)
2774{
2775 char *p = NULL;
2776 char *retval;
2777
2778 write_seqlock(&rename_lock);
2779 if (d_unlinked(dentry)) {
2780 p = buf + buflen;
2781 if (prepend(&p, &buflen, "//deleted", 10) != 0)
2782 goto Elong;
2783 buflen++;
2784 }
2785 retval = __dentry_path(dentry, buf, buflen);
2786 write_sequnlock(&rename_lock);
2787 if (!IS_ERR(retval) && p)
2788 *p = '/';
2789 return retval;
2790Elong:
2791 return ERR_PTR(-ENAMETOOLONG);
2792}
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
2813{
2814 int error;
2815 struct path pwd, root;
2816 char *page = (char *) __get_free_page(GFP_USER);
2817
2818 if (!page)
2819 return -ENOMEM;
2820
2821 get_fs_root_and_pwd(current->fs, &root, &pwd);
2822
2823 error = -ENOENT;
2824 br_read_lock(&vfsmount_lock);
2825 write_seqlock(&rename_lock);
2826 if (!d_unlinked(pwd.dentry)) {
2827 unsigned long len;
2828 char *cwd = page + PAGE_SIZE;
2829 int buflen = PAGE_SIZE;
2830
2831 prepend(&cwd, &buflen, "\0", 1);
2832 error = prepend_path(&pwd, &root, &cwd, &buflen);
2833 write_sequnlock(&rename_lock);
2834 br_read_unlock(&vfsmount_lock);
2835
2836 if (error < 0)
2837 goto out;
2838
2839
2840 if (error > 0) {
2841 error = prepend_unreachable(&cwd, &buflen);
2842 if (error)
2843 goto out;
2844 }
2845
2846 error = -ERANGE;
2847 len = PAGE_SIZE + page - cwd;
2848 if (len <= size) {
2849 error = len;
2850 if (copy_to_user(buf, cwd, len))
2851 error = -EFAULT;
2852 }
2853 } else {
2854 write_sequnlock(&rename_lock);
2855 br_read_unlock(&vfsmount_lock);
2856 }
2857
2858out:
2859 path_put(&pwd);
2860 path_put(&root);
2861 free_page((unsigned long) page);
2862 return error;
2863}
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2882{
2883 int result;
2884 unsigned seq;
2885
2886 if (new_dentry == old_dentry)
2887 return 1;
2888
2889 do {
2890
2891 seq = read_seqbegin(&rename_lock);
2892
2893
2894
2895
2896 rcu_read_lock();
2897 if (d_ancestor(old_dentry, new_dentry))
2898 result = 1;
2899 else
2900 result = 0;
2901 rcu_read_unlock();
2902 } while (read_seqretry(&rename_lock, seq));
2903
2904 return result;
2905}
2906
2907void d_genocide(struct dentry *root)
2908{
2909 struct dentry *this_parent;
2910 struct list_head *next;
2911 unsigned seq;
2912 int locked = 0;
2913
2914 seq = read_seqbegin(&rename_lock);
2915again:
2916 this_parent = root;
2917 spin_lock(&this_parent->d_lock);
2918repeat:
2919 next = this_parent->d_subdirs.next;
2920resume:
2921 while (next != &this_parent->d_subdirs) {
2922 struct list_head *tmp = next;
2923 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
2924 next = tmp->next;
2925
2926 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2927 if (d_unhashed(dentry) || !dentry->d_inode) {
2928 spin_unlock(&dentry->d_lock);
2929 continue;
2930 }
2931 if (!list_empty(&dentry->d_subdirs)) {
2932 spin_unlock(&this_parent->d_lock);
2933 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
2934 this_parent = dentry;
2935 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
2936 goto repeat;
2937 }
2938 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
2939 dentry->d_flags |= DCACHE_GENOCIDE;
2940 dentry->d_count--;
2941 }
2942 spin_unlock(&dentry->d_lock);
2943 }
2944 if (this_parent != root) {
2945 struct dentry *child = this_parent;
2946 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
2947 this_parent->d_flags |= DCACHE_GENOCIDE;
2948 this_parent->d_count--;
2949 }
2950 this_parent = try_to_ascend(this_parent, locked, seq);
2951 if (!this_parent)
2952 goto rename_retry;
2953 next = child->d_u.d_child.next;
2954 goto resume;
2955 }
2956 spin_unlock(&this_parent->d_lock);
2957 if (!locked && read_seqretry(&rename_lock, seq))
2958 goto rename_retry;
2959 if (locked)
2960 write_sequnlock(&rename_lock);
2961 return;
2962
2963rename_retry:
2964 if (locked)
2965 goto again;
2966 locked = 1;
2967 write_seqlock(&rename_lock);
2968 goto again;
2969}
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2986{
2987 struct dentry * dentry;
2988 ino_t ino = 0;
2989
2990 dentry = d_hash_and_lookup(dir, name);
2991 if (!IS_ERR_OR_NULL(dentry)) {
2992 if (dentry->d_inode)
2993 ino = dentry->d_inode->i_ino;
2994 dput(dentry);
2995 }
2996 return ino;
2997}
2998EXPORT_SYMBOL(find_inode_number);
2999
3000static __initdata unsigned long dhash_entries;
3001static int __init set_dhash_entries(char *str)
3002{
3003 if (!str)
3004 return 0;
3005 dhash_entries = simple_strtoul(str, &str, 0);
3006 return 1;
3007}
3008__setup("dhash_entries=", set_dhash_entries);
3009
3010static void __init dcache_init_early(void)
3011{
3012 unsigned int loop;
3013
3014
3015
3016
3017 if (hashdist)
3018 return;
3019
3020 dentry_hashtable =
3021 alloc_large_system_hash("Dentry cache",
3022 sizeof(struct hlist_bl_head),
3023 dhash_entries,
3024 13,
3025 HASH_EARLY,
3026 &d_hash_shift,
3027 &d_hash_mask,
3028 0,
3029 0);
3030
3031 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3032 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3033}
3034
3035static void __init dcache_init(void)
3036{
3037 unsigned int loop;
3038
3039
3040
3041
3042
3043
3044 dentry_cache = KMEM_CACHE(dentry,
3045 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
3046
3047
3048 if (!hashdist)
3049 return;
3050
3051 dentry_hashtable =
3052 alloc_large_system_hash("Dentry cache",
3053 sizeof(struct hlist_bl_head),
3054 dhash_entries,
3055 13,
3056 0,
3057 &d_hash_shift,
3058 &d_hash_mask,
3059 0,
3060 0);
3061
3062 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3063 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3064}
3065
3066
3067struct kmem_cache *names_cachep __read_mostly;
3068EXPORT_SYMBOL(names_cachep);
3069
3070EXPORT_SYMBOL(d_genocide);
3071
3072void __init vfs_caches_init_early(void)
3073{
3074 dcache_init_early();
3075 inode_init_early();
3076}
3077
3078void __init vfs_caches_init(unsigned long mempages)
3079{
3080 unsigned long reserve;
3081
3082
3083
3084
3085 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3086 mempages -= reserve;
3087
3088 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3089 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3090
3091 dcache_init();
3092 inode_init();
3093 files_init(mempages);
3094 mnt_init();
3095 bdev_cache_init();
3096 chrdev_init();
3097}
3098