/*
 * fs/dcache.c
 *
 * Directory entry cache (dcache) implementation for the VFS: dentry
 * allocation and freeing, hashing and lookup, reference counting,
 * LRU shrinking, and pathname generation.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include "internal.h"

/*
 * Locking, as implemented below:
 *
 * inode->i_lock protects the inode's i_dentry alias list.
 * Per-bucket bit spinlocks (spin_lock_bucket()) protect the hash chains.
 * The s_anon bit spinlock protects the per-sb anonymous dentry list.
 * dcache_lru_lock protects the per-sb LRU lists and the unused counters.
 * dentry->d_lock protects the dentry's flags, name, refcount, LRU
 * linkage, child list linkage and d_inode/d_alias.
 *
 * Ordering (outermost first):
 *   inode->i_lock
 *     dentry->d_lock
 *       dcache_lru_lock
 *       per-bucket hash lock / s_anon lock
 *
 * When two dentries must be locked together, an ancestor is locked
 * before a descendant; unrelated dentries are locked in address order
 * using spin_lock_nested() (see dentry_lock_for_move()).
 */

int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure of the dcache: the
 * hashtable of hashed dentries.  Each bucket is protected by a bit
 * spinlock embedded in the first pointer of its chain (see
 * spin_lock_bucket() below); lockless rcu-walk lookups traverse the
 * chains with hlist_bl_for_each_entry_rcu().
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

struct dcache_hash_bucket {
        struct hlist_bl_head head;
};
static struct dcache_hash_bucket *dentry_hashtable __read_mostly;

static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
                                        unsigned long hash)
{
        hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
        hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
        return dentry_hashtable + (hash & D_HASHMASK);
}

static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
{
        bit_spin_lock(0, (unsigned long *)&b->head.first);
}

static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
{
        __bit_spin_unlock(0, (unsigned long *)&b->head.first);
}
124
125
126struct dentry_stat_t dentry_stat = {
127 .age_limit = 45,
128};
129
130static DEFINE_PER_CPU(unsigned int, nr_dentry);
131
132#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
133static int get_nr_dentry(void)
134{
135 int i;
136 int sum = 0;
137 for_each_possible_cpu(i)
138 sum += per_cpu(nr_dentry, i);
139 return sum < 0 ? 0 : sum;
140}
141
142int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
143 size_t *lenp, loff_t *ppos)
144{
145 dentry_stat.nr_dentry = get_nr_dentry();
146 return proc_dointvec(table, write, buffer, lenp, ppos);
147}
148#endif
149
150static void __d_free(struct rcu_head *head)
151{
152 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
153
154 WARN_ON(!list_empty(&dentry->d_alias));
155 if (dname_external(dentry))
156 kfree(dentry->d_name.name);
157 kmem_cache_free(dentry_cache, dentry);
158}

/*
 * Release the dentry's name and the dentry itself.  The refcount must be
 * zero and no locks may be held.  The d_release() dentry operation, if
 * any, is called first; if the dentry was visible on an RCU-walked hash
 * chain, the actual freeing is deferred to a grace period via __d_free().
 */
163static void d_free(struct dentry *dentry)
164{
165 BUG_ON(dentry->d_count);
166 this_cpu_dec(nr_dentry);
167 if (dentry->d_op && dentry->d_op->d_release)
168 dentry->d_op->d_release(dentry);
169
170
171 if (hlist_bl_unhashed(&dentry->d_hash))
172 __d_free(&dentry->d_u.d_rcu);
173 else
174 call_rcu(&dentry->d_u.d_rcu, __d_free);
175}

/*
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 *
 * Bumps d_seq so that lockless (rcu-walk) lookups that have already
 * sampled this dentry notice the change and retry.  Must be called after
 * unhashing and after changing d_inode, with d_lock held.
 */
184static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
185{
186 assert_spin_locked(&dentry->d_lock);
187
188 write_seqcount_barrier(&dentry->d_seq);
189}

/*
 * Release the dentry's inode, using the filesystem's d_iput() method if
 * present.  Called with d_lock and the inode's i_lock held; both are
 * dropped here.  No rcu-walk barrier is issued, so this is only used for
 * dentries that are already unhashed (see d_kill()).
 */
196static void dentry_iput(struct dentry * dentry)
197 __releases(dentry->d_lock)
198 __releases(dentry->d_inode->i_lock)
199{
200 struct inode *inode = dentry->d_inode;
201 if (inode) {
202 dentry->d_inode = NULL;
203 list_del_init(&dentry->d_alias);
204 spin_unlock(&dentry->d_lock);
205 spin_unlock(&inode->i_lock);
206 if (!inode->i_nlink)
207 fsnotify_inoderemove(inode);
208 if (dentry->d_op && dentry->d_op->d_iput)
209 dentry->d_op->d_iput(dentry, inode);
210 else
211 iput(inode);
212 } else {
213 spin_unlock(&dentry->d_lock);
214 }
215}

/*
 * Release the dentry's inode, using the filesystem's d_iput() method if
 * present.  Unlike dentry_iput(), the dentry stays in place (hashed), so
 * an rcu-walk barrier is issued before the locks are dropped.  Used by
 * d_delete() to turn a dentry negative.
 */
221static void dentry_unlink_inode(struct dentry * dentry)
222 __releases(dentry->d_lock)
223 __releases(dentry->d_inode->i_lock)
224{
225 struct inode *inode = dentry->d_inode;
226 dentry->d_inode = NULL;
227 list_del_init(&dentry->d_alias);
228 dentry_rcuwalk_barrier(dentry);
229 spin_unlock(&dentry->d_lock);
230 spin_unlock(&inode->i_lock);
231 if (!inode->i_nlink)
232 fsnotify_inoderemove(inode);
233 if (dentry->d_op && dentry->d_op->d_iput)
234 dentry->d_op->d_iput(dentry, inode);
235 else
236 iput(inode);
237}

/*
 * dentry LRU list handling.  These helpers take and release
 * dcache_lru_lock internally; the caller must hold the dentry's d_lock.
 */
242static void dentry_lru_add(struct dentry *dentry)
243{
244 if (list_empty(&dentry->d_lru)) {
245 spin_lock(&dcache_lru_lock);
246 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
247 dentry->d_sb->s_nr_dentry_unused++;
248 dentry_stat.nr_unused++;
249 spin_unlock(&dcache_lru_lock);
250 }
251}
252
253static void __dentry_lru_del(struct dentry *dentry)
254{
255 list_del_init(&dentry->d_lru);
256 dentry->d_sb->s_nr_dentry_unused--;
257 dentry_stat.nr_unused--;
258}
259
260static void dentry_lru_del(struct dentry *dentry)
261{
262 if (!list_empty(&dentry->d_lru)) {
263 spin_lock(&dcache_lru_lock);
264 __dentry_lru_del(dentry);
265 spin_unlock(&dcache_lru_lock);
266 }
267}
268
269static void dentry_lru_move_tail(struct dentry *dentry)
270{
271 spin_lock(&dcache_lru_lock);
272 if (list_empty(&dentry->d_lru)) {
273 list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
274 dentry->d_sb->s_nr_dentry_unused++;
275 dentry_stat.nr_unused++;
276 } else {
277 list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
278 }
279 spin_unlock(&dcache_lru_lock);
280}

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 * @parent: parent dentry
 *
 * The dentry must already be unhashed and removed from the LRU.
 * If this is the root of the dentry tree, %NULL is returned.
 *
 * dentry->d_lock, parent->d_lock and the inode's i_lock are held on
 * entry and are all dropped here, either directly or via dentry_iput().
 */
294static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
295 __releases(dentry->d_lock)
296 __releases(parent->d_lock)
297 __releases(dentry->d_inode->i_lock)
298{
299 dentry->d_parent = NULL;
300 list_del(&dentry->d_u.d_child);
301 if (parent)
302 spin_unlock(&parent->d_lock);
303 dentry_iput(dentry);
304
305
306
307
308 d_free(dentry);
309 return parent;
310}

/**
 * __d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * Unhashes the entry from its parent's hash chain (or from the
 * superblock's s_anon list for disconnected dentries), so that it can no
 * longer be found by a VFS lookup.  Note that this is different from
 * deleting the dentry: d_delete() tries to turn the dentry into a
 * negative one, giving a successful _negative_ lookup, while dropping
 * just makes future cache lookups fail.
 *
 * __d_drop() requires dentry->d_lock; d_drop() below takes it itself.
 */
327void __d_drop(struct dentry *dentry)
328{
329 if (!(dentry->d_flags & DCACHE_UNHASHED)) {
330 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
331 bit_spin_lock(0,
332 (unsigned long *)&dentry->d_sb->s_anon.first);
333 dentry->d_flags |= DCACHE_UNHASHED;
334 hlist_bl_del_init(&dentry->d_hash);
335 __bit_spin_unlock(0,
336 (unsigned long *)&dentry->d_sb->s_anon.first);
337 } else {
338 struct dcache_hash_bucket *b;
339 b = d_hash(dentry->d_parent, dentry->d_name.hash);
340 spin_lock_bucket(b);
341
342
343
344
345
346 dentry->d_flags |= DCACHE_UNHASHED;
347 hlist_bl_del_rcu(&dentry->d_hash);
348 spin_unlock_bucket(b);
349 dentry_rcuwalk_barrier(dentry);
350 }
351 }
352}
353EXPORT_SYMBOL(__d_drop);
354
355void d_drop(struct dentry *dentry)
356{
357 spin_lock(&dentry->d_lock);
358 __d_drop(dentry);
359 spin_unlock(&dentry->d_lock);
360}
361EXPORT_SYMBOL(d_drop);
362
363
364
365
366
367
368
369static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
370 __releases(dentry->d_lock)
371{
372 struct inode *inode;
373 struct dentry *parent;
374
375 inode = dentry->d_inode;
376 if (inode && !spin_trylock(&inode->i_lock)) {
377relock:
378 spin_unlock(&dentry->d_lock);
379 cpu_relax();
380 return dentry;
381 }
382 if (IS_ROOT(dentry))
383 parent = NULL;
384 else
385 parent = dentry->d_parent;
386 if (parent && !spin_trylock(&parent->d_lock)) {
387 if (inode)
388 spin_unlock(&inode->i_lock);
389 goto relock;
390 }
391
392 if (ref)
393 dentry->d_count--;
394
395 dentry_lru_del(dentry);
396
397 __d_drop(dentry);
398 return d_kill(dentry, parent);
399}

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry.  This drops the usage count and, if that was the
 * last reference, either retires the dentry to the per-superblock LRU
 * list (when it is still hashed) or unhashes and frees it, in which case
 * the parent's reference is dropped as well and freeing may continue up
 * the tree.  The d_delete() dentry operation is consulted on the way.
 */
427void dput(struct dentry *dentry)
428{
429 if (!dentry)
430 return;
431
432repeat:
433 if (dentry->d_count == 1)
434 might_sleep();
435 spin_lock(&dentry->d_lock);
436 BUG_ON(!dentry->d_count);
437 if (dentry->d_count > 1) {
438 dentry->d_count--;
439 spin_unlock(&dentry->d_lock);
440 return;
441 }
442
443 if (dentry->d_flags & DCACHE_OP_DELETE) {
444 if (dentry->d_op->d_delete(dentry))
445 goto kill_it;
446 }
447
448
449 if (d_unhashed(dentry))
450 goto kill_it;
451
452
453 dentry->d_flags |= DCACHE_REFERENCED;
454 dentry_lru_add(dentry);
455
456 dentry->d_count--;
457 spin_unlock(&dentry->d_lock);
458 return;
459
460kill_it:
461 dentry = dentry_kill(dentry, 1);
462 if (dentry)
463 goto repeat;
464}
465EXPORT_SYMBOL(dput);
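
/*
 * Illustrative usage sketch, not part of this file: code that gained a
 * dentry reference (from d_lookup(), dget(), dget_parent(), ...) must
 * balance it with dput().  "name" is assumed to be an already
 * initialized qstr and do_something_with() is a made-up helper.
 *
 *	struct dentry *child;
 *
 *	child = d_lookup(parent, &name);
 *	if (child) {
 *		do_something_with(child);
 *		dput(child);
 *	}
 */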

/**
 * d_invalidate - try to invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to unhash the dentry so that future lookups miss it; any unused
 * children are pruned first.  If the dentry is a directory that is still
 * in use (other users can reach things through it), -EBUSY is returned;
 * on success 0 is returned.
 */
479int d_invalidate(struct dentry * dentry)
480{
481
482
483
484 spin_lock(&dentry->d_lock);
485 if (d_unhashed(dentry)) {
486 spin_unlock(&dentry->d_lock);
487 return 0;
488 }
489
490
491
492
493 if (!list_empty(&dentry->d_subdirs)) {
494 spin_unlock(&dentry->d_lock);
495 shrink_dcache_parent(dentry);
496 spin_lock(&dentry->d_lock);
497 }
498
499
500
501
502
503
504
505
506
507
508
509 if (dentry->d_count > 1) {
510 if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
511 spin_unlock(&dentry->d_lock);
512 return -EBUSY;
513 }
514 }
515
516 __d_drop(dentry);
517 spin_unlock(&dentry->d_lock);
518 return 0;
519}
520EXPORT_SYMBOL(d_invalidate);
521
522
523static inline void __dget_dlock(struct dentry *dentry)
524{
525 dentry->d_count++;
526}
527
528static inline void __dget(struct dentry *dentry)
529{
530 spin_lock(&dentry->d_lock);
531 __dget_dlock(dentry);
532 spin_unlock(&dentry->d_lock);
533}
534
535struct dentry *dget_parent(struct dentry *dentry)
536{
537 struct dentry *ret;
538
539repeat:
540
541
542
543
544 rcu_read_lock();
545 ret = dentry->d_parent;
546 if (!ret) {
547 rcu_read_unlock();
548 goto out;
549 }
550 spin_lock(&ret->d_lock);
551 if (unlikely(ret != dentry->d_parent)) {
552 spin_unlock(&ret->d_lock);
553 rcu_read_unlock();
554 goto repeat;
555 }
556 rcu_read_unlock();
557 BUG_ON(!ret->d_count);
558 ret->d_count++;
559 spin_unlock(&ret->d_lock);
560out:
561 return ret;
562}
563EXPORT_SYMBOL(dget_parent);
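
/*
 * Illustrative sketch, not part of this file: dget_parent() is the safe
 * way to pin the parent when only the child is held, because d_parent
 * can change under a concurrent rename.
 *
 *	struct dentry *parent = dget_parent(dentry);
 *
 *	pr_debug("parent: %s\n", parent->d_name.name);
 *	dput(parent);
 */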

/**
 * __d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: if set, only a root, DCACHE_DISCONNECTED alias is
 *	acceptable (used by d_splice_alias)
 *
 * If the inode has a hashed alias, or is a directory and has any alias,
 * grab a reference to it and return it; a connected alias is preferred
 * over a disconnected one.  Returns %NULL if no suitable alias exists.
 * The caller must hold inode->i_lock.
 */
581static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
582{
583 struct dentry *alias, *discon_alias;
584
585again:
586 discon_alias = NULL;
587 list_for_each_entry(alias, &inode->i_dentry, d_alias) {
588 spin_lock(&alias->d_lock);
589 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
590 if (IS_ROOT(alias) &&
591 (alias->d_flags & DCACHE_DISCONNECTED)) {
592 discon_alias = alias;
593 } else if (!want_discon) {
594 __dget_dlock(alias);
595 spin_unlock(&alias->d_lock);
596 return alias;
597 }
598 }
599 spin_unlock(&alias->d_lock);
600 }
601 if (discon_alias) {
602 alias = discon_alias;
603 spin_lock(&alias->d_lock);
604 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
605 if (IS_ROOT(alias) &&
606 (alias->d_flags & DCACHE_DISCONNECTED)) {
607 __dget_dlock(alias);
608 spin_unlock(&alias->d_lock);
609 return alias;
610 }
611 }
612 spin_unlock(&alias->d_lock);
613 goto again;
614 }
615 return NULL;
616}
617
618struct dentry *d_find_alias(struct inode *inode)
619{
620 struct dentry *de = NULL;
621
622 if (!list_empty(&inode->i_dentry)) {
623 spin_lock(&inode->i_lock);
624 de = __d_find_alias(inode, 0);
625 spin_unlock(&inode->i_lock);
626 }
627 return de;
628}
629EXPORT_SYMBOL(d_find_alias);
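
/*
 * Illustrative sketch, not part of this file: d_find_alias() is useful
 * when only an inode is at hand and a hashed dentry for it is wanted;
 * the returned reference must be dropped with dput().
 *
 *	struct dentry *alias = d_find_alias(inode);
 *
 *	if (alias) {
 *		pr_debug("alias: %s\n", alias->d_name.name);
 *		dput(alias);
 *	}
 */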

/*
 * Try to kill (unhash and drop) any unused dentries attached to this
 * inode.  The caller must hold a reference to the inode.
 */
635void d_prune_aliases(struct inode *inode)
636{
637 struct dentry *dentry;
638restart:
639 spin_lock(&inode->i_lock);
640 list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
641 spin_lock(&dentry->d_lock);
642 if (!dentry->d_count) {
643 __dget_dlock(dentry);
644 __d_drop(dentry);
645 spin_unlock(&dentry->d_lock);
646 spin_unlock(&inode->i_lock);
647 dput(dentry);
648 goto restart;
649 }
650 spin_unlock(&dentry->d_lock);
651 }
652 spin_unlock(&inode->i_lock);
653}
654EXPORT_SYMBOL(d_prune_aliases);
655
656
657
658
659
660
661
662
663static void try_prune_one_dentry(struct dentry *dentry)
664 __releases(dentry->d_lock)
665{
666 struct dentry *parent;
667
668 parent = dentry_kill(dentry, 0);
669
670
671
672
673
674
675
676
677
678
679 if (!parent)
680 return;
681 if (parent == dentry)
682 return;
683
684
685 dentry = parent;
686 while (dentry) {
687 spin_lock(&dentry->d_lock);
688 if (dentry->d_count > 1) {
689 dentry->d_count--;
690 spin_unlock(&dentry->d_lock);
691 return;
692 }
693 dentry = dentry_kill(dentry, 1);
694 }
695}
696
697static void shrink_dentry_list(struct list_head *list)
698{
699 struct dentry *dentry;
700
701 rcu_read_lock();
702 for (;;) {
703 dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
704 if (&dentry->d_lru == list)
705 break;
706 spin_lock(&dentry->d_lock);
707 if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
708 spin_unlock(&dentry->d_lock);
709 continue;
710 }
711
712
713
714
715
716
717 if (dentry->d_count) {
718 dentry_lru_del(dentry);
719 spin_unlock(&dentry->d_lock);
720 continue;
721 }
722
723 rcu_read_unlock();
724
725 try_prune_one_dentry(dentry);
726
727 rcu_read_lock();
728 }
729 rcu_read_unlock();
730}
731
732
733
734
735
736
737
738
739
740static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
741{
742
743 struct dentry *dentry;
744 LIST_HEAD(referenced);
745 LIST_HEAD(tmp);
746 int cnt = *count;
747
748relock:
749 spin_lock(&dcache_lru_lock);
750 while (!list_empty(&sb->s_dentry_lru)) {
751 dentry = list_entry(sb->s_dentry_lru.prev,
752 struct dentry, d_lru);
753 BUG_ON(dentry->d_sb != sb);
754
755 if (!spin_trylock(&dentry->d_lock)) {
756 spin_unlock(&dcache_lru_lock);
757 cpu_relax();
758 goto relock;
759 }
760
761
762
763
764
765
766 if (flags & DCACHE_REFERENCED &&
767 dentry->d_flags & DCACHE_REFERENCED) {
768 dentry->d_flags &= ~DCACHE_REFERENCED;
769 list_move(&dentry->d_lru, &referenced);
770 spin_unlock(&dentry->d_lock);
771 } else {
772 list_move_tail(&dentry->d_lru, &tmp);
773 spin_unlock(&dentry->d_lock);
774 if (!--cnt)
775 break;
776 }
777 cond_resched_lock(&dcache_lru_lock);
778 }
779 if (!list_empty(&referenced))
780 list_splice(&referenced, &sb->s_dentry_lru);
781 spin_unlock(&dcache_lru_lock);
782
783 shrink_dentry_list(&tmp);
784
785 *count = cnt;
786}
787
788
789
790
791
792
793
794
795
796
797static void prune_dcache(int count)
798{
799 struct super_block *sb, *p = NULL;
800 int w_count;
801 int unused = dentry_stat.nr_unused;
802 int prune_ratio;
803 int pruned;
804
805 if (unused == 0 || count == 0)
806 return;
807 if (count >= unused)
808 prune_ratio = 1;
809 else
810 prune_ratio = unused / count;
811 spin_lock(&sb_lock);
812 list_for_each_entry(sb, &super_blocks, s_list) {
813 if (list_empty(&sb->s_instances))
814 continue;
815 if (sb->s_nr_dentry_unused == 0)
816 continue;
817 sb->s_count++;
818
819
820
821
822
823
824
825
826
827 spin_unlock(&sb_lock);
828 if (prune_ratio != 1)
829 w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
830 else
831 w_count = sb->s_nr_dentry_unused;
832 pruned = w_count;
833
834
835
836
837
838
839
840 if (down_read_trylock(&sb->s_umount)) {
841 if ((sb->s_root != NULL) &&
842 (!list_empty(&sb->s_dentry_lru))) {
843 __shrink_dcache_sb(sb, &w_count,
844 DCACHE_REFERENCED);
845 pruned -= w_count;
846 }
847 up_read(&sb->s_umount);
848 }
849 spin_lock(&sb_lock);
850 if (p)
851 __put_super(p);
852 count -= pruned;
853 p = sb;
854
855 if (count <= 0)
856 break;
857 }
858 if (p)
859 __put_super(p);
860 spin_unlock(&sb_lock);
861}
862
863
864
865
866
867
868
869
870void shrink_dcache_sb(struct super_block *sb)
871{
872 LIST_HEAD(tmp);
873
874 spin_lock(&dcache_lru_lock);
875 while (!list_empty(&sb->s_dentry_lru)) {
876 list_splice_init(&sb->s_dentry_lru, &tmp);
877 spin_unlock(&dcache_lru_lock);
878 shrink_dentry_list(&tmp);
879 spin_lock(&dcache_lru_lock);
880 }
881 spin_unlock(&dcache_lru_lock);
882}
883EXPORT_SYMBOL(shrink_dcache_sb);
884
885
886
887
888
889
890static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
891{
892 struct dentry *parent;
893 unsigned detached = 0;
894
895 BUG_ON(!IS_ROOT(dentry));
896
897
898 spin_lock(&dentry->d_lock);
899 dentry_lru_del(dentry);
900 __d_drop(dentry);
901 spin_unlock(&dentry->d_lock);
902
903 for (;;) {
904
905 while (!list_empty(&dentry->d_subdirs)) {
906 struct dentry *loop;
907
908
909
910 spin_lock(&dentry->d_lock);
911 list_for_each_entry(loop, &dentry->d_subdirs,
912 d_u.d_child) {
913 spin_lock_nested(&loop->d_lock,
914 DENTRY_D_LOCK_NESTED);
915 dentry_lru_del(loop);
916 __d_drop(loop);
917 spin_unlock(&loop->d_lock);
918 }
919 spin_unlock(&dentry->d_lock);
920
921
922 dentry = list_entry(dentry->d_subdirs.next,
923 struct dentry, d_u.d_child);
924 }
925
926
927
928 do {
929 struct inode *inode;
930
931 if (dentry->d_count != 0) {
932 printk(KERN_ERR
933 "BUG: Dentry %p{i=%lx,n=%s}"
934 " still in use (%d)"
935 " [unmount of %s %s]\n",
936 dentry,
937 dentry->d_inode ?
938 dentry->d_inode->i_ino : 0UL,
939 dentry->d_name.name,
940 dentry->d_count,
941 dentry->d_sb->s_type->name,
942 dentry->d_sb->s_id);
943 BUG();
944 }
945
946 if (IS_ROOT(dentry)) {
947 parent = NULL;
948 list_del(&dentry->d_u.d_child);
949 } else {
950 parent = dentry->d_parent;
951 spin_lock(&parent->d_lock);
952 parent->d_count--;
953 list_del(&dentry->d_u.d_child);
954 spin_unlock(&parent->d_lock);
955 }
956
957 detached++;
958
959 inode = dentry->d_inode;
960 if (inode) {
961 dentry->d_inode = NULL;
962 list_del_init(&dentry->d_alias);
963 if (dentry->d_op && dentry->d_op->d_iput)
964 dentry->d_op->d_iput(dentry, inode);
965 else
966 iput(inode);
967 }
968
969 d_free(dentry);
970
971
972
973
974 if (!parent)
975 return;
976 dentry = parent;
977 } while (list_empty(&dentry->d_subdirs));
978
979 dentry = list_entry(dentry->d_subdirs.next,
980 struct dentry, d_u.d_child);
981 }
982}
983
984
985
986
987
988
989
990
991
992
993
994void shrink_dcache_for_umount(struct super_block *sb)
995{
996 struct dentry *dentry;
997
998 if (down_read_trylock(&sb->s_umount))
999 BUG();
1000
1001 dentry = sb->s_root;
1002 sb->s_root = NULL;
1003 spin_lock(&dentry->d_lock);
1004 dentry->d_count--;
1005 spin_unlock(&dentry->d_lock);
1006 shrink_dcache_for_umount_subtree(dentry);
1007
1008 while (!hlist_bl_empty(&sb->s_anon)) {
1009 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
1010 shrink_dcache_for_umount_subtree(dentry);
1011 }
1012}
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027int have_submounts(struct dentry *parent)
1028{
1029 struct dentry *this_parent;
1030 struct list_head *next;
1031 unsigned seq;
1032 int locked = 0;
1033
1034 seq = read_seqbegin(&rename_lock);
1035again:
1036 this_parent = parent;
1037
1038 if (d_mountpoint(parent))
1039 goto positive;
1040 spin_lock(&this_parent->d_lock);
1041repeat:
1042 next = this_parent->d_subdirs.next;
1043resume:
1044 while (next != &this_parent->d_subdirs) {
1045 struct list_head *tmp = next;
1046 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1047 next = tmp->next;
1048
1049 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1050
1051 if (d_mountpoint(dentry)) {
1052 spin_unlock(&dentry->d_lock);
1053 spin_unlock(&this_parent->d_lock);
1054 goto positive;
1055 }
1056 if (!list_empty(&dentry->d_subdirs)) {
1057 spin_unlock(&this_parent->d_lock);
1058 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1059 this_parent = dentry;
1060 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1061 goto repeat;
1062 }
1063 spin_unlock(&dentry->d_lock);
1064 }
1065
1066
1067
1068 if (this_parent != parent) {
1069 struct dentry *tmp;
1070 struct dentry *child;
1071
1072 tmp = this_parent->d_parent;
1073 rcu_read_lock();
1074 spin_unlock(&this_parent->d_lock);
1075 child = this_parent;
1076 this_parent = tmp;
1077 spin_lock(&this_parent->d_lock);
1078
1079
1080 if (this_parent != child->d_parent ||
1081 (!locked && read_seqretry(&rename_lock, seq))) {
1082 spin_unlock(&this_parent->d_lock);
1083 rcu_read_unlock();
1084 goto rename_retry;
1085 }
1086 rcu_read_unlock();
1087 next = child->d_u.d_child.next;
1088 goto resume;
1089 }
1090 spin_unlock(&this_parent->d_lock);
1091 if (!locked && read_seqretry(&rename_lock, seq))
1092 goto rename_retry;
1093 if (locked)
1094 write_sequnlock(&rename_lock);
1095 return 0;
1096positive:
1097 if (!locked && read_seqretry(&rename_lock, seq))
1098 goto rename_retry;
1099 if (locked)
1100 write_sequnlock(&rename_lock);
1101 return 1;
1102
1103rename_retry:
1104 locked = 1;
1105 write_seqlock(&rename_lock);
1106 goto again;
1107}
1108EXPORT_SYMBOL(have_submounts);
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124static int select_parent(struct dentry * parent)
1125{
1126 struct dentry *this_parent;
1127 struct list_head *next;
1128 unsigned seq;
1129 int found = 0;
1130 int locked = 0;
1131
1132 seq = read_seqbegin(&rename_lock);
1133again:
1134 this_parent = parent;
1135 spin_lock(&this_parent->d_lock);
1136repeat:
1137 next = this_parent->d_subdirs.next;
1138resume:
1139 while (next != &this_parent->d_subdirs) {
1140 struct list_head *tmp = next;
1141 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1142 next = tmp->next;
1143
1144 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1145
1146
1147
1148
1149
1150 if (!dentry->d_count) {
1151 dentry_lru_move_tail(dentry);
1152 found++;
1153 } else {
1154 dentry_lru_del(dentry);
1155 }
1156
1157
1158
1159
1160
1161
1162 if (found && need_resched()) {
1163 spin_unlock(&dentry->d_lock);
1164 goto out;
1165 }
1166
1167
1168
1169
1170 if (!list_empty(&dentry->d_subdirs)) {
1171 spin_unlock(&this_parent->d_lock);
1172 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1173 this_parent = dentry;
1174 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1175 goto repeat;
1176 }
1177
1178 spin_unlock(&dentry->d_lock);
1179 }
1180
1181
1182
1183 if (this_parent != parent) {
1184 struct dentry *tmp;
1185 struct dentry *child;
1186
1187 tmp = this_parent->d_parent;
1188 rcu_read_lock();
1189 spin_unlock(&this_parent->d_lock);
1190 child = this_parent;
1191 this_parent = tmp;
1192 spin_lock(&this_parent->d_lock);
1193
1194
1195 if (this_parent != child->d_parent ||
1196 (!locked && read_seqretry(&rename_lock, seq))) {
1197 spin_unlock(&this_parent->d_lock);
1198 rcu_read_unlock();
1199 goto rename_retry;
1200 }
1201 rcu_read_unlock();
1202 next = child->d_u.d_child.next;
1203 goto resume;
1204 }
1205out:
1206 spin_unlock(&this_parent->d_lock);
1207 if (!locked && read_seqretry(&rename_lock, seq))
1208 goto rename_retry;
1209 if (locked)
1210 write_sequnlock(&rename_lock);
1211 return found;
1212
1213rename_retry:
1214 if (found)
1215 return found;
1216 locked = 1;
1217 write_seqlock(&rename_lock);
1218 goto again;
1219}
1220
1221
1222
1223
1224
1225
1226
1227
1228void shrink_dcache_parent(struct dentry * parent)
1229{
1230 struct super_block *sb = parent->d_sb;
1231 int found;
1232
1233 while ((found = select_parent(parent)) != 0)
1234 __shrink_dcache_sb(sb, &found, 0);
1235}
1236EXPORT_SYMBOL(shrink_dcache_parent);
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
1251{
1252 if (nr) {
1253 if (!(gfp_mask & __GFP_FS))
1254 return -1;
1255 prune_dcache(nr);
1256 }
1257
1258 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
1259}
1260
1261static struct shrinker dcache_shrinker = {
1262 .shrink = shrink_dcache_memory,
1263 .seeks = DEFAULT_SEEKS,
1264};

/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry.  Returns %NULL if there is insufficient memory
 * available.  On success the name is copied (into d_iname for short
 * names, a kmalloc'ed buffer otherwise) and the new dentry is linked
 * into @parent's list of children with a reference held on the parent.
 */
1276struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1277{
1278 struct dentry *dentry;
1279 char *dname;
1280
1281 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1282 if (!dentry)
1283 return NULL;
1284
1285 if (name->len > DNAME_INLINE_LEN-1) {
1286 dname = kmalloc(name->len + 1, GFP_KERNEL);
1287 if (!dname) {
1288 kmem_cache_free(dentry_cache, dentry);
1289 return NULL;
1290 }
1291 } else {
1292 dname = dentry->d_iname;
1293 }
1294 dentry->d_name.name = dname;
1295
1296 dentry->d_name.len = name->len;
1297 dentry->d_name.hash = name->hash;
1298 memcpy(dname, name->name, name->len);
1299 dname[name->len] = 0;
1300
1301 dentry->d_count = 1;
1302 dentry->d_flags = DCACHE_UNHASHED;
1303 spin_lock_init(&dentry->d_lock);
1304 seqcount_init(&dentry->d_seq);
1305 dentry->d_inode = NULL;
1306 dentry->d_parent = NULL;
1307 dentry->d_sb = NULL;
1308 dentry->d_op = NULL;
1309 dentry->d_fsdata = NULL;
1310 INIT_HLIST_BL_NODE(&dentry->d_hash);
1311 INIT_LIST_HEAD(&dentry->d_lru);
1312 INIT_LIST_HEAD(&dentry->d_subdirs);
1313 INIT_LIST_HEAD(&dentry->d_alias);
1314 INIT_LIST_HEAD(&dentry->d_u.d_child);
1315
1316 if (parent) {
1317 spin_lock(&parent->d_lock);
1318
1319
1320
1321
1322 __dget_dlock(parent);
1323 dentry->d_parent = parent;
1324 dentry->d_sb = parent->d_sb;
1325 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1326 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1327 spin_unlock(&parent->d_lock);
1328 }
1329
1330 this_cpu_inc(nr_dentry);
1331
1332 return dentry;
1333}
1334EXPORT_SYMBOL(d_alloc);
1335
1336struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1337{
1338 struct dentry *dentry = d_alloc(NULL, name);
1339 if (dentry) {
1340 dentry->d_sb = sb;
1341 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1342 dentry->d_parent = dentry;
1343 dentry->d_flags |= DCACHE_DISCONNECTED;
1344 }
1345 return dentry;
1346}
1347EXPORT_SYMBOL(d_alloc_pseudo);
1348
1349struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1350{
1351 struct qstr q;
1352
1353 q.name = name;
1354 q.len = strlen(name);
1355 q.hash = full_name_hash(q.name, q.len);
1356 return d_alloc(parent, &q);
1357}
1358EXPORT_SYMBOL(d_alloc_name);
1359
1360void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1361{
1362 WARN_ON_ONCE(dentry->d_op);
1363 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1364 DCACHE_OP_COMPARE |
1365 DCACHE_OP_REVALIDATE |
1366 DCACHE_OP_DELETE ));
1367 dentry->d_op = op;
1368 if (!op)
1369 return;
1370 if (op->d_hash)
1371 dentry->d_flags |= DCACHE_OP_HASH;
1372 if (op->d_compare)
1373 dentry->d_flags |= DCACHE_OP_COMPARE;
1374 if (op->d_revalidate)
1375 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1376 if (op->d_delete)
1377 dentry->d_flags |= DCACHE_OP_DELETE;
1378
1379}
1380EXPORT_SYMBOL(d_set_d_op);
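
/*
 * Illustrative sketch, not part of this file: filesystems normally set
 * their dentry_operations once on the superblock; d_alloc() then calls
 * d_set_d_op() for every new dentry (see the sb->s_d_op use above),
 * which is why the DCACHE_OP_* flags can be cached in d_flags.  The
 * examplefs_* names are hypothetical.
 *
 *	static const struct dentry_operations examplefs_dentry_ops = {
 *		.d_revalidate	= examplefs_d_revalidate,
 *		.d_delete	= examplefs_d_delete,
 *	};
 *
 *	static int examplefs_fill_super(struct super_block *sb, void *data,
 *					int silent)
 *	{
 *		sb->s_d_op = &examplefs_dentry_ops;
 *		...
 *	}
 */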
1381
1382static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1383{
1384 spin_lock(&dentry->d_lock);
1385 if (inode) {
1386 if (unlikely(IS_AUTOMOUNT(inode)))
1387 dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
1388 list_add(&dentry->d_alias, &inode->i_dentry);
1389 }
1390 dentry->d_inode = inode;
1391 dentry_rcuwalk_barrier(dentry);
1392 spin_unlock(&dentry->d_lock);
1393 fsnotify_d_instantiate(dentry, inode);
1394}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.  @inode may be %NULL, in which
 * case the dentry simply stays negative.  The entry must not already
 * have an inode attached (see the BUG_ON below), and the caller is
 * expected to have arranged a reference on @inode for the dcache.
 */
1411void d_instantiate(struct dentry *entry, struct inode * inode)
1412{
1413 BUG_ON(!list_empty(&entry->d_alias));
1414 if (inode)
1415 spin_lock(&inode->i_lock);
1416 __d_instantiate(entry, inode);
1417 if (inode)
1418 spin_unlock(&inode->i_lock);
1419 security_d_instantiate(entry, inode);
1420}
1421EXPORT_SYMBOL(d_instantiate);
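
/*
 * Illustrative sketch, not part of this file: a filesystem's ->create
 * method typically allocates and fills an inode, then attaches it to the
 * dentry the VFS passed in.  The examplefs_* names are hypothetical.
 *
 *	static int examplefs_create(struct inode *dir, struct dentry *dentry,
 *				    int mode, struct nameidata *nd)
 *	{
 *		struct inode *inode = examplefs_new_inode(dir->i_sb, mode);
 *
 *		if (!inode)
 *			return -ENOSPC;
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */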
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439static struct dentry *__d_instantiate_unique(struct dentry *entry,
1440 struct inode *inode)
1441{
1442 struct dentry *alias;
1443 int len = entry->d_name.len;
1444 const char *name = entry->d_name.name;
1445 unsigned int hash = entry->d_name.hash;
1446
1447 if (!inode) {
1448 __d_instantiate(entry, NULL);
1449 return NULL;
1450 }
1451
1452 list_for_each_entry(alias, &inode->i_dentry, d_alias) {
1453 struct qstr *qstr = &alias->d_name;
1454
1455
1456
1457
1458
1459
1460 if (qstr->hash != hash)
1461 continue;
1462 if (alias->d_parent != entry->d_parent)
1463 continue;
1464 if (dentry_cmp(qstr->name, qstr->len, name, len))
1465 continue;
1466 __dget(alias);
1467 return alias;
1468 }
1469
1470 __d_instantiate(entry, inode);
1471 return NULL;
1472}
1473
1474struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1475{
1476 struct dentry *result;
1477
1478 BUG_ON(!list_empty(&entry->d_alias));
1479
1480 if (inode)
1481 spin_lock(&inode->i_lock);
1482 result = __d_instantiate_unique(entry, inode);
1483 if (inode)
1484 spin_unlock(&inode->i_lock);
1485
1486 if (!result) {
1487 security_d_instantiate(entry, inode);
1488 return NULL;
1489 }
1490
1491 BUG_ON(!d_unhashed(result));
1492 iput(inode);
1493 return result;
1494}
1495
1496EXPORT_SYMBOL(d_instantiate_unique);
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507struct dentry * d_alloc_root(struct inode * root_inode)
1508{
1509 struct dentry *res = NULL;
1510
1511 if (root_inode) {
1512 static const struct qstr name = { .name = "/", .len = 1 };
1513
1514 res = d_alloc(NULL, &name);
1515 if (res) {
1516 res->d_sb = root_inode->i_sb;
1517 d_set_d_op(res, res->d_sb->s_d_op);
1518 res->d_parent = res;
1519 d_instantiate(res, root_inode);
1520 }
1521 }
1522 return res;
1523}
1524EXPORT_SYMBOL(d_alloc_root);
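
/*
 * Illustrative sketch, not part of this file: after creating the root
 * inode, a fill_super implementation pins it into the dcache with
 * d_alloc_root() and stores the result in sb->s_root.  The
 * examplefs_make_root_inode() helper is hypothetical.
 *
 *	root_inode = examplefs_make_root_inode(sb);
 *	sb->s_root = d_alloc_root(root_inode);
 *	if (!sb->s_root) {
 *		iput(root_inode);
 *		return -ENOMEM;
 *	}
 */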
1525
1526static struct dentry * __d_find_any_alias(struct inode *inode)
1527{
1528 struct dentry *alias;
1529
1530 if (list_empty(&inode->i_dentry))
1531 return NULL;
1532 alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
1533 __dget(alias);
1534 return alias;
1535}
1536
1537static struct dentry * d_find_any_alias(struct inode *inode)
1538{
1539 struct dentry *de;
1540
1541 spin_lock(&inode->i_lock);
1542 de = __d_find_any_alias(inode);
1543 spin_unlock(&inode->i_lock);
1544 return de;
1545}

/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion
 * or a similar open-by-handle operation.  If an alias already exists it
 * is returned; otherwise a new, DCACHE_DISCONNECTED dentry is created
 * and hashed on the superblock's s_anon list so it can be reconnected
 * into the tree later.
 *
 * On success the reference on @inode is consumed by the returned dentry;
 * on failure it is dropped.  %NULL and IS_ERR inodes are handled by
 * returning ERR_PTR(-ESTALE) or the propagated error, respectively.
 */
1566struct dentry *d_obtain_alias(struct inode *inode)
1567{
1568 static const struct qstr anonstring = { .name = "" };
1569 struct dentry *tmp;
1570 struct dentry *res;
1571
1572 if (!inode)
1573 return ERR_PTR(-ESTALE);
1574 if (IS_ERR(inode))
1575 return ERR_CAST(inode);
1576
1577 res = d_find_any_alias(inode);
1578 if (res)
1579 goto out_iput;
1580
1581 tmp = d_alloc(NULL, &anonstring);
1582 if (!tmp) {
1583 res = ERR_PTR(-ENOMEM);
1584 goto out_iput;
1585 }
1586 tmp->d_parent = tmp;
1587
1588
1589 spin_lock(&inode->i_lock);
1590 res = __d_find_any_alias(inode);
1591 if (res) {
1592 spin_unlock(&inode->i_lock);
1593 dput(tmp);
1594 goto out_iput;
1595 }
1596
1597
1598 spin_lock(&tmp->d_lock);
1599 tmp->d_sb = inode->i_sb;
1600 d_set_d_op(tmp, tmp->d_sb->s_d_op);
1601 tmp->d_inode = inode;
1602 tmp->d_flags |= DCACHE_DISCONNECTED;
1603 list_add(&tmp->d_alias, &inode->i_dentry);
1604 bit_spin_lock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
1605 tmp->d_flags &= ~DCACHE_UNHASHED;
1606 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1607 __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
1608 spin_unlock(&tmp->d_lock);
1609 spin_unlock(&inode->i_lock);
1610
1611 return tmp;
1612
1613 out_iput:
1614 iput(inode);
1615 return res;
1616}
1617EXPORT_SYMBOL(d_obtain_alias);
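
/*
 * Illustrative sketch, not part of this file: d_obtain_alias() is the
 * usual building block for export_operations, where an inode is looked
 * up from a file handle and a (possibly disconnected) dentry must be
 * produced.  examplefs_iget() is a hypothetical helper.
 *
 *	static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
 *				struct fid *fid, int fh_len, int fh_type)
 *	{
 *		struct inode *inode = examplefs_iget(sb, fid->i32.ino);
 *
 *		return d_obtain_alias(inode);
 *	}
 */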

/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode: the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode
 *
 * If @inode is a directory and has a disconnected (IS_ROOT and
 * DCACHE_DISCONNECTED) alias, that alias is moved into place instead of
 * @dentry and returned, and the extra inode reference is dropped.
 * Otherwise @inode is simply attached to @dentry with d_add() and %NULL
 * is returned.  This is what a filesystem's ->lookup method normally
 * returns.
 */
1635struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1636{
1637 struct dentry *new = NULL;
1638
1639 if (inode && S_ISDIR(inode->i_mode)) {
1640 spin_lock(&inode->i_lock);
1641 new = __d_find_alias(inode, 1);
1642 if (new) {
1643 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1644 spin_unlock(&inode->i_lock);
1645 security_d_instantiate(new, inode);
1646 d_move(new, dentry);
1647 iput(inode);
1648 } else {
1649
1650 __d_instantiate(dentry, inode);
1651 spin_unlock(&inode->i_lock);
1652 security_d_instantiate(dentry, inode);
1653 d_rehash(dentry);
1654 }
1655 } else
1656 d_add(dentry, inode);
1657 return new;
1658}
1659EXPORT_SYMBOL(d_splice_alias);
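
/*
 * Illustrative sketch, not part of this file: a disk filesystem's
 * ->lookup method normally returns d_splice_alias() so that an existing
 * directory alias is reused instead of creating a second one.  The
 * examplefs_* names are hypothetical.
 *
 *	static struct dentry *examplefs_lookup(struct inode *dir,
 *				struct dentry *dentry, struct nameidata *nd)
 *	{
 *		struct inode *inode = examplefs_find_inode(dir, &dentry->d_name);
 *
 *		return d_splice_alias(inode, dentry);
 *	}
 */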
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1678 struct qstr *name)
1679{
1680 int error;
1681 struct dentry *found;
1682 struct dentry *new;
1683
1684
1685
1686
1687
1688 found = d_hash_and_lookup(dentry->d_parent, name);
1689 if (!found) {
1690 new = d_alloc(dentry->d_parent, name);
1691 if (!new) {
1692 error = -ENOMEM;
1693 goto err_out;
1694 }
1695
1696 found = d_splice_alias(inode, new);
1697 if (found) {
1698 dput(new);
1699 return found;
1700 }
1701 return new;
1702 }
1703
1704
1705
1706
1707
1708
1709
1710 if (found->d_inode) {
1711 if (unlikely(found->d_inode != inode)) {
1712
1713 BUG_ON(!is_bad_inode(inode));
1714 BUG_ON(!is_bad_inode(found->d_inode));
1715 }
1716 iput(inode);
1717 return found;
1718 }
1719
1720
1721
1722
1723
1724 spin_lock(&inode->i_lock);
1725 if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
1726 __d_instantiate(found, inode);
1727 spin_unlock(&inode->i_lock);
1728 security_d_instantiate(found, inode);
1729 return found;
1730 }
1731
1732
1733
1734
1735
1736 new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
1737 __dget(new);
1738 spin_unlock(&inode->i_lock);
1739 security_d_instantiate(found, inode);
1740 d_move(new, found);
1741 iput(inode);
1742 dput(found);
1743 return new;
1744
1745err_out:
1746 iput(inode);
1747 return ERR_PTR(error);
1748}
1749EXPORT_SYMBOL(d_add_ci);
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
1779 unsigned *seq, struct inode **inode)
1780{
1781 unsigned int len = name->len;
1782 unsigned int hash = name->hash;
1783 const unsigned char *str = name->name;
1784 struct dcache_hash_bucket *b = d_hash(parent, hash);
1785 struct hlist_bl_node *node;
1786 struct dentry *dentry;
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808 hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
1809 struct inode *i;
1810 const char *tname;
1811 int tlen;
1812
1813 if (dentry->d_name.hash != hash)
1814 continue;
1815
1816seqretry:
1817 *seq = read_seqcount_begin(&dentry->d_seq);
1818 if (dentry->d_parent != parent)
1819 continue;
1820 if (d_unhashed(dentry))
1821 continue;
1822 tlen = dentry->d_name.len;
1823 tname = dentry->d_name.name;
1824 i = dentry->d_inode;
1825 prefetch(tname);
1826 if (i)
1827 prefetch(i);
1828
1829
1830
1831
1832
1833
1834 if (read_seqcount_retry(&dentry->d_seq, *seq))
1835 goto seqretry;
1836 if (parent->d_flags & DCACHE_OP_COMPARE) {
1837 if (parent->d_op->d_compare(parent, *inode,
1838 dentry, i,
1839 tlen, tname, name))
1840 continue;
1841 } else {
1842 if (dentry_cmp(tname, tlen, str, len))
1843 continue;
1844 }
1845
1846
1847
1848
1849
1850
1851 *inode = i;
1852 return dentry;
1853 }
1854 return NULL;
1855}

/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Searches the children of the parent dentry for the name in question.
 * If found, the dentry is returned with its reference count incremented;
 * otherwise %NULL is returned.  d_lookup() is protected against
 * concurrent renames by retrying under the rename_lock seqlock; see
 * __d_lookup() below for the unprotected variant.
 */
1868struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
1869{
1870 struct dentry *dentry;
1871 unsigned seq;
1872
1873 do {
1874 seq = read_seqbegin(&rename_lock);
1875 dentry = __d_lookup(parent, name);
1876 if (dentry)
1877 break;
1878 } while (read_seqretry(&rename_lock, seq));
1879 return dentry;
1880}
1881EXPORT_SYMBOL(d_lookup);
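
/*
 * Illustrative sketch, not part of this file: d_lookup() expects a fully
 * initialized qstr, including the hash, which callers usually compute
 * with full_name_hash() (or the parent's ->d_hash method), exactly as
 * d_hash_and_lookup() further below does.
 *
 *	struct qstr this;
 *
 *	this.name = "example";
 *	this.len = 7;
 *	this.hash = full_name_hash(this.name, this.len);
 *	dentry = d_lookup(parent, &this);
 */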
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
1899{
1900 unsigned int len = name->len;
1901 unsigned int hash = name->hash;
1902 const unsigned char *str = name->name;
1903 struct dcache_hash_bucket *b = d_hash(parent, hash);
1904 struct hlist_bl_node *node;
1905 struct dentry *found = NULL;
1906 struct dentry *dentry;
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928 rcu_read_lock();
1929
1930 hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
1931 const char *tname;
1932 int tlen;
1933
1934 if (dentry->d_name.hash != hash)
1935 continue;
1936
1937 spin_lock(&dentry->d_lock);
1938 if (dentry->d_parent != parent)
1939 goto next;
1940 if (d_unhashed(dentry))
1941 goto next;
1942
1943
1944
1945
1946
1947 tlen = dentry->d_name.len;
1948 tname = dentry->d_name.name;
1949 if (parent->d_flags & DCACHE_OP_COMPARE) {
1950 if (parent->d_op->d_compare(parent, parent->d_inode,
1951 dentry, dentry->d_inode,
1952 tlen, tname, name))
1953 goto next;
1954 } else {
1955 if (dentry_cmp(tname, tlen, str, len))
1956 goto next;
1957 }
1958
1959 dentry->d_count++;
1960 found = dentry;
1961 spin_unlock(&dentry->d_lock);
1962 break;
1963next:
1964 spin_unlock(&dentry->d_lock);
1965 }
1966 rcu_read_unlock();
1967
1968 return found;
1969}
1970
1971
1972
1973
1974
1975
1976
1977
1978struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
1979{
1980 struct dentry *dentry = NULL;
1981
1982
1983
1984
1985
1986
1987 name->hash = full_name_hash(name->name, name->len);
1988 if (dir->d_flags & DCACHE_OP_HASH) {
1989 if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
1990 goto out;
1991 }
1992 dentry = d_lookup(dir, name);
1993out:
1994 return dentry;
1995}
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008int d_validate(struct dentry *dentry, struct dentry *dparent)
2009{
2010 struct dentry *child;
2011
2012 spin_lock(&dparent->d_lock);
2013 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2014 if (dentry == child) {
2015 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2016 __dget_dlock(dentry);
2017 spin_unlock(&dentry->d_lock);
2018 spin_unlock(&dparent->d_lock);
2019 return 1;
2020 }
2021 }
2022 spin_unlock(&dparent->d_lock);
2023
2024 return 0;
2025}
2026EXPORT_SYMBOL(d_validate);
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049void d_delete(struct dentry * dentry)
2050{
2051 struct inode *inode;
2052 int isdir = 0;
2053
2054
2055
2056again:
2057 spin_lock(&dentry->d_lock);
2058 inode = dentry->d_inode;
2059 isdir = S_ISDIR(inode->i_mode);
2060 if (dentry->d_count == 1) {
2061 if (inode && !spin_trylock(&inode->i_lock)) {
2062 spin_unlock(&dentry->d_lock);
2063 cpu_relax();
2064 goto again;
2065 }
2066 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2067 dentry_unlink_inode(dentry);
2068 fsnotify_nameremove(dentry, isdir);
2069 return;
2070 }
2071
2072 if (!d_unhashed(dentry))
2073 __d_drop(dentry);
2074
2075 spin_unlock(&dentry->d_lock);
2076
2077 fsnotify_nameremove(dentry, isdir);
2078}
2079EXPORT_SYMBOL(d_delete);
2080
2081static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b)
2082{
2083 BUG_ON(!d_unhashed(entry));
2084 spin_lock_bucket(b);
2085 entry->d_flags &= ~DCACHE_UNHASHED;
2086 hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
2087 spin_unlock_bucket(b);
2088}
2089
2090static void _d_rehash(struct dentry * entry)
2091{
2092 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2093}
2094
2095
2096
2097
2098
2099
2100
2101
2102void d_rehash(struct dentry * entry)
2103{
2104 spin_lock(&entry->d_lock);
2105 _d_rehash(entry);
2106 spin_unlock(&entry->d_lock);
2107}
2108EXPORT_SYMBOL(d_rehash);
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2125{
2126 BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
2127 BUG_ON(dentry->d_name.len != name->len);
2128
2129 spin_lock(&dentry->d_lock);
2130 write_seqcount_begin(&dentry->d_seq);
2131 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2132 write_seqcount_end(&dentry->d_seq);
2133 spin_unlock(&dentry->d_lock);
2134}
2135EXPORT_SYMBOL(dentry_update_name_case);
2136
2137static void switch_names(struct dentry *dentry, struct dentry *target)
2138{
2139 if (dname_external(target)) {
2140 if (dname_external(dentry)) {
2141
2142
2143
2144 swap(target->d_name.name, dentry->d_name.name);
2145 } else {
2146
2147
2148
2149
2150 memcpy(target->d_iname, dentry->d_name.name,
2151 dentry->d_name.len + 1);
2152 dentry->d_name.name = target->d_name.name;
2153 target->d_name.name = target->d_iname;
2154 }
2155 } else {
2156 if (dname_external(dentry)) {
2157
2158
2159
2160
2161 memcpy(dentry->d_iname, target->d_name.name,
2162 target->d_name.len + 1);
2163 target->d_name.name = dentry->d_name.name;
2164 dentry->d_name.name = dentry->d_iname;
2165 } else {
2166
2167
2168
2169 memcpy(dentry->d_iname, target->d_name.name,
2170 target->d_name.len + 1);
2171 dentry->d_name.len = target->d_name.len;
2172 return;
2173 }
2174 }
2175 swap(dentry->d_name.len, target->d_name.len);
2176}
2177
2178static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2179{
2180
2181
2182
2183 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2184 spin_lock(&target->d_parent->d_lock);
2185 else {
2186 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2187 spin_lock(&dentry->d_parent->d_lock);
2188 spin_lock_nested(&target->d_parent->d_lock,
2189 DENTRY_D_LOCK_NESTED);
2190 } else {
2191 spin_lock(&target->d_parent->d_lock);
2192 spin_lock_nested(&dentry->d_parent->d_lock,
2193 DENTRY_D_LOCK_NESTED);
2194 }
2195 }
2196 if (target < dentry) {
2197 spin_lock_nested(&target->d_lock, 2);
2198 spin_lock_nested(&dentry->d_lock, 3);
2199 } else {
2200 spin_lock_nested(&dentry->d_lock, 2);
2201 spin_lock_nested(&target->d_lock, 3);
2202 }
2203}
2204
2205static void dentry_unlock_parents_for_move(struct dentry *dentry,
2206 struct dentry *target)
2207{
2208 if (target->d_parent != dentry->d_parent)
2209 spin_unlock(&dentry->d_parent->d_lock);
2210 if (target->d_parent != target)
2211 spin_unlock(&target->d_parent->d_lock);
2212}

/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name: @dentry takes
 * over @target's name, hash chain and place in the tree, while @target
 * is unhashed.  Callers must ensure the two dentries cannot go away
 * underneath us (the VFS rename code holds the relevant directory
 * mutexes); all dcache-internal locking, including rename_lock, is taken
 * here.  Moving a negative dentry is unexpected and warned about.
 */
2233void d_move(struct dentry * dentry, struct dentry * target)
2234{
2235 if (!dentry->d_inode)
2236 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2237
2238 BUG_ON(d_ancestor(dentry, target));
2239 BUG_ON(d_ancestor(target, dentry));
2240
2241 write_seqlock(&rename_lock);
2242
2243 dentry_lock_for_move(dentry, target);
2244
2245 write_seqcount_begin(&dentry->d_seq);
2246 write_seqcount_begin(&target->d_seq);
2247
2248
2249
2250
2251
2252
2253
2254 __d_drop(dentry);
2255 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2256
2257
2258 __d_drop(target);
2259
2260 list_del(&dentry->d_u.d_child);
2261 list_del(&target->d_u.d_child);
2262
2263
2264 switch_names(dentry, target);
2265 swap(dentry->d_name.hash, target->d_name.hash);
2266
2267
2268 if (IS_ROOT(dentry)) {
2269 dentry->d_parent = target->d_parent;
2270 target->d_parent = target;
2271 INIT_LIST_HEAD(&target->d_u.d_child);
2272 } else {
2273 swap(dentry->d_parent, target->d_parent);
2274
2275
2276 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2277 }
2278
2279 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2280
2281 write_seqcount_end(&target->d_seq);
2282 write_seqcount_end(&dentry->d_seq);
2283
2284 dentry_unlock_parents_for_move(dentry, target);
2285 spin_unlock(&target->d_lock);
2286 fsnotify_d_move(dentry);
2287 spin_unlock(&dentry->d_lock);
2288 write_sequnlock(&rename_lock);
2289}
2290EXPORT_SYMBOL(d_move);
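
/*
 * Illustrative sketch, not part of this file: d_move() is normally
 * driven by the VFS rename path rather than called by filesystems
 * directly.  Simplified, after the filesystem's ->rename succeeds:
 *
 *	error = old_dir->i_op->rename(old_dir, old_dentry,
 *				      new_dir, new_dentry);
 *	if (!error)
 *		d_move(old_dentry, new_dentry);
 */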
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2301{
2302 struct dentry *p;
2303
2304 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2305 if (p->d_parent == p1)
2306 return p;
2307 }
2308 return NULL;
2309}
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320static struct dentry *__d_unalias(struct inode *inode,
2321 struct dentry *dentry, struct dentry *alias)
2322{
2323 struct mutex *m1 = NULL, *m2 = NULL;
2324 struct dentry *ret;
2325
2326
2327 if (alias->d_parent == dentry->d_parent)
2328 goto out_unalias;
2329
2330
2331 ret = ERR_PTR(-ELOOP);
2332 if (d_ancestor(alias, dentry))
2333 goto out_err;
2334
2335
2336 ret = ERR_PTR(-EBUSY);
2337 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2338 goto out_err;
2339 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2340 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2341 goto out_err;
2342 m2 = &alias->d_parent->d_inode->i_mutex;
2343out_unalias:
2344 d_move(alias, dentry);
2345 ret = alias;
2346out_err:
2347 spin_unlock(&inode->i_lock);
2348 if (m2)
2349 mutex_unlock(m2);
2350 if (m1)
2351 mutex_unlock(m1);
2352 return ret;
2353}
2354
2355
2356
2357
2358
2359
2360static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2361{
2362 struct dentry *dparent, *aparent;
2363
2364 dentry_lock_for_move(anon, dentry);
2365
2366 write_seqcount_begin(&dentry->d_seq);
2367 write_seqcount_begin(&anon->d_seq);
2368
2369 dparent = dentry->d_parent;
2370 aparent = anon->d_parent;
2371
2372 switch_names(dentry, anon);
2373 swap(dentry->d_name.hash, anon->d_name.hash);
2374
2375 dentry->d_parent = (aparent == anon) ? dentry : aparent;
2376 list_del(&dentry->d_u.d_child);
2377 if (!IS_ROOT(dentry))
2378 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2379 else
2380 INIT_LIST_HEAD(&dentry->d_u.d_child);
2381
2382 anon->d_parent = (dparent == dentry) ? anon : dparent;
2383 list_del(&anon->d_u.d_child);
2384 if (!IS_ROOT(anon))
2385 list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
2386 else
2387 INIT_LIST_HEAD(&anon->d_u.d_child);
2388
2389 write_seqcount_end(&dentry->d_seq);
2390 write_seqcount_end(&anon->d_seq);
2391
2392 dentry_unlock_parents_for_move(anon, dentry);
2393 spin_unlock(&dentry->d_lock);
2394
2395
2396 anon->d_flags &= ~DCACHE_DISCONNECTED;
2397}
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2408{
2409 struct dentry *actual;
2410
2411 BUG_ON(!d_unhashed(dentry));
2412
2413 if (!inode) {
2414 actual = dentry;
2415 __d_instantiate(dentry, NULL);
2416 d_rehash(actual);
2417 goto out_nolock;
2418 }
2419
2420 spin_lock(&inode->i_lock);
2421
2422 if (S_ISDIR(inode->i_mode)) {
2423 struct dentry *alias;
2424
2425
2426 alias = __d_find_alias(inode, 0);
2427 if (alias) {
2428 actual = alias;
2429
2430
2431 if (IS_ROOT(alias)) {
2432 __d_materialise_dentry(dentry, alias);
2433 __d_drop(alias);
2434 goto found;
2435 }
2436
2437 actual = __d_unalias(inode, dentry, alias);
2438 if (IS_ERR(actual))
2439 dput(alias);
2440 goto out_nolock;
2441 }
2442 }
2443
2444
2445 actual = __d_instantiate_unique(dentry, inode);
2446 if (!actual)
2447 actual = dentry;
2448 else
2449 BUG_ON(!d_unhashed(actual));
2450
2451 spin_lock(&actual->d_lock);
2452found:
2453 _d_rehash(actual);
2454 spin_unlock(&actual->d_lock);
2455 spin_unlock(&inode->i_lock);
2456out_nolock:
2457 if (actual == dentry) {
2458 security_d_instantiate(dentry, inode);
2459 return NULL;
2460 }
2461
2462 iput(inode);
2463 return actual;
2464}
2465EXPORT_SYMBOL_GPL(d_materialise_unique);
2466
2467static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2468{
2469 *buflen -= namelen;
2470 if (*buflen < 0)
2471 return -ENAMETOOLONG;
2472 *buffer -= namelen;
2473 memcpy(*buffer, str, namelen);
2474 return 0;
2475}
2476
2477static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2478{
2479 return prepend(buffer, buflen, name->name, name->len);
2480}
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494static int prepend_path(const struct path *path, struct path *root,
2495 char **buffer, int *buflen)
2496{
2497 struct dentry *dentry = path->dentry;
2498 struct vfsmount *vfsmnt = path->mnt;
2499 bool slash = false;
2500 int error = 0;
2501
2502 br_read_lock(vfsmount_lock);
2503 while (dentry != root->dentry || vfsmnt != root->mnt) {
2504 struct dentry * parent;
2505
2506 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2507
2508 if (vfsmnt->mnt_parent == vfsmnt) {
2509 goto global_root;
2510 }
2511 dentry = vfsmnt->mnt_mountpoint;
2512 vfsmnt = vfsmnt->mnt_parent;
2513 continue;
2514 }
2515 parent = dentry->d_parent;
2516 prefetch(parent);
2517 spin_lock(&dentry->d_lock);
2518 error = prepend_name(buffer, buflen, &dentry->d_name);
2519 spin_unlock(&dentry->d_lock);
2520 if (!error)
2521 error = prepend(buffer, buflen, "/", 1);
2522 if (error)
2523 break;
2524
2525 slash = true;
2526 dentry = parent;
2527 }
2528
2529out:
2530 if (!error && !slash)
2531 error = prepend(buffer, buflen, "/", 1);
2532
2533 br_read_unlock(vfsmount_lock);
2534 return error;
2535
2536global_root:
2537
2538
2539
2540
2541 if (IS_ROOT(dentry) &&
2542 (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
2543 WARN(1, "Root dentry has weird name <%.*s>\n",
2544 (int) dentry->d_name.len, dentry->d_name.name);
2545 }
2546 root->mnt = vfsmnt;
2547 root->dentry = dentry;
2548 goto out;
2549}
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568char *__d_path(const struct path *path, struct path *root,
2569 char *buf, int buflen)
2570{
2571 char *res = buf + buflen;
2572 int error;
2573
2574 prepend(&res, &buflen, "\0", 1);
2575 write_seqlock(&rename_lock);
2576 error = prepend_path(path, root, &res, &buflen);
2577 write_sequnlock(&rename_lock);
2578
2579 if (error)
2580 return ERR_PTR(error);
2581 return res;
2582}
2583
2584
2585
2586
2587static int path_with_deleted(const struct path *path, struct path *root,
2588 char **buf, int *buflen)
2589{
2590 prepend(buf, buflen, "\0", 1);
2591 if (d_unlinked(path->dentry)) {
2592 int error = prepend(buf, buflen, " (deleted)", 10);
2593 if (error)
2594 return error;
2595 }
2596
2597 return prepend_path(path, root, buf, buflen);
2598}
2599
2600static int prepend_unreachable(char **buffer, int *buflen)
2601{
2602 return prepend(buffer, buflen, "(unreachable)", 13);
2603}

/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.  If the entry has been
 * deleted the string " (deleted)" is appended.  The name is written at
 * the end of the buffer and a pointer into @buf is returned, so callers
 * must use the returned pointer rather than @buf itself; on overflow an
 * ERR_PTR is returned instead.  If the dentry provides a d_dname
 * operation, that is used to generate the name instead.
 */
2621char *d_path(const struct path *path, char *buf, int buflen)
2622{
2623 char *res = buf + buflen;
2624 struct path root;
2625 struct path tmp;
2626 int error;
2627
2628
2629
2630
2631
2632
2633
2634
2635 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2636 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2637
2638 get_fs_root(current->fs, &root);
2639 write_seqlock(&rename_lock);
2640 tmp = root;
2641 error = path_with_deleted(path, &tmp, &res, &buflen);
2642 if (error)
2643 res = ERR_PTR(error);
2644 write_sequnlock(&rename_lock);
2645 path_put(&root);
2646 return res;
2647}
2648EXPORT_SYMBOL(d_path);
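
/*
 * Illustrative sketch, not part of this file: d_path() writes the name
 * at the *end* of the buffer and returns a pointer into it, so callers
 * must use the returned pointer rather than the buffer start.
 *
 *	char *buf, *name;
 *
 *	buf = (char *)__get_free_page(GFP_KERNEL);
 *	if (buf) {
 *		name = d_path(&file->f_path, buf, PAGE_SIZE);
 *		if (!IS_ERR(name))
 *			pr_debug("path: %s\n", name);
 *		free_page((unsigned long)buf);
 *	}
 */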
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
2660{
2661 char *res = buf + buflen;
2662 struct path root;
2663 struct path tmp;
2664 int error;
2665
2666 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2667 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2668
2669 get_fs_root(current->fs, &root);
2670 write_seqlock(&rename_lock);
2671 tmp = root;
2672 error = path_with_deleted(path, &tmp, &res, &buflen);
2673 if (!error && !path_equal(&tmp, &root))
2674 error = prepend_unreachable(&res, &buflen);
2675 write_sequnlock(&rename_lock);
2676 path_put(&root);
2677 if (error)
2678 res = ERR_PTR(error);
2679
2680 return res;
2681}
2682
2683
2684
2685
2686char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2687 const char *fmt, ...)
2688{
2689 va_list args;
2690 char temp[64];
2691 int sz;
2692
2693 va_start(args, fmt);
2694 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
2695 va_end(args);
2696
2697 if (sz > sizeof(temp) || sz > buflen)
2698 return ERR_PTR(-ENAMETOOLONG);
2699
2700 buffer += buflen - sz;
2701 return memcpy(buffer, temp, sz);
2702}
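
/*
 * Illustrative sketch, not part of this file: dynamic_dname() is the
 * helper filesystems use to implement ->d_dname for dentries that have
 * no stable name (pipes, sockets and the like).  The examplefs_ name is
 * hypothetical; the pattern follows the in-tree users.
 *
 *	static char *examplefs_dname(struct dentry *dentry, char *buffer,
 *				     int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
 *				     dentry->d_inode->i_ino);
 *	}
 */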
2703
2704
2705
2706
2707static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
2708{
2709 char *end = buf + buflen;
2710 char *retval;
2711
2712 prepend(&end, &buflen, "\0", 1);
2713 if (buflen < 1)
2714 goto Elong;
2715
2716 retval = end-1;
2717 *retval = '/';
2718
2719 while (!IS_ROOT(dentry)) {
2720 struct dentry *parent = dentry->d_parent;
2721 int error;
2722
2723 prefetch(parent);
2724 spin_lock(&dentry->d_lock);
2725 error = prepend_name(&end, &buflen, &dentry->d_name);
2726 spin_unlock(&dentry->d_lock);
2727 if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
2728 goto Elong;
2729
2730 retval = end;
2731 dentry = parent;
2732 }
2733 return retval;
2734Elong:
2735 return ERR_PTR(-ENAMETOOLONG);
2736}
2737
2738char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
2739{
2740 char *retval;
2741
2742 write_seqlock(&rename_lock);
2743 retval = __dentry_path(dentry, buf, buflen);
2744 write_sequnlock(&rename_lock);
2745
2746 return retval;
2747}
2748EXPORT_SYMBOL(dentry_path_raw);
2749
2750char *dentry_path(struct dentry *dentry, char *buf, int buflen)
2751{
2752 char *p = NULL;
2753 char *retval;
2754
2755 write_seqlock(&rename_lock);
2756 if (d_unlinked(dentry)) {
2757 p = buf + buflen;
2758 if (prepend(&p, &buflen, "//deleted", 10) != 0)
2759 goto Elong;
2760 buflen++;
2761 }
2762 retval = __dentry_path(dentry, buf, buflen);
2763 write_sequnlock(&rename_lock);
2764 if (!IS_ERR(retval) && p)
2765 *p = '/';
2766 return retval;
2767Elong:
2768 return ERR_PTR(-ENAMETOOLONG);
2769}
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
2790{
2791 int error;
2792 struct path pwd, root;
2793 char *page = (char *) __get_free_page(GFP_USER);
2794
2795 if (!page)
2796 return -ENOMEM;
2797
2798 get_fs_root_and_pwd(current->fs, &root, &pwd);
2799
2800 error = -ENOENT;
2801 write_seqlock(&rename_lock);
2802 if (!d_unlinked(pwd.dentry)) {
2803 unsigned long len;
2804 struct path tmp = root;
2805 char *cwd = page + PAGE_SIZE;
2806 int buflen = PAGE_SIZE;
2807
2808 prepend(&cwd, &buflen, "\0", 1);
2809 error = prepend_path(&pwd, &tmp, &cwd, &buflen);
2810 write_sequnlock(&rename_lock);
2811
2812 if (error)
2813 goto out;
2814
2815
2816 if (!path_equal(&tmp, &root)) {
2817 error = prepend_unreachable(&cwd, &buflen);
2818 if (error)
2819 goto out;
2820 }
2821
2822 error = -ERANGE;
2823 len = PAGE_SIZE + page - cwd;
2824 if (len <= size) {
2825 error = len;
2826 if (copy_to_user(buf, cwd, len))
2827 error = -EFAULT;
2828 }
2829 } else {
2830 write_sequnlock(&rename_lock);
2831 }
2832
2833out:
2834 path_put(&pwd);
2835 path_put(&root);
2836 free_page((unsigned long) page);
2837 return error;
2838}
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2857{
2858 int result;
2859 unsigned seq;
2860
2861 if (new_dentry == old_dentry)
2862 return 1;
2863
2864 do {
2865
2866 seq = read_seqbegin(&rename_lock);
2867
2868
2869
2870
2871 rcu_read_lock();
2872 if (d_ancestor(old_dentry, new_dentry))
2873 result = 1;
2874 else
2875 result = 0;
2876 rcu_read_unlock();
2877 } while (read_seqretry(&rename_lock, seq));
2878
2879 return result;
2880}
2881
2882int path_is_under(struct path *path1, struct path *path2)
2883{
2884 struct vfsmount *mnt = path1->mnt;
2885 struct dentry *dentry = path1->dentry;
2886 int res;
2887
2888 br_read_lock(vfsmount_lock);
2889 if (mnt != path2->mnt) {
2890 for (;;) {
2891 if (mnt->mnt_parent == mnt) {
2892 br_read_unlock(vfsmount_lock);
2893 return 0;
2894 }
2895 if (mnt->mnt_parent == path2->mnt)
2896 break;
2897 mnt = mnt->mnt_parent;
2898 }
2899 dentry = mnt->mnt_mountpoint;
2900 }
2901 res = is_subdir(dentry, path2->dentry);
2902 br_read_unlock(vfsmount_lock);
2903 return res;
2904}
2905EXPORT_SYMBOL(path_is_under);
2906
2907void d_genocide(struct dentry *root)
2908{
2909 struct dentry *this_parent;
2910 struct list_head *next;
2911 unsigned seq;
2912 int locked = 0;
2913
2914 seq = read_seqbegin(&rename_lock);
2915again:
2916 this_parent = root;
2917 spin_lock(&this_parent->d_lock);
2918repeat:
2919 next = this_parent->d_subdirs.next;
2920resume:
2921 while (next != &this_parent->d_subdirs) {
2922 struct list_head *tmp = next;
2923 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
2924 next = tmp->next;
2925
2926 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2927 if (d_unhashed(dentry) || !dentry->d_inode) {
2928 spin_unlock(&dentry->d_lock);
2929 continue;
2930 }
2931 if (!list_empty(&dentry->d_subdirs)) {
2932 spin_unlock(&this_parent->d_lock);
2933 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
2934 this_parent = dentry;
2935 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
2936 goto repeat;
2937 }
2938 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
2939 dentry->d_flags |= DCACHE_GENOCIDE;
2940 dentry->d_count--;
2941 }
2942 spin_unlock(&dentry->d_lock);
2943 }
2944 if (this_parent != root) {
2945 struct dentry *tmp;
2946 struct dentry *child;
2947
2948 tmp = this_parent->d_parent;
2949 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
2950 this_parent->d_flags |= DCACHE_GENOCIDE;
2951 this_parent->d_count--;
2952 }
2953 rcu_read_lock();
2954 spin_unlock(&this_parent->d_lock);
2955 child = this_parent;
2956 this_parent = tmp;
2957 spin_lock(&this_parent->d_lock);
2958
2959
2960 if (this_parent != child->d_parent ||
2961 (!locked && read_seqretry(&rename_lock, seq))) {
2962 spin_unlock(&this_parent->d_lock);
2963 rcu_read_unlock();
2964 goto rename_retry;
2965 }
2966 rcu_read_unlock();
2967 next = child->d_u.d_child.next;
2968 goto resume;
2969 }
2970 spin_unlock(&this_parent->d_lock);
2971 if (!locked && read_seqretry(&rename_lock, seq))
2972 goto rename_retry;
2973 if (locked)
2974 write_sequnlock(&rename_lock);
2975 return;
2976
2977rename_retry:
2978 locked = 1;
2979 write_seqlock(&rename_lock);
2980 goto again;
2981}
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2998{
2999 struct dentry * dentry;
3000 ino_t ino = 0;
3001
3002 dentry = d_hash_and_lookup(dir, name);
3003 if (dentry) {
3004 if (dentry->d_inode)
3005 ino = dentry->d_inode->i_ino;
3006 dput(dentry);
3007 }
3008 return ino;
3009}
3010EXPORT_SYMBOL(find_inode_number);
3011
3012static __initdata unsigned long dhash_entries;
3013static int __init set_dhash_entries(char *str)
3014{
3015 if (!str)
3016 return 0;
3017 dhash_entries = simple_strtoul(str, &str, 0);
3018 return 1;
3019}
3020__setup("dhash_entries=", set_dhash_entries);
3021
3022static void __init dcache_init_early(void)
3023{
3024 int loop;
3025
3026
3027
3028
3029 if (hashdist)
3030 return;
3031
3032 dentry_hashtable =
3033 alloc_large_system_hash("Dentry cache",
3034 sizeof(struct dcache_hash_bucket),
3035 dhash_entries,
3036 13,
3037 HASH_EARLY,
3038 &d_hash_shift,
3039 &d_hash_mask,
3040 0);
3041
3042 for (loop = 0; loop < (1 << d_hash_shift); loop++)
3043 INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
3044}
3045
3046static void __init dcache_init(void)
3047{
3048 int loop;
3049
3050
3051
3052
3053
3054
3055 dentry_cache = KMEM_CACHE(dentry,
3056 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
3057
3058 register_shrinker(&dcache_shrinker);
3059
3060
3061 if (!hashdist)
3062 return;
3063
3064 dentry_hashtable =
3065 alloc_large_system_hash("Dentry cache",
3066 sizeof(struct dcache_hash_bucket),
3067 dhash_entries,
3068 13,
3069 0,
3070 &d_hash_shift,
3071 &d_hash_mask,
3072 0);
3073
3074 for (loop = 0; loop < (1 << d_hash_shift); loop++)
3075 INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
3076}
3077
3078
3079struct kmem_cache *names_cachep __read_mostly;
3080EXPORT_SYMBOL(names_cachep);
3081
3082EXPORT_SYMBOL(d_genocide);
3083
3084void __init vfs_caches_init_early(void)
3085{
3086 dcache_init_early();
3087 inode_init_early();
3088}
3089
3090void __init vfs_caches_init(unsigned long mempages)
3091{
3092 unsigned long reserve;
3093
3094
3095
3096
3097 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3098 mempages -= reserve;
3099
3100 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3101 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3102
3103 dcache_init();
3104 inode_init();
3105 files_init(mempages);
3106 mnt_init();
3107 bdev_cache_init();
3108 chrdev_init();
3109}
3110