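/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */
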
#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include "internal.h"
#include "mount.h"
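/*
 * Locking rules used throughout this file:
 *
 * dentry->d_inode->i_lock protects:
 *   - i_dentry, d_alias, d_inode of aliases
 * dcache hash bucket bit-lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dcache_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_lockref.count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dcache_lru_lock
 *     dcache hash bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
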
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;
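/*
 * This is the single most critical data structure when it comes to the
 * dcache: the hashtable for lookups. It is protected per-bucket with a
 * bit spinlock embedded in each hlist_bl_head (see hlist_bl_lock()).
 */
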
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	hash = hash + (hash >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}

struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(unsigned int, nr_dentry);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
static int get_nr_dentry(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif
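/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */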
144#ifdef CONFIG_DCACHE_WORD_ACCESS
145
146#include <asm/word-at-a-time.h>
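/*
 * NOTE! 'cs' is read with plain word-sized loads, so it must come from
 * dentry name storage (which is at least word-aligned here).  'ct' comes
 * from the pathname being looked up, may be unaligned and may end in
 * the middle of a word, so it is read with load_unaligned_zeropad();
 * the final partial word is masked down to 'tcount' bytes before the
 * comparison.
 */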
156static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
157{
158 unsigned long a,b,mask;
159
160 for (;;) {
161 a = *(unsigned long *)cs;
162 b = load_unaligned_zeropad(ct);
163 if (tcount < sizeof(unsigned long))
164 break;
165 if (unlikely(a != b))
166 return 1;
167 cs += sizeof(unsigned long);
168 ct += sizeof(unsigned long);
169 tcount -= sizeof(unsigned long);
170 if (!tcount)
171 return 0;
172 }
173 mask = ~(~0ul << tcount*8);
174 return unlikely(!!((a ^ b) & mask));
175}
176
177#else
178
179static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
180{
181 do {
182 if (*cs != *ct)
183 return 1;
184 cs++;
185 ct++;
186 tcount--;
187 } while (tcount);
188 return 0;
189}
190
191#endif
192
193static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
194{
195 const unsigned char *cs;
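	/*
	 * Be careful about RCU walk racing with rename: use ACCESS_ONCE()
	 * to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length was not loaded
	 * atomically, we don't care.  The RCU walk will check the sequence
	 * count eventually, and catch it.  And we won't overrun the buffer,
	 * because we're reading the name pointer atomically, and a dentry
	 * name is guaranteed to be properly terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit early because the
	 * data cannot match (there can be no NUL in the ct/tcount data).
	 */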
212 cs = ACCESS_ONCE(dentry->d_name.name);
213 smp_read_barrier_depends();
214 return dentry_string_cmp(cs, ct, tcount);
215}
216
217void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
218{
219 size_t size = 0;
220 char *buf = NULL;
221
222 if (unlikely(dname_external(dentry))) {
223 size = READ_ONCE(dentry->d_name.len);
224retry:
225
226 name->name = buf = kmalloc(size + 1, GFP_KERNEL);
227 if (!buf)
228 return;
229 }
230
231 spin_lock(&dentry->d_lock);
232 if (unlikely(dname_external(dentry))) {
233 if (size < dentry->d_name.len) {
234
235 size = dentry->d_name.len;
236 spin_unlock(&dentry->d_lock);
237 kfree(buf);
238 goto retry;
239 }
240 strcpy(buf, dentry->d_name.name);
241 buf = NULL;
242 } else {
243 memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
244 name->name = name->inline_name;
245 }
246 spin_unlock(&dentry->d_lock);
247 kfree(buf);
248}
249EXPORT_SYMBOL(take_dentry_name_snapshot);
250
251void release_dentry_name_snapshot(struct name_snapshot *name)
252{
253 if (unlikely(name->name != name->inline_name))
254 kfree(name->name);
255}
256EXPORT_SYMBOL(release_dentry_name_snapshot);
257
258static void __d_free(struct rcu_head *head)
259{
260 struct dentry *dentry = container_of(
261 (struct hlist_node *)head, struct dentry, d_alias);
262
263 if (dname_external(dentry))
264 kfree(dentry->d_name.name);
265 kmem_cache_free(dentry_cache, dentry);
266}
267
268
269
270
271static void d_free(struct dentry *dentry)
272{
273 struct rcu_head *p = (struct rcu_head *)&dentry->d_alias;
274 BUG_ON((int)dentry->d_lockref.count > 0);
275 this_cpu_dec(nr_dentry);
276 if (dentry->d_op && dentry->d_op->d_release)
277 dentry->d_op->d_release(dentry);
278
279
280 if (!(dentry->d_flags & DCACHE_RCUACCESS))
281 __d_free(p);
282 else
283 call_rcu(p, __d_free);
284}
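/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 *
 * After this call, in-progress rcu-walk path lookups will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */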
293static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
294{
295 assert_spin_locked(&dentry->d_lock);
296
297 write_seqcount_barrier(&dentry->d_seq);
298}
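/*
 * Release the dentry's inode, using the filesystem's
 * d_iput() operation if defined. The dentry has no refcount
 * left and has already been unhashed.
 */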
305static void dentry_iput(struct dentry * dentry)
306 __releases(dentry->d_lock)
307 __releases(dentry->d_inode->i_lock)
308{
309 struct inode *inode = dentry->d_inode;
310 if (inode) {
311 dentry->d_inode = NULL;
312 hlist_del_init(&dentry->d_alias);
313 spin_unlock(&dentry->d_lock);
314 spin_unlock(&inode->i_lock);
315 if (!inode->i_nlink)
316 fsnotify_inoderemove(inode);
317 if (dentry->d_op && dentry->d_op->d_iput)
318 dentry->d_op->d_iput(dentry, inode);
319 else
320 iput(inode);
321 } else {
322 spin_unlock(&dentry->d_lock);
323 }
324}
325
326
327
328
329
330static void dentry_unlink_inode(struct dentry * dentry)
331 __releases(dentry->d_lock)
332 __releases(dentry->d_inode->i_lock)
333{
334 struct inode *inode = dentry->d_inode;
335 __d_clear_type(dentry);
336 dentry->d_inode = NULL;
337 hlist_del_init(&dentry->d_alias);
338 dentry_rcuwalk_barrier(dentry);
339 spin_unlock(&dentry->d_lock);
340 spin_unlock(&inode->i_lock);
341 if (!inode->i_nlink)
342 fsnotify_inoderemove(inode);
343 if (dentry->d_op && dentry->d_op->d_iput)
344 dentry->d_op->d_iput(dentry, inode);
345 else
346 iput(inode);
347}
348
349
350
351
352static void dentry_lru_add(struct dentry *dentry)
353{
354 if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
355 spin_lock(&dcache_lru_lock);
356 dentry->d_flags |= DCACHE_LRU_LIST;
357 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
358 dentry->d_sb->s_nr_dentry_unused++;
359 dentry_stat.nr_unused++;
360 spin_unlock(&dcache_lru_lock);
361 }
362}
363
364static void __dentry_lru_del(struct dentry *dentry)
365{
366 list_del_init(&dentry->d_lru);
367 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
368 dentry->d_sb->s_nr_dentry_unused--;
369 dentry_stat.nr_unused--;
370}
371
372
373
374
375static void dentry_lru_del(struct dentry *dentry)
376{
377 if (!list_empty(&dentry->d_lru)) {
378 spin_lock(&dcache_lru_lock);
379 __dentry_lru_del(dentry);
380 spin_unlock(&dcache_lru_lock);
381 }
382}
383
384static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
385{
386 spin_lock(&dcache_lru_lock);
387 if (list_empty(&dentry->d_lru)) {
388 dentry->d_flags |= DCACHE_LRU_LIST;
389 list_add_tail(&dentry->d_lru, list);
390 dentry->d_sb->s_nr_dentry_unused++;
391 dentry_stat.nr_unused++;
392 } else {
393 list_move_tail(&dentry->d_lru, list);
394 }
395 spin_unlock(&dcache_lru_lock);
396}
397
398
399
400
401
402
403static void __d_shrink(struct dentry *dentry)
404{
405 if (!d_unhashed(dentry)) {
406 struct hlist_bl_head *b;
407 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
408 b = &dentry->d_sb->s_anon;
409 else
410 b = d_hash(dentry->d_parent, dentry->d_name.hash);
411
412 hlist_bl_lock(b);
413 __hlist_bl_del(&dentry->d_hash);
414 dentry->d_hash.pprev = NULL;
415 hlist_bl_unlock(b);
416 }
417}
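/**
 * __d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it
 * won't be found through a VFS lookup any more. Note that this is
 * different from deleting the dentry - d_delete will try to mark the
 * dentry negative if possible, giving a successful _negative_ lookup,
 * while d_drop will just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry
 * for some reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */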
434void __d_drop(struct dentry *dentry)
435{
436 if (!d_unhashed(dentry)) {
437 __d_shrink(dentry);
438 dentry_rcuwalk_barrier(dentry);
439 }
440}
441EXPORT_SYMBOL(__d_drop);
442
443void d_drop(struct dentry *dentry)
444{
445 spin_lock(&dentry->d_lock);
446 __d_drop(dentry);
447 spin_unlock(&dentry->d_lock);
448}
449EXPORT_SYMBOL(d_drop);
450
451static void __dentry_kill(struct dentry *dentry)
452{
453 struct dentry *parent;
454 if (IS_ROOT(dentry))
455 parent = NULL;
456 else
457 parent = dentry->d_parent;
458
459
460
461
462 lockref_mark_dead(&dentry->d_lockref);
463
464
465
466
467
468 if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
469 dentry->d_op->d_prune(dentry);
470
471 dentry_lru_del(dentry);
472
473 __d_drop(dentry);
474 __list_del_entry(&dentry->d_u.d_child);
475
476
477
478
479 dentry->d_flags |= DCACHE_DENTRY_KILLED;
480 if (parent)
481 spin_unlock(&parent->d_lock);
482 dentry_iput(dentry);
483
484
485
486
487 d_free(dentry);
488}
489
490
491
492
493
494
495
496static inline struct dentry *dentry_kill(struct dentry *dentry)
497 __releases(dentry->d_lock)
498{
499 struct inode *inode = dentry->d_inode;
500 struct dentry *parent = NULL;
501
502 if (inode && unlikely(!spin_trylock(&inode->i_lock)))
503 goto failed;
504
505 if (!IS_ROOT(dentry)) {
506 parent = dentry->d_parent;
507 if (unlikely(!spin_trylock(&parent->d_lock))) {
508 if (inode)
509 spin_unlock(&inode->i_lock);
510 goto failed;
511 }
512 }
513
514 __dentry_kill(dentry);
515 return parent;
516
517failed:
518 spin_unlock(&dentry->d_lock);
519 return dentry;
520}
521
522static inline struct dentry *lock_parent(struct dentry *dentry)
523{
524 struct dentry *parent = dentry->d_parent;
525 if (IS_ROOT(dentry))
526 return NULL;
527 if (likely(spin_trylock(&parent->d_lock)))
528 return parent;
529 spin_unlock(&dentry->d_lock);
530 rcu_read_lock();
531again:
532 parent = ACCESS_ONCE(dentry->d_parent);
533 spin_lock(&parent->d_lock);
534
535
536
537
538
539
540
541
542 if (unlikely(parent != dentry->d_parent)) {
543 spin_unlock(&parent->d_lock);
544 goto again;
545 }
546 rcu_read_unlock();
547 if (parent != dentry)
548 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
549 else
550 parent = NULL;
551 return parent;
552}
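/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues
 * and releasing its resources. If the parent dentries were scheduled
 * for release they too may now get deleted.
 */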
580void dput(struct dentry *dentry)
581{
582 if (unlikely(!dentry))
583 return;
584
585repeat:
586 might_sleep();
587
588 if (lockref_put_or_lock(&dentry->d_lockref))
589 return;
590
591
592 if (unlikely(d_unhashed(dentry)))
593 goto kill_it;
594
595 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
596 if (dentry->d_op->d_delete(dentry))
597 goto kill_it;
598 }
599
600 dentry->d_flags |= DCACHE_REFERENCED;
601 dentry_lru_add(dentry);
602
603 dentry->d_lockref.count--;
604 spin_unlock(&dentry->d_lock);
605 return;
606
607kill_it:
608 dentry = dentry_kill(dentry);
609 if (dentry) {
610 cond_resched();
611 goto repeat;
612 }
613}
614EXPORT_SYMBOL(dput);
615
616
617
618static inline void __dget_dlock(struct dentry *dentry)
619{
620 dentry->d_lockref.count++;
621}
622
623static inline void __dget(struct dentry *dentry)
624{
625 lockref_get(&dentry->d_lockref);
626}
627
628struct dentry *dget_parent(struct dentry *dentry)
629{
630 int gotref;
631 struct dentry *ret;
632
633
634
635
636
637 rcu_read_lock();
638 ret = ACCESS_ONCE(dentry->d_parent);
639 gotref = lockref_get_not_zero(&ret->d_lockref);
640 rcu_read_unlock();
641 if (likely(gotref)) {
642 if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
643 return ret;
644 dput(ret);
645 }
646
647repeat:
648
649
650
651
652 rcu_read_lock();
653 ret = dentry->d_parent;
654 spin_lock(&ret->d_lock);
655 if (unlikely(ret != dentry->d_parent)) {
656 spin_unlock(&ret->d_lock);
657 rcu_read_unlock();
658 goto repeat;
659 }
660 rcu_read_unlock();
661 BUG_ON(!ret->d_lockref.count);
662 ret->d_lockref.count++;
663 spin_unlock(&ret->d_lock);
664 return ret;
665}
666EXPORT_SYMBOL(dget_parent);
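/**
 * __d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon:  flag, used by d_splice_alias, to request
 *          that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */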
685static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
686{
687 struct dentry *alias, *discon_alias;
688
689again:
690 discon_alias = NULL;
691 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
692 spin_lock(&alias->d_lock);
693 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
694 if (IS_ROOT(alias) &&
695 (alias->d_flags & DCACHE_DISCONNECTED)) {
696 discon_alias = alias;
697 } else if (!want_discon) {
698 __dget_dlock(alias);
699 spin_unlock(&alias->d_lock);
700 return alias;
701 }
702 }
703 spin_unlock(&alias->d_lock);
704 }
705 if (discon_alias) {
706 alias = discon_alias;
707 spin_lock(&alias->d_lock);
708 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
709 if (IS_ROOT(alias) &&
710 (alias->d_flags & DCACHE_DISCONNECTED)) {
711 __dget_dlock(alias);
712 spin_unlock(&alias->d_lock);
713 return alias;
714 }
715 }
716 spin_unlock(&alias->d_lock);
717 goto again;
718 }
719 return NULL;
720}
721
722struct dentry *d_find_alias(struct inode *inode)
723{
724 struct dentry *de = NULL;
725
726 if (!hlist_empty(&inode->i_dentry)) {
727 spin_lock(&inode->i_lock);
728 de = __d_find_alias(inode, 0);
729 spin_unlock(&inode->i_lock);
730 }
731 return de;
732}
733EXPORT_SYMBOL(d_find_alias);
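/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */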
739void d_prune_aliases(struct inode *inode)
740{
741 struct dentry *dentry;
742restart:
743 spin_lock(&inode->i_lock);
744 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
745 spin_lock(&dentry->d_lock);
746 if (!dentry->d_lockref.count) {
747
748
749
750
751 if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
752 !d_unhashed(dentry))
753 dentry->d_op->d_prune(dentry);
754
755 __dget_dlock(dentry);
756 __d_drop(dentry);
757 spin_unlock(&dentry->d_lock);
758 spin_unlock(&inode->i_lock);
759 dput(dentry);
760 goto restart;
761 }
762 spin_unlock(&dentry->d_lock);
763 }
764 spin_unlock(&inode->i_lock);
765}
766EXPORT_SYMBOL(d_prune_aliases);
767
768
769static void shrink_dentry_list(struct list_head *list)
770{
771 struct dentry *dentry, *parent;
772
773 rcu_read_lock();
774 for (;;) {
775 struct inode *inode;
776 dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
777 if (&dentry->d_lru == list)
778 break;
779 spin_lock(&dentry->d_lock);
780 if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
781 spin_unlock(&dentry->d_lock);
782 continue;
783 }
784
785 parent = lock_parent(dentry);
786
787
788
789
790
791
792 if (dentry->d_lockref.count) {
793 dentry_lru_del(dentry);
794 spin_unlock(&dentry->d_lock);
795 if (parent)
796 spin_unlock(&parent->d_lock);
797 continue;
798 }
799
800 rcu_read_unlock();
801
802 inode = dentry->d_inode;
803 if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
804 spin_unlock(&dentry->d_lock);
805 if (parent)
806 spin_unlock(&parent->d_lock);
807 cpu_relax();
808 rcu_read_lock();
809 continue;
810 }
811
812 __dentry_kill(dentry);
813
814
815
816
817
818
819
820 dentry = parent;
821 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
822 parent = lock_parent(dentry);
823 if (dentry->d_lockref.count != 1) {
824 dentry->d_lockref.count--;
825 spin_unlock(&dentry->d_lock);
826 if (parent)
827 spin_unlock(&parent->d_lock);
828 break;
829 }
830 inode = dentry->d_inode;
831 if (unlikely(!spin_trylock(&inode->i_lock))) {
832 spin_unlock(&dentry->d_lock);
833 if (parent)
834 spin_unlock(&parent->d_lock);
835 cpu_relax();
836 continue;
837 }
838 __dentry_kill(dentry);
839 dentry = parent;
840 }
841 rcu_read_lock();
842 }
843 rcu_read_unlock();
844}
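/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @count: number of entries to try to free
 *
 * Attempt to shrink the superblock dcache LRU by @count entries. This is
 * done when we need more memory and is called from the superblock
 * shrinker function.
 *
 * This function may fail to free any resources if all the dentries are
 * in use.
 */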
858void prune_dcache_sb(struct super_block *sb, int count)
859{
860 struct dentry *dentry;
861 LIST_HEAD(referenced);
862 LIST_HEAD(tmp);
863
864relock:
865 spin_lock(&dcache_lru_lock);
866 while (!list_empty(&sb->s_dentry_lru)) {
867 dentry = list_entry(sb->s_dentry_lru.prev,
868 struct dentry, d_lru);
869 BUG_ON(dentry->d_sb != sb);
870
871 if (!spin_trylock(&dentry->d_lock)) {
872 spin_unlock(&dcache_lru_lock);
873 cpu_relax();
874 goto relock;
875 }
876
877 if (dentry->d_flags & DCACHE_REFERENCED) {
878 dentry->d_flags &= ~DCACHE_REFERENCED;
879 list_move(&dentry->d_lru, &referenced);
880 spin_unlock(&dentry->d_lock);
881 } else {
882 list_move_tail(&dentry->d_lru, &tmp);
883 dentry->d_flags |= DCACHE_SHRINK_LIST;
884 spin_unlock(&dentry->d_lock);
885 if (!--count)
886 break;
887 }
888 cond_resched_lock(&dcache_lru_lock);
889 }
890 if (!list_empty(&referenced))
891 list_splice(&referenced, &sb->s_dentry_lru);
892 spin_unlock(&dcache_lru_lock);
893
894 shrink_dentry_list(&tmp);
895}
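/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */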
904void shrink_dcache_sb(struct super_block *sb)
905{
906 LIST_HEAD(tmp);
907
908 spin_lock(&dcache_lru_lock);
909 while (!list_empty(&sb->s_dentry_lru)) {
910 list_splice_init(&sb->s_dentry_lru, &tmp);
911 spin_unlock(&dcache_lru_lock);
912 shrink_dentry_list(&tmp);
913 spin_lock(&dcache_lru_lock);
914 }
915 spin_unlock(&dcache_lru_lock);
916}
917EXPORT_SYMBOL(shrink_dcache_sb);
918
919
920
921
922
923
924#define RESCHED_CHECK_BATCH 1024
925static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
926{
927 struct dentry *parent;
928 int batch = RESCHED_CHECK_BATCH;
929
930 BUG_ON(!IS_ROOT(dentry));
931
932 for (;;) {
933
934 while (!list_empty(&dentry->d_subdirs))
935 dentry = list_entry(dentry->d_subdirs.next,
936 struct dentry, d_u.d_child);
937
938
939
940 do {
941 struct inode *inode;
942
943
944
945
946
947 if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
948 !d_unhashed(dentry))
949 dentry->d_op->d_prune(dentry);
950
951 dentry_lru_del(dentry);
952 __d_shrink(dentry);
953
954 if (dentry->d_lockref.count != 0) {
955 printk(KERN_ERR
956 "BUG: Dentry %p{i=%lx,n=%s}"
957 " still in use (%d)"
958 " [unmount of %s %s]\n",
959 dentry,
960 dentry->d_inode ?
961 dentry->d_inode->i_ino : 0UL,
962 dentry->d_name.name,
963 dentry->d_lockref.count,
964 dentry->d_sb->s_type->name,
965 dentry->d_sb->s_id);
966 BUG();
967 }
968
969 if (IS_ROOT(dentry)) {
970 parent = NULL;
971 list_del(&dentry->d_u.d_child);
972 } else {
973 parent = dentry->d_parent;
974 parent->d_lockref.count--;
975 list_del(&dentry->d_u.d_child);
976 }
977
978 inode = dentry->d_inode;
979 if (inode) {
980 dentry->d_inode = NULL;
981 hlist_del_init(&dentry->d_alias);
982 if (dentry->d_op && dentry->d_op->d_iput)
983 dentry->d_op->d_iput(dentry, inode);
984 else
985 iput(inode);
986 }
987
988 d_free(dentry);
989
990
991
992
993 if (!parent)
994 return;
995 dentry = parent;
996 if (!--batch) {
997 cond_resched();
998 batch = RESCHED_CHECK_BATCH;
999 }
1000 } while (list_empty(&dentry->d_subdirs));
1001
1002 dentry = list_entry(dentry->d_subdirs.next,
1003 struct dentry, d_u.d_child);
1004 }
1005}
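/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */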
1017void shrink_dcache_for_umount(struct super_block *sb)
1018{
1019 struct dentry *dentry;
1020
1021 if (down_read_trylock(&sb->s_umount))
1022 BUG();
1023
1024 dentry = sb->s_root;
1025 sb->s_root = NULL;
1026 dentry->d_lockref.count--;
1027 shrink_dcache_for_umount_subtree(dentry);
1028
1029 while (!hlist_bl_empty(&sb->s_anon)) {
1030 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
1031 shrink_dcache_for_umount_subtree(dentry);
1032 }
1033}
1034
1035
1036
1037
1038
1039
1040
1041
1042enum d_walk_ret {
1043 D_WALK_CONTINUE,
1044 D_WALK_QUIT,
1045 D_WALK_NORETRY,
1046 D_WALK_SKIP,
1047};
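/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 * @finish:	callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.
 */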
1058static void d_walk(struct dentry *parent, void *data,
1059 enum d_walk_ret (*enter)(void *, struct dentry *),
1060 void (*finish)(void *))
1061{
1062 struct dentry *this_parent;
1063 struct list_head *next;
1064 unsigned seq = 0;
1065 enum d_walk_ret ret;
1066 bool retry = true;
1067
1068again:
1069 read_seqbegin_or_lock(&rename_lock, &seq);
1070 this_parent = parent;
1071 spin_lock(&this_parent->d_lock);
1072
1073 ret = enter(data, this_parent);
1074 switch (ret) {
1075 case D_WALK_CONTINUE:
1076 break;
1077 case D_WALK_QUIT:
1078 case D_WALK_SKIP:
1079 goto out_unlock;
1080 case D_WALK_NORETRY:
1081 retry = false;
1082 break;
1083 }
1084repeat:
1085 next = this_parent->d_subdirs.next;
1086resume:
1087 while (next != &this_parent->d_subdirs) {
1088 struct list_head *tmp = next;
1089 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1090 next = tmp->next;
1091
1092 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1093
1094 ret = enter(data, dentry);
1095 switch (ret) {
1096 case D_WALK_CONTINUE:
1097 break;
1098 case D_WALK_QUIT:
1099 spin_unlock(&dentry->d_lock);
1100 goto out_unlock;
1101 case D_WALK_NORETRY:
1102 retry = false;
1103 break;
1104 case D_WALK_SKIP:
1105 spin_unlock(&dentry->d_lock);
1106 continue;
1107 }
1108
1109 if (!list_empty(&dentry->d_subdirs)) {
1110 spin_unlock(&this_parent->d_lock);
1111 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1112 this_parent = dentry;
1113 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1114 goto repeat;
1115 }
1116 spin_unlock(&dentry->d_lock);
1117 }
1118
1119
1120
1121 rcu_read_lock();
1122ascend:
1123 if (this_parent != parent) {
1124 struct dentry *child = this_parent;
1125 this_parent = child->d_parent;
1126
1127 spin_unlock(&child->d_lock);
1128 spin_lock(&this_parent->d_lock);
1129
1130
1131 if (need_seqretry(&rename_lock, seq))
1132 goto rename_retry;
1133
1134 do {
1135 next = child->d_u.d_child.next;
1136 if (next == &this_parent->d_subdirs)
1137 goto ascend;
1138 child = list_entry(next, struct dentry, d_u.d_child);
1139 } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1140 rcu_read_unlock();
1141 goto resume;
1142 }
1143 if (need_seqretry(&rename_lock, seq))
1144 goto rename_retry;
1145 rcu_read_unlock();
1146 if (finish)
1147 finish(data);
1148
1149out_unlock:
1150 spin_unlock(&this_parent->d_lock);
1151 done_seqretry(&rename_lock, seq);
1152 return;
1153
1154rename_retry:
1155 spin_unlock(&this_parent->d_lock);
1156 rcu_read_unlock();
1157 BUG_ON(seq & 1);
1158 if (!retry)
1159 return;
1160 seq = 1;
1161 goto again;
1162}
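/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 *
 * have_submounts() below returns true if the parent or its
 * subdirectories contain a mount point.
 */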
1178static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1179{
1180 int *ret = data;
1181 if (d_mountpoint(dentry)) {
1182 *ret = 1;
1183 return D_WALK_QUIT;
1184 }
1185 return D_WALK_CONTINUE;
1186}
1187
1188int have_submounts(struct dentry *parent)
1189{
1190 int ret = 0;
1191
1192 d_walk(parent, &ret, check_mount, NULL);
1193
1194 return ret;
1195}
1196EXPORT_SYMBOL(have_submounts);
1197
1198struct check_mount {
1199 struct vfsmount *mnt;
1200 unsigned int mounted;
1201};
1202
1203static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
1204{
1205 struct check_mount *info = data;
1206 struct path path = { .mnt = info->mnt, .dentry = dentry };
1207
1208 if (likely(!d_mountpoint(dentry)))
1209 return D_WALK_CONTINUE;
1210 if (__path_is_mountpoint(&path)) {
1211 info->mounted = 1;
1212 return D_WALK_QUIT;
1213 }
1214 return D_WALK_CONTINUE;
1215}
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225int path_has_submounts(const struct path *parent)
1226{
1227 struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
1228
1229 read_seqlock_excl(&mount_lock);
1230 d_walk(parent->dentry, &data, path_check_mount, NULL);
1231 read_sequnlock_excl(&mount_lock);
1232
1233 return data.mounted;
1234}
1235EXPORT_SYMBOL(path_has_submounts);
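/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */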
1245int d_set_mounted(struct dentry *dentry)
1246{
1247 struct dentry *p;
1248 int ret = -ENOENT;
1249 write_seqlock(&rename_lock);
1250 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1251
1252 spin_lock(&p->d_lock);
1253 if (unlikely(d_unhashed(p))) {
1254 spin_unlock(&p->d_lock);
1255 goto out;
1256 }
1257 spin_unlock(&p->d_lock);
1258 }
1259 spin_lock(&dentry->d_lock);
1260 if (!d_unlinked(dentry)) {
1261 ret = -EBUSY;
1262 if (!d_mountpoint(dentry)) {
1263 dentry->d_flags |= DCACHE_MOUNTED;
1264 ret = 0;
1265 }
1266 }
1267 spin_unlock(&dentry->d_lock);
1268out:
1269 write_sequnlock(&rename_lock);
1270 return ret;
1271}
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288struct select_data {
1289 struct dentry *start;
1290 struct list_head dispose;
1291 int found;
1292};
1293
1294static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1295{
1296 struct select_data *data = _data;
1297 enum d_walk_ret ret = D_WALK_CONTINUE;
1298
1299 if (data->start == dentry)
1300 goto out;
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310 if (dentry->d_lockref.count) {
1311 dentry_lru_del(dentry);
1312 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1313 dentry_lru_move_list(dentry, &data->dispose);
1314 dentry->d_flags |= DCACHE_SHRINK_LIST;
1315 data->found++;
1316 ret = D_WALK_NORETRY;
1317 }
1318
1319
1320
1321
1322
1323 if (data->found && need_resched())
1324 ret = D_WALK_QUIT;
1325out:
1326 return ret;
1327}
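/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */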
1335void shrink_dcache_parent(struct dentry *parent)
1336{
1337 for (;;) {
1338 struct select_data data;
1339
1340 INIT_LIST_HEAD(&data.dispose);
1341 data.start = parent;
1342 data.found = 0;
1343
1344 d_walk(parent, &data, select_collect, NULL);
1345 if (!data.found)
1346 break;
1347
1348 shrink_dentry_list(&data.dispose);
1349 cond_resched();
1350 }
1351}
1352EXPORT_SYMBOL(shrink_dcache_parent);
1353
1354struct detach_data {
1355 struct select_data select;
1356 struct dentry *mountpoint;
1357};
1358static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
1359{
1360 struct detach_data *data = _data;
1361
1362 if (d_mountpoint(dentry)) {
1363 __dget_dlock(dentry);
1364 data->mountpoint = dentry;
1365 return D_WALK_QUIT;
1366 }
1367
1368 return select_collect(&data->select, dentry);
1369}
1370
1371static void check_and_drop(void *_data)
1372{
1373 struct detach_data *data = _data;
1374
1375 if (!data->mountpoint && !data->select.found)
1376 __d_drop(data->select.start);
1377}
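/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * Returns 0 on success, or -EBUSY if something is mounted on the dentry
 * and detaching mounts is not permitted.
 */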
1389int d_invalidate(struct dentry *dentry)
1390{
1391
1392
1393
1394 spin_lock(&dentry->d_lock);
1395 if (d_unhashed(dentry)) {
1396 spin_unlock(&dentry->d_lock);
1397 return 0;
1398 }
1399 spin_unlock(&dentry->d_lock);
1400
1401
1402 if (!dentry->d_inode) {
1403 d_drop(dentry);
1404 return 0;
1405 }
1406
1407 for (;;) {
1408 struct detach_data data;
1409
1410 data.mountpoint = NULL;
1411 INIT_LIST_HEAD(&data.select.dispose);
1412 data.select.start = dentry;
1413 data.select.found = 0;
1414
1415 d_walk(dentry, &data, detach_and_collect, check_and_drop);
1416
1417 if (data.select.found)
1418 shrink_dentry_list(&data.select.dispose);
1419
1420 if (data.mountpoint) {
1421 if (may_detach_mounts) {
1422 detach_mounts(data.mountpoint);
1423 dput(data.mountpoint);
1424 } else {
1425 dput(data.mountpoint);
1426 return -EBUSY;
1427 }
1428 }
1429
1430 if (!data.mountpoint && !data.select.found)
1431 return 0;
1432
1433 cond_resched();
1434 }
1435}
1436EXPORT_SYMBOL(d_invalidate);
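/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */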
1448struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1449{
1450 struct dentry *dentry;
1451 char *dname;
1452
1453 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1454 if (!dentry)
1455 return NULL;
1456
1457
1458
1459
1460
1461
1462
1463 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1464 if (name->len > DNAME_INLINE_LEN-1) {
1465 dname = kmalloc(name->len + 1, GFP_KERNEL);
1466 if (!dname) {
1467 kmem_cache_free(dentry_cache, dentry);
1468 return NULL;
1469 }
1470 } else {
1471 dname = dentry->d_iname;
1472 }
1473
1474 dentry->d_name.len = name->len;
1475 dentry->d_name.hash = name->hash;
1476 memcpy(dname, name->name, name->len);
1477 dname[name->len] = 0;
1478
1479
1480 smp_wmb();
1481 dentry->d_name.name = dname;
1482
1483 dentry->d_lockref.count = 1;
1484 dentry->d_flags = 0;
1485 spin_lock_init(&dentry->d_lock);
1486 seqcount_init(&dentry->d_seq);
1487 dentry->d_inode = NULL;
1488 dentry->d_parent = dentry;
1489 dentry->d_sb = sb;
1490 dentry->d_op = NULL;
1491 dentry->d_fsdata = NULL;
1492 INIT_HLIST_BL_NODE(&dentry->d_hash);
1493 INIT_LIST_HEAD(&dentry->d_lru);
1494 INIT_LIST_HEAD(&dentry->d_subdirs);
1495 INIT_HLIST_NODE(&dentry->d_alias);
1496 INIT_LIST_HEAD(&dentry->d_u.d_child);
1497 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1498
1499 this_cpu_inc(nr_dentry);
1500
1501 return dentry;
1502}
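/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */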
1513struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1514{
1515 struct dentry *dentry = __d_alloc(parent->d_sb, name);
1516 if (!dentry)
1517 return NULL;
1518
1519 dentry->d_flags |= DCACHE_RCUACCESS;
1520 spin_lock(&parent->d_lock);
1521
1522
1523
1524
1525 __dget_dlock(parent);
1526 dentry->d_parent = parent;
1527 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1528 spin_unlock(&parent->d_lock);
1529
1530 return dentry;
1531}
1532EXPORT_SYMBOL(d_alloc);
1533
1534struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1535{
1536 struct dentry *dentry = __d_alloc(sb, name);
1537 if (dentry)
1538 dentry->d_flags |= DCACHE_DISCONNECTED;
1539 return dentry;
1540}
1541EXPORT_SYMBOL(d_alloc_pseudo);
1542
1543struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1544{
1545 struct qstr q;
1546
1547 q.name = name;
1548 q.len = strlen(name);
1549 q.hash = full_name_hash(q.name, q.len);
1550 return d_alloc(parent, &q);
1551}
1552EXPORT_SYMBOL(d_alloc_name);
1553
1554void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1555{
1556 WARN_ON_ONCE(dentry->d_op);
1557 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1558 DCACHE_OP_COMPARE |
1559 DCACHE_OP_REVALIDATE |
1560 DCACHE_OP_WEAK_REVALIDATE |
1561 DCACHE_OP_DELETE |
1562 DCACHE_OP_REAL));
1563 dentry->d_op = op;
1564 if (!op)
1565 return;
1566 if (op->d_hash)
1567 dentry->d_flags |= DCACHE_OP_HASH;
1568 if (op->d_compare)
1569 dentry->d_flags |= DCACHE_OP_COMPARE;
1570 if (op->d_revalidate)
1571 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1572 if (op->d_weak_revalidate)
1573 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1574 if (op->d_delete)
1575 dentry->d_flags |= DCACHE_OP_DELETE;
1576 if (op->d_prune)
1577 dentry->d_flags |= DCACHE_OP_PRUNE;
1578
1579 if (get_real_dop(dentry))
1580 dentry->d_flags |= DCACHE_OP_REAL;
1581}
1582EXPORT_SYMBOL(d_set_d_op);
1583
1584static unsigned d_flags_for_inode(struct inode *inode)
1585{
1586 unsigned add_flags = DCACHE_FILE_TYPE;
1587
1588 if (!inode)
1589 return DCACHE_MISS_TYPE;
1590
1591 if (S_ISDIR(inode->i_mode)) {
1592 add_flags = DCACHE_DIRECTORY_TYPE;
1593 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1594 if (unlikely(!inode->i_op->lookup))
1595 add_flags = DCACHE_AUTODIR_TYPE;
1596 else
1597 inode->i_opflags |= IOP_LOOKUP;
1598 }
1599 } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1600 if (unlikely(inode->i_op->follow_link))
1601 add_flags = DCACHE_SYMLINK_TYPE;
1602 else
1603 inode->i_opflags |= IOP_NOFOLLOW;
1604 }
1605
1606 if (unlikely(IS_AUTOMOUNT(inode)))
1607 add_flags |= DCACHE_NEED_AUTOMOUNT;
1608 return add_flags;
1609}
1610
1611static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1612{
1613 unsigned add_flags = d_flags_for_inode(inode);
1614
1615 spin_lock(&dentry->d_lock);
1616 __d_set_type(dentry, add_flags);
1617 if (inode)
1618 hlist_add_head(&dentry->d_alias, &inode->i_dentry);
1619 dentry->d_inode = inode;
1620 dentry_rcuwalk_barrier(dentry);
1621 if (inode)
1622 fsnotify_update_flags(dentry);
1623 spin_unlock(&dentry->d_lock);
1624}
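/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * or otherwise set up to indicate that it is now in use by the
 * dcache.
 */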
1641void d_instantiate(struct dentry *entry, struct inode * inode)
1642{
1643 BUG_ON(!hlist_unhashed(&entry->d_alias));
1644 if (inode)
1645 spin_lock(&inode->i_lock);
1646 __d_instantiate(entry, inode);
1647 if (inode)
1648 spin_unlock(&inode->i_lock);
1649 security_d_instantiate(entry, inode);
1650}
1651EXPORT_SYMBOL(d_instantiate);
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669static struct dentry *__d_instantiate_unique(struct dentry *entry,
1670 struct inode *inode)
1671{
1672 struct dentry *alias;
1673 int len = entry->d_name.len;
1674 const char *name = entry->d_name.name;
1675 unsigned int hash = entry->d_name.hash;
1676
1677 if (!inode) {
1678 __d_instantiate(entry, NULL);
1679 return NULL;
1680 }
1681
1682 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1683
1684
1685
1686
1687
1688 if (alias->d_name.hash != hash)
1689 continue;
1690 if (alias->d_parent != entry->d_parent)
1691 continue;
1692 if (alias->d_name.len != len)
1693 continue;
1694 if (dentry_cmp(alias, name, len))
1695 continue;
1696 __dget(alias);
1697 return alias;
1698 }
1699
1700 __d_instantiate(entry, inode);
1701 return NULL;
1702}
1703
1704struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1705{
1706 struct dentry *result;
1707
1708 BUG_ON(!hlist_unhashed(&entry->d_alias));
1709
1710 if (inode)
1711 spin_lock(&inode->i_lock);
1712 result = __d_instantiate_unique(entry, inode);
1713 if (inode)
1714 spin_unlock(&inode->i_lock);
1715
1716 if (!result) {
1717 security_d_instantiate(entry, inode);
1718 return NULL;
1719 }
1720
1721 BUG_ON(!d_unhashed(result));
1722 iput(inode);
1723 return result;
1724}
1725
1726EXPORT_SYMBOL(d_instantiate_unique);
1727
1728struct dentry *d_make_root(struct inode *root_inode)
1729{
1730 struct dentry *res = NULL;
1731
1732 if (root_inode) {
1733 static const struct qstr name = QSTR_INIT("/", 1);
1734
1735 res = __d_alloc(root_inode->i_sb, &name);
1736 if (res)
1737 d_instantiate(res, root_inode);
1738 else
1739 iput(root_inode);
1740 }
1741 return res;
1742}
1743EXPORT_SYMBOL(d_make_root);
1744
1745static struct dentry * __d_find_any_alias(struct inode *inode)
1746{
1747 struct dentry *alias;
1748
1749 if (hlist_empty(&inode->i_dentry))
1750 return NULL;
1751 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1752 __dget(alias);
1753 return alias;
1754}
1755
1756
1757
1758
1759
1760
1761
1762
1763struct dentry *d_find_any_alias(struct inode *inode)
1764{
1765 struct dentry *de;
1766
1767 spin_lock(&inode->i_lock);
1768 de = __d_find_any_alias(inode);
1769 spin_unlock(&inode->i_lock);
1770 return de;
1771}
1772EXPORT_SYMBOL(d_find_any_alias);
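/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations.  The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry.  If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  To make it easier to use in export operations a %NULL or
 * IS_ERR inode may be passed in and the error will be propagated to the
 * return value, with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */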
1792struct dentry *d_obtain_alias(struct inode *inode)
1793{
1794 static const struct qstr anonstring = QSTR_INIT("/", 1);
1795 struct dentry *tmp;
1796 struct dentry *res;
1797 unsigned add_flags;
1798
1799 if (!inode)
1800 return ERR_PTR(-ESTALE);
1801 if (IS_ERR(inode))
1802 return ERR_CAST(inode);
1803
1804 res = d_find_any_alias(inode);
1805 if (res)
1806 goto out_iput;
1807
1808 tmp = __d_alloc(inode->i_sb, &anonstring);
1809 if (!tmp) {
1810 res = ERR_PTR(-ENOMEM);
1811 goto out_iput;
1812 }
1813
1814 spin_lock(&inode->i_lock);
1815 res = __d_find_any_alias(inode);
1816 if (res) {
1817 spin_unlock(&inode->i_lock);
1818 dput(tmp);
1819 goto out_iput;
1820 }
1821
1822
1823 add_flags = d_flags_for_inode(inode) | DCACHE_DISCONNECTED;
1824
1825 spin_lock(&tmp->d_lock);
1826 tmp->d_inode = inode;
1827 tmp->d_flags |= add_flags;
1828 hlist_add_head(&tmp->d_alias, &inode->i_dentry);
1829 hlist_bl_lock(&tmp->d_sb->s_anon);
1830 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1831 hlist_bl_unlock(&tmp->d_sb->s_anon);
1832 spin_unlock(&tmp->d_lock);
1833 spin_unlock(&inode->i_lock);
1834 security_d_instantiate(tmp, inode);
1835
1836 return tmp;
1837
1838 out_iput:
1839 if (res && !IS_ERR(res))
1840 security_d_instantiate(res, inode);
1841 iput(inode);
1842 return res;
1843}
1844EXPORT_SYMBOL(d_obtain_alias);
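/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 */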
1866struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1867{
1868 struct dentry *new = NULL;
1869
1870 if (IS_ERR(inode))
1871 return ERR_CAST(inode);
1872
1873 if (inode && S_ISDIR(inode->i_mode)) {
1874 spin_lock(&inode->i_lock);
1875 new = __d_find_alias(inode, 1);
1876 if (new) {
1877 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1878 spin_unlock(&inode->i_lock);
1879 security_d_instantiate(new, inode);
1880 d_move(new, dentry);
1881 iput(inode);
1882 } else {
1883
1884 __d_instantiate(dentry, inode);
1885 spin_unlock(&inode->i_lock);
1886 security_d_instantiate(dentry, inode);
1887 d_rehash(dentry);
1888 }
1889 } else {
1890 d_instantiate(dentry, inode);
1891 if (d_unhashed(dentry))
1892 d_rehash(dentry);
1893 }
1894 return new;
1895}
1896EXPORT_SYMBOL(d_splice_alias);
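/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode:  the inode case-insensitive lookup has found
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * If an entry with the exact-case name already exists in the dcache, it
 * is used and returned; otherwise a new dentry with the exact case is
 * allocated and the spliced entry is returned.
 */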
1914struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1915 struct qstr *name)
1916{
1917 struct dentry *found;
1918 struct dentry *new;
1919
1920
1921
1922
1923
1924 found = d_hash_and_lookup(dentry->d_parent, name);
1925 if (unlikely(IS_ERR(found)))
1926 goto err_out;
1927 if (!found) {
1928 new = d_alloc(dentry->d_parent, name);
1929 if (!new) {
1930 found = ERR_PTR(-ENOMEM);
1931 goto err_out;
1932 }
1933
1934 found = d_splice_alias(inode, new);
1935 if (found) {
1936 dput(new);
1937 return found;
1938 }
1939 return new;
1940 }
1941
1942
1943
1944
1945
1946
1947
1948 if (found->d_inode) {
1949 if (unlikely(found->d_inode != inode)) {
1950
1951 BUG_ON(!is_bad_inode(inode));
1952 BUG_ON(!is_bad_inode(found->d_inode));
1953 }
1954 iput(inode);
1955 return found;
1956 }
1957
1958
1959
1960
1961
1962 new = d_splice_alias(inode, found);
1963 if (new) {
1964 dput(found);
1965 found = new;
1966 }
1967 return found;
1968
1969err_out:
1970 iput(inode);
1971 return found;
1972}
1973EXPORT_SYMBOL(d_add_ci);
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989enum slow_d_compare {
1990 D_COMP_OK,
1991 D_COMP_NOMATCH,
1992 D_COMP_SEQRETRY,
1993};
1994
1995static noinline enum slow_d_compare slow_dentry_cmp(
1996 const struct dentry *parent,
1997 struct dentry *dentry,
1998 unsigned int seq,
1999 const struct qstr *name)
2000{
2001 int tlen = dentry->d_name.len;
2002 const char *tname = dentry->d_name.name;
2003
2004 if (read_seqcount_retry(&dentry->d_seq, seq)) {
2005 cpu_relax();
2006 return D_COMP_SEQRETRY;
2007 }
2008 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2009 return D_COMP_NOMATCH;
2010 return D_COMP_OK;
2011}
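/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seqp
 * returned here.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 */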
2042struct dentry *__d_lookup_rcu(const struct dentry *parent,
2043 const struct qstr *name,
2044 unsigned *seqp)
2045{
2046 u64 hashlen = name->hash_len;
2047 const unsigned char *str = name->name;
2048 struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
2049 struct hlist_bl_node *node;
2050 struct dentry *dentry;
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2073 unsigned seq;
2074
2075seqretry:
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090 seq = raw_seqcount_begin(&dentry->d_seq);
2091 if (dentry->d_parent != parent)
2092 continue;
2093 if (d_unhashed(dentry))
2094 continue;
2095
2096 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2097 if (dentry->d_name.hash != hashlen_hash(hashlen))
2098 continue;
2099 *seqp = seq;
2100 switch (slow_dentry_cmp(parent, dentry, seq, name)) {
2101 case D_COMP_OK:
2102 return dentry;
2103 case D_COMP_NOMATCH:
2104 continue;
2105 default:
2106 goto seqretry;
2107 }
2108 }
2109
2110 if (dentry->d_name.hash_len != hashlen)
2111 continue;
2112 *seqp = seq;
2113 if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
2114 return dentry;
2115 }
2116 return NULL;
2117}
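/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and
 * the dentry is returned. The caller must use dput to free the entry when
 * it has finished using it. %NULL is returned if the dentry does not exist.
 */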
2130struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2131{
2132 struct dentry *dentry;
2133 unsigned seq;
2134
2135 do {
2136 seq = read_seqbegin(&rename_lock);
2137 dentry = __d_lookup(parent, name);
2138 if (dentry)
2139 break;
2140 } while (read_seqretry(&rename_lock, seq));
2141 return dentry;
2142}
2143EXPORT_SYMBOL(d_lookup);
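/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */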
2160struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2161{
2162 unsigned int len = name->len;
2163 unsigned int hash = name->hash;
2164 const unsigned char *str = name->name;
2165 struct hlist_bl_head *b = d_hash(parent, hash);
2166 struct hlist_bl_node *node;
2167 struct dentry *found = NULL;
2168 struct dentry *dentry;
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190 rcu_read_lock();
2191
2192 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2193
2194 if (dentry->d_name.hash != hash)
2195 continue;
2196
2197 spin_lock(&dentry->d_lock);
2198 if (dentry->d_parent != parent)
2199 goto next;
2200 if (d_unhashed(dentry))
2201 goto next;
2202
2203
2204
2205
2206
2207 if (parent->d_flags & DCACHE_OP_COMPARE) {
2208 int tlen = dentry->d_name.len;
2209 const char *tname = dentry->d_name.name;
2210 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2211 goto next;
2212 } else {
2213 if (dentry->d_name.len != len)
2214 goto next;
2215 if (dentry_cmp(dentry, str, len))
2216 goto next;
2217 }
2218
2219 dentry->d_lockref.count++;
2220 found = dentry;
2221 spin_unlock(&dentry->d_lock);
2222 break;
2223next:
2224 spin_unlock(&dentry->d_lock);
2225 }
2226 rcu_read_unlock();
2227
2228 return found;
2229}
2230
2231
2232
2233
2234
2235
2236
2237
2238struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2239{
2240
2241
2242
2243
2244
2245 name->hash = full_name_hash(name->name, name->len);
2246 if (dir->d_flags & DCACHE_OP_HASH) {
2247 int err = dir->d_op->d_hash(dir, name);
2248 if (unlikely(err < 0))
2249 return ERR_PTR(err);
2250 }
2251 return d_lookup(dir, name);
2252}
2253EXPORT_SYMBOL(d_hash_and_lookup);
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266int d_validate(struct dentry *dentry, struct dentry *dparent)
2267{
2268 struct dentry *child;
2269
2270 spin_lock(&dparent->d_lock);
2271 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2272 if (dentry == child) {
2273 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2274 __dget_dlock(dentry);
2275 spin_unlock(&dentry->d_lock);
2276 spin_unlock(&dparent->d_lock);
2277 return 1;
2278 }
2279 }
2280 spin_unlock(&dparent->d_lock);
2281
2282 return 0;
2283}
2284EXPORT_SYMBOL(d_validate);
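/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later.
 */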
2307void d_delete(struct dentry * dentry)
2308{
2309 struct inode *inode;
2310 int isdir = 0;
2311
2312
2313
2314again:
2315 spin_lock(&dentry->d_lock);
2316 inode = dentry->d_inode;
2317 isdir = S_ISDIR(inode->i_mode);
2318 if (dentry->d_lockref.count == 1) {
2319 if (!spin_trylock(&inode->i_lock)) {
2320 spin_unlock(&dentry->d_lock);
2321 cpu_relax();
2322 goto again;
2323 }
2324 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2325 dentry_unlink_inode(dentry);
2326 fsnotify_nameremove(dentry, isdir);
2327 return;
2328 }
2329
2330 if (!d_unhashed(dentry))
2331 __d_drop(dentry);
2332
2333 spin_unlock(&dentry->d_lock);
2334
2335 fsnotify_nameremove(dentry, isdir);
2336}
2337EXPORT_SYMBOL(d_delete);
2338
2339static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2340{
2341 BUG_ON(!d_unhashed(entry));
2342 hlist_bl_lock(b);
2343 hlist_bl_add_head_rcu(&entry->d_hash, b);
2344 hlist_bl_unlock(b);
2345}
2346
2347static void _d_rehash(struct dentry * entry)
2348{
2349 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2350}
2351
2352
2353
2354
2355
2356
2357
2358
2359void d_rehash(struct dentry * entry)
2360{
2361 spin_lock(&entry->d_lock);
2362 _d_rehash(entry);
2363 spin_unlock(&entry->d_lock);
2364}
2365EXPORT_SYMBOL(d_rehash);
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2382{
2383 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2384 BUG_ON(dentry->d_name.len != name->len);
2385
2386 spin_lock(&dentry->d_lock);
2387 write_seqcount_begin(&dentry->d_seq);
2388 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2389 write_seqcount_end(&dentry->d_seq);
2390 spin_unlock(&dentry->d_lock);
2391}
2392EXPORT_SYMBOL(dentry_update_name_case);
2393
2394static void switch_names(struct dentry *dentry, struct dentry *target)
2395{
2396 if (dname_external(target)) {
2397 if (dname_external(dentry)) {
2398
2399
2400
2401 swap(target->d_name.name, dentry->d_name.name);
2402 } else {
2403
2404
2405
2406
2407 memcpy(target->d_iname, dentry->d_name.name,
2408 dentry->d_name.len + 1);
2409 dentry->d_name.name = target->d_name.name;
2410 target->d_name.name = target->d_iname;
2411 }
2412 } else {
2413 if (dname_external(dentry)) {
2414
2415
2416
2417
2418 memcpy(dentry->d_iname, target->d_name.name,
2419 target->d_name.len + 1);
2420 target->d_name.name = dentry->d_name.name;
2421 dentry->d_name.name = dentry->d_iname;
2422 } else {
2423
2424
2425
2426 unsigned int i;
2427 BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2428 for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2429 swap(((long *) &dentry->d_iname)[i],
2430 ((long *) &target->d_iname)[i]);
2431 }
2432 }
2433 }
2434 swap(dentry->d_name.len, target->d_name.len);
2435}
2436
2437static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2438{
2439
2440
2441
2442 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2443 spin_lock(&target->d_parent->d_lock);
2444 else {
2445 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2446 spin_lock(&dentry->d_parent->d_lock);
2447 spin_lock_nested(&target->d_parent->d_lock,
2448 DENTRY_D_LOCK_NESTED);
2449 } else {
2450 spin_lock(&target->d_parent->d_lock);
2451 spin_lock_nested(&dentry->d_parent->d_lock,
2452 DENTRY_D_LOCK_NESTED);
2453 }
2454 }
2455 if (target < dentry) {
2456 spin_lock_nested(&target->d_lock, 2);
2457 spin_lock_nested(&dentry->d_lock, 3);
2458 } else {
2459 spin_lock_nested(&dentry->d_lock, 2);
2460 spin_lock_nested(&target->d_lock, 3);
2461 }
2462}
2463
2464static void dentry_unlock_parents_for_move(struct dentry *dentry,
2465 struct dentry *target)
2466{
2467 if (target->d_parent != dentry->d_parent)
2468 spin_unlock(&dentry->d_parent->d_lock);
2469 if (target->d_parent != target)
2470 spin_unlock(&target->d_parent->d_lock);
2471}
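/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries rather than just moving @dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */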
2495static void __d_move(struct dentry *dentry, struct dentry *target,
2496 bool exchange)
2497{
2498 if (!dentry->d_inode)
2499 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2500
2501 BUG_ON(d_ancestor(dentry, target));
2502 BUG_ON(d_ancestor(target, dentry));
2503
2504 dentry_lock_for_move(dentry, target);
2505
2506 write_seqcount_begin(&dentry->d_seq);
2507 write_seqcount_begin(&target->d_seq);
2508
2509
2510
2511
2512
2513
2514
2515 __d_drop(dentry);
2516 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2517
2518
2519
2520
2521
2522 __d_drop(target);
2523 if (exchange) {
2524 __d_rehash(target,
2525 d_hash(dentry->d_parent, dentry->d_name.hash));
2526 }
2527
2528 list_del(&dentry->d_u.d_child);
2529 list_del(&target->d_u.d_child);
2530
2531
2532 switch_names(dentry, target);
2533 swap(dentry->d_name.hash, target->d_name.hash);
2534
2535
2536 if (IS_ROOT(dentry)) {
2537 dentry->d_flags |= DCACHE_RCUACCESS;
2538 dentry->d_parent = target->d_parent;
2539 target->d_parent = target;
2540 INIT_LIST_HEAD(&target->d_u.d_child);
2541 } else {
2542 swap(dentry->d_parent, target->d_parent);
2543
2544
2545 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2546 }
2547
2548 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2549
2550 write_seqcount_end(&target->d_seq);
2551 write_seqcount_end(&dentry->d_seq);
2552
2553 dentry_unlock_parents_for_move(dentry, target);
2554 if (exchange)
2555 fsnotify_update_flags(target);
2556 spin_unlock(&target->d_lock);
2557 fsnotify_update_flags(dentry);
2558 spin_unlock(&dentry->d_lock);
2559}
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570void d_move(struct dentry *dentry, struct dentry *target)
2571{
2572 write_seqlock(&rename_lock);
2573 __d_move(dentry, target, false);
2574 write_sequnlock(&rename_lock);
2575}
2576EXPORT_SYMBOL(d_move);
2577
2578
2579
2580
2581
2582
2583void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2584{
2585 write_seqlock(&rename_lock);
2586
2587 WARN_ON(!dentry1->d_inode);
2588 WARN_ON(!dentry2->d_inode);
2589 WARN_ON(IS_ROOT(dentry1));
2590 WARN_ON(IS_ROOT(dentry2));
2591
2592 __d_move(dentry1, dentry2, true);
2593
2594 write_sequnlock(&rename_lock);
2595}
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2606{
2607 struct dentry *p;
2608
2609 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2610 if (p->d_parent == p1)
2611 return p;
2612 }
2613 return NULL;
2614}
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625static struct dentry *__d_unalias(struct inode *inode,
2626 struct dentry *dentry, struct dentry *alias)
2627{
2628 struct mutex *m1 = NULL, *m2 = NULL;
2629 struct dentry *ret = ERR_PTR(-EBUSY);
2630
2631
2632 if (alias->d_parent == dentry->d_parent)
2633 goto out_unalias;
2634
2635
2636 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2637 goto out_err;
2638 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2639 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2640 goto out_err;
2641 m2 = &alias->d_parent->d_inode->i_mutex;
2642out_unalias:
2643 __d_move(alias, dentry, false);
2644 ret = alias;
2645out_err:
2646 spin_unlock(&inode->i_lock);
2647 if (m2)
2648 mutex_unlock(m2);
2649 if (m1)
2650 mutex_unlock(m1);
2651 return ret;
2652}
2653
2654
2655
2656
2657
2658
2659static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2660{
2661 struct dentry *dparent;
2662
2663 dentry_lock_for_move(anon, dentry);
2664
2665 write_seqcount_begin(&dentry->d_seq);
2666 write_seqcount_begin(&anon->d_seq);
2667
2668 dparent = dentry->d_parent;
2669
2670 switch_names(dentry, anon);
2671 swap(dentry->d_name.hash, anon->d_name.hash);
2672
2673 dentry->d_parent = dentry;
2674 list_del_init(&dentry->d_u.d_child);
2675 anon->d_flags |= DCACHE_RCUACCESS;
2676 anon->d_parent = dparent;
2677 list_move(&anon->d_u.d_child, &dparent->d_subdirs);
2678
2679 write_seqcount_end(&dentry->d_seq);
2680 write_seqcount_end(&anon->d_seq);
2681
2682 dentry_unlock_parents_for_move(anon, dentry);
2683 spin_unlock(&dentry->d_lock);
2684
2685
2686 anon->d_flags &= ~DCACHE_DISCONNECTED;
2687}
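/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one. Caller must hold the
 * i_mutex of the parent directory.
 */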
2698struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2699{
2700 struct dentry *actual;
2701
2702 BUG_ON(!d_unhashed(dentry));
2703
2704 if (!inode) {
2705 actual = dentry;
2706 __d_instantiate(dentry, NULL);
2707 d_rehash(actual);
2708 goto out_nolock;
2709 }
2710
2711 spin_lock(&inode->i_lock);
2712
2713 if (S_ISDIR(inode->i_mode)) {
2714 struct dentry *alias;
2715
2716
2717 alias = __d_find_alias(inode, 0);
2718 if (alias) {
2719 actual = alias;
2720 write_seqlock(&rename_lock);
2721
2722 if (d_ancestor(alias, dentry)) {
2723
2724 actual = ERR_PTR(-ELOOP);
2725 spin_unlock(&inode->i_lock);
2726 } else if (IS_ROOT(alias)) {
2727
2728
2729 __d_materialise_dentry(dentry, alias);
2730 write_sequnlock(&rename_lock);
2731 __d_drop(alias);
2732 goto found;
2733 } else {
2734
2735
2736 actual = __d_unalias(inode, dentry, alias);
2737 }
2738 write_sequnlock(&rename_lock);
2739 if (IS_ERR(actual)) {
2740 if (PTR_ERR(actual) == -ELOOP)
2741 pr_warn_ratelimited(
2742 "VFS: Lookup of '%s' in %s %s"
2743 " would have caused loop\n",
2744 dentry->d_name.name,
2745 inode->i_sb->s_type->name,
2746 inode->i_sb->s_id);
2747 dput(alias);
2748 }
2749 goto out_nolock;
2750 }
2751 }
2752
2753
2754 actual = __d_instantiate_unique(dentry, inode);
2755 if (!actual)
2756 actual = dentry;
2757 else
2758 BUG_ON(!d_unhashed(actual));
2759
2760 spin_lock(&actual->d_lock);
2761found:
2762 _d_rehash(actual);
2763 spin_unlock(&actual->d_lock);
2764 spin_unlock(&inode->i_lock);
2765out_nolock:
2766 if (actual == dentry) {
2767 security_d_instantiate(dentry, inode);
2768 return NULL;
2769 }
2770
2771 iput(inode);
2772 return actual;
2773}
2774EXPORT_SYMBOL_GPL(d_materialise_unique);
2775
2776static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2777{
2778 *buflen -= namelen;
2779 if (*buflen < 0)
2780 return -ENAMETOOLONG;
2781 *buffer -= namelen;
2782 memcpy(*buffer, str, namelen);
2783 return 0;
2784}
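/**
 * prepend_name - prepend a pathname in front of current buffer pointer
 * @buffer: buffer pointer
 * @buflen: allocated length of the buffer
 * @name:   name string and length qstr structure
 *
 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
 * make sure that either the old or the new name pointer and length are
 * fetched. However, there may be a mismatch between length and pointer.
 * The length cannot be trusted, we need to copy it byte-by-byte until
 * the length is reached or a null byte is found. It also prepends "/" at
 * the beginning of the name. The sequence number check at the caller will
 * retry it again when a d_move() does happen. So any garbage in the buffer
 * due to mismatched pointer and length will be discarded.
 */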
2801static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2802{
2803 const char *dname = ACCESS_ONCE(name->name);
2804 u32 dlen = ACCESS_ONCE(name->len);
2805 char *p;
2806
2807 *buflen -= dlen + 1;
2808 if (*buflen < 0)
2809 return -ENAMETOOLONG;
2810 p = *buffer -= dlen + 1;
2811 *p++ = '/';
2812 while (dlen--) {
2813 char c = *dname++;
2814 if (!c)
2815 break;
2816 *p++ = c;
2817 }
2818 return 0;
2819}
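/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * The function will first try to write out the pathname without taking any
 * lock other than the RCU read lock to make sure that dentries won't go away.
 * It only checks the sequence number of the global rename_lock as any change
 * in the dentry's d_seq will be preceded by changes in the rename_lock
 * sequence number. If the sequence number had been changed, it will restart
 * the whole pathname back-tracing sequence again by taking the rename_lock.
 * In this case, there is no need to take the RCU read lock as the recursive
 * parent pointer references will keep the dentry chain alive as long as no
 * rename operation is performed.
 */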
2838static int prepend_path(const struct path *path,
2839 const struct path *root,
2840 char **buffer, int *buflen)
2841{
2842 struct dentry *dentry;
2843 struct vfsmount *vfsmnt;
2844 struct mount *mnt;
2845 int error = 0;
2846 unsigned seq, m_seq = 0;
2847 char *bptr;
2848 int blen;
2849
2850 rcu_read_lock();
2851restart_mnt:
2852 read_seqbegin_or_lock(&mount_lock, &m_seq);
2853 seq = 0;
2854 rcu_read_lock();
2855restart:
2856 bptr = *buffer;
2857 blen = *buflen;
2858 error = 0;
2859 dentry = path->dentry;
2860 vfsmnt = path->mnt;
2861 mnt = real_mount(vfsmnt);
2862 read_seqbegin_or_lock(&rename_lock, &seq);
2863 while (dentry != root->dentry || vfsmnt != root->mnt) {
2864 struct dentry * parent;
2865
2866 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2867 struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
2868
2869 if (dentry != vfsmnt->mnt_root) {
2870 bptr = *buffer;
2871 blen = *buflen;
2872 error = 3;
2873 break;
2874 }
2875
2876 if (mnt != parent) {
2877 dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
2878 mnt = parent;
2879 vfsmnt = &mnt->mnt;
2880 continue;
2881 }
2882
2883
2884
2885
2886 if (IS_ROOT(dentry) &&
2887 (dentry->d_name.len != 1 ||
2888 dentry->d_name.name[0] != '/')) {
2889 WARN(1, "Root dentry has weird name <%.*s>\n",
2890 (int) dentry->d_name.len,
2891 dentry->d_name.name);
2892 }
2893 if (!error)
2894 error = is_mounted(vfsmnt) ? 1 : 2;
2895 break;
2896 }
2897 parent = dentry->d_parent;
2898 prefetch(parent);
2899 error = prepend_name(&bptr, &blen, &dentry->d_name);
2900 if (error)
2901 break;
2902
2903 dentry = parent;
2904 }
2905 if (!(seq & 1))
2906 rcu_read_unlock();
2907 if (need_seqretry(&rename_lock, seq)) {
2908 seq = 1;
2909 goto restart;
2910 }
2911 done_seqretry(&rename_lock, seq);
2912
2913 if (!(m_seq & 1))
2914 rcu_read_unlock();
2915 if (need_seqretry(&mount_lock, m_seq)) {
2916 m_seq = 1;
2917 goto restart_mnt;
2918 }
2919 done_seqretry(&mount_lock, m_seq);
2920
2921 if (error >= 0 && bptr == *buffer) {
2922 if (--blen < 0)
2923 error = -ENAMETOOLONG;
2924 else
2925 *--bptr = '/';
2926 }
2927 *buffer = bptr;
2928 *buflen = blen;
2929 return error;
2930}
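/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.
 *
 * "buflen" should be positive.
 *
 * If the path is not reachable from the supplied root, return %NULL.
 */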
2948char *__d_path(const struct path *path,
2949 const struct path *root,
2950 char *buf, int buflen)
2951{
2952 char *res = buf + buflen;
2953 int error;
2954
2955 prepend(&res, &buflen, "\0", 1);
2956 error = prepend_path(path, root, &res, &buflen);
2957
2958 if (error < 0)
2959 return ERR_PTR(error);
2960 if (error > 0)
2961 return NULL;
2962 return res;
2963}
2964
char *d_absolute_path(const struct path *path,
		      char *buf, int buflen)
{
	struct path root = {};
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, &root, &res, &buflen);

	if (error > 1)
		error = -EINVAL;
	if (error < 0)
		return ERR_PTR(error);
	return res;
}
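
/*
 * Same as __d_path(), but the string " (deleted)" is added after the path
 * of an unlinked dentry.
 */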
static int path_with_deleted(const struct path *path,
			     const struct path *root,
			     char **buf, int *buflen)
{
	prepend(buf, buflen, "\0", 1);
	if (d_unlinked(path->dentry)) {
		int error = prepend(buf, buflen, " (deleted)", 10);
		if (error)
			return error;
	}

	return prepend_path(path, root, buf, buflen);
}

static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}

static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
	} while (read_seqcount_retry(&fs->seq, seq));
}
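
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.  If the entry has been deleted
 * the string " (deleted)" is appended.  Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error pointer if the path was
 * too long.  Callers should use the returned pointer, not the passed-in
 * buffer: the name is written at the end of the buffer, so it normally
 * starts at some offset into @buf.  "buflen" should be positive.
 *
 * A typical caller looks something like:
 *
 *	char *page = (char *)__get_free_page(GFP_KERNEL);
 *	if (page) {
 *		char *p = d_path(&file->f_path, page, PAGE_SIZE);
 *		if (!IS_ERR(p))
 *			pr_info("file is %s\n", p);
 *		free_page((unsigned long)page);
 *	}
 */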
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	int error;

	/*
	 * Some pseudo filesystems (e.g. pipefs, sockfs) never get mounted
	 * and their dentries are never used for lookups; they only need a
	 * name when someone inspects them through /proc/<pid>/fd/.  Such
	 * filesystems generate the name on demand via ->d_dname().
	 *
	 * Some pseudo inodes are mountable.  When they are mounted,
	 * path->dentry == path->mnt->mnt_root.  In that case ->d_dname()
	 * is not used, so the mounted path is reported the normal way.
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	rcu_read_lock();
	get_fs_root_rcu(current->fs, &root);
	error = path_with_deleted(path, &root, &res, &buflen);
	rcu_read_unlock();

	if (error < 0)
		res = ERR_PTR(error);
	return res;
}
EXPORT_SYMBOL(d_path);
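
/*
 * Helper for dentry_operations.d_dname() implementations: formats the name
 * into the end of @buffer and returns a pointer to it, or
 * ERR_PTR(-ENAMETOOLONG) if it does not fit.
 */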
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
		    const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}

char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
{
	char *end = buffer + buflen;
	/* these dentries are never renamed, so d_lock is not needed */
	if (prepend(&end, &buflen, " (deleted)", 11) ||
	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
	    prepend(&end, &buflen, "/", 1))
		end = ERR_PTR(-ENAMETOOLONG);
	return end;
}
EXPORT_SYMBOL(simple_dname);
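
/*
 * Write the full pathname of a dentry, from the root of its filesystem,
 * into the end of the buffer.  Unlike prepend_path() this ignores mounts
 * and the caller's root.
 */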
static char *__dentry_path(struct dentry *d, char *buf, int buflen)
{
	struct dentry *dentry;
	char *end, *retval;
	int len, seq = 0;
	int error = 0;

	if (buflen < 2)
		goto Elong;

	rcu_read_lock();
restart:
	dentry = d;
	end = buf + buflen;
	len = buflen;
	prepend(&end, &len, "\0", 1);
	/* Get '/' right */
	retval = end-1;
	*retval = '/';
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		error = prepend_name(&end, &len, &dentry->d_name);
		if (error)
			break;

		retval = end;
		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);
	if (error)
		goto Elong;
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}

char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
	return __dentry_path(dentry, buf, buflen);
}
EXPORT_SYMBOL(dentry_path_raw);

char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	if (d_unlinked(dentry)) {
		p = buf + buflen;
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	if (!IS_ERR(retval) && p)
		*p = '/';
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}

static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
				    struct path *pwd)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
		*pwd = fs->pwd;
	} while (read_seqcount_retry(&fs->seq, seq));
}
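
/*
 * NOTE! The user-level library version returns a character pointer.  The
 * kernel system call just returns the length of the buffer filled (which
 * includes the terminating '\0'), or a negative error value.  So a libc
 * wrapper would look something like:
 *
 *	char *getcwd(char *buf, size_t size)
 *	{
 *		int retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */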
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	rcu_read_lock();
	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);

	error = -ENOENT;
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		char *cwd = page + PAGE_SIZE;
		int buflen = PAGE_SIZE;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &root, &cwd, &buflen);
		rcu_read_unlock();

		if (error < 0)
			goto out;

		/* Unreachable from current root */
		if (error > 0) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		rcu_read_unlock();
	}

out:
	free_page((unsigned long) page);
	return error;
}
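
/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth)
 * or is equal to it, 0 otherwise.  The caller must keep @new_dentry pinned
 * so that it cannot go away while the ancestry walk runs.
 */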
int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return 1;

	do {
		/* restart the walk if a rename happened meanwhile */
		seq = read_seqbegin(&rename_lock);
		/*
		 * The RCU read lock keeps the d_parent chain followed by
		 * d_ancestor() from being freed under us by a concurrent
		 * d_move().
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = 1;
		else
			result = 0;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
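/*
 * d_genocide - walk the subtree under @parent and drop one reference from
 * every hashed, positive dentry found there.  Each dentry is marked with
 * DCACHE_GENOCIDE so the reference is only dropped once even if the walk
 * visits it again.
 */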
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}

void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill, NULL);
}
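/*
 * d_tmpfile - set up a dentry for an O_TMPFILE style inode: the dentry gets
 * an anonymous "#<ino>" name and is instantiated, and the inode's link count
 * is dropped so that the new file starts out as an already-unlinked
 * temporary file.
 */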
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
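
/**
 * find_inode_number - check for a dentry with the given name
 * @dir: directory to check
 * @name: name to find
 *
 * Check whether a dentry already exists for the given name, and return the
 * inode number if it has an inode.  Otherwise 0 is returned.
 */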
ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry;
	ino_t ino = 0;

	dentry = d_hash_and_lookup(dir, name);
	if (!IS_ERR_OR_NULL(dentry)) {
		if (dentry->d_inode)
			ino = dentry->d_inode->i_ino;
		dput(dentry);
	}
	return ino;
}
EXPORT_SYMBOL(find_inode_number);

static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);

static void __init dcache_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}

static void __init dcache_init(void)
{
	unsigned int loop;

	/*
	 * The dentry cache is created with SLAB_RECLAIM_ACCOUNT so that its
	 * pages are accounted as reclaimable, and with SLAB_PANIC because
	 * the VFS cannot operate without it.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}

/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);

void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}

void __init vfs_caches_init(void)
{
	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}