1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/syscalls.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/fsnotify.h>
22#include <linux/slab.h>
23#include <linux/init.h>
24#include <linux/hash.h>
25#include <linux/cache.h>
26#include <linux/module.h>
27#include <linux/mount.h>
28#include <linux/file.h>
29#include <asm/uaccess.h>
30#include <linux/security.h>
31#include <linux/seqlock.h>
32#include <linux/swap.h>
33#include <linux/bootmem.h>
34#include <linux/fs_struct.h>
35#include <linux/hardirq.h>
36#include "internal.h"
37
/* Tunable: how aggressively to reclaim dentries/inodes under memory
 * pressure, as a percentage (100 = default weighting). */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

/*
 * dcache_lock protects most dcache state: hash chains, per-sb LRU lists,
 * d_subdirs/d_u.d_child linkage and the alias lists.  rename_lock is a
 * seqlock that lets lockless lookups detect a concurrent rename and retry.
 * Both are cacheline-aligned to limit false sharing on SMP.
 */
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(dcache_lock);

/* Slab cache from which every struct dentry is allocated. */
static struct kmem_cache *dentry_cache __read_mostly;

/*
 * Bytes of short-name storage embedded in struct dentry: everything from
 * d_iname to the end of the structure.  Longer names get a kmalloc'd copy.
 */
#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))

/*
 * This is the single most critical data structure when it comes to the
 * dcache: the hashtable for lookups.  The mask/shift are sized at boot
 * from available memory.
 */
#define D_HASHBITS d_hash_shift
#define D_HASHMASK d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
static struct hlist_head *dentry_hashtable __read_mostly;

/* Statistics gathering (exposed via /proc/sys/fs/dentry-state). */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};
69
/*
 * Release a dentry's storage: free the externally allocated name (if the
 * name did not fit in d_iname) and return the dentry to the slab cache.
 * The dentry must already be fully unlinked (no inode aliases left).
 */
static void __d_free(struct dentry *dentry)
{
	WARN_ON(!list_empty(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

/* RCU callback: actually free the dentry once a grace period has elapsed. */
static void d_callback(struct rcu_head *head)
{
	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
	__d_free(dentry);
}

/*
 * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
 * inside dcache_lock.
 */
static void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	/* if dentry was never inserted into hash, immediate free is OK;
	 * otherwise lockless hash walkers may still see it, so defer via RCU */
	if (hlist_unhashed(&dentry->d_hash))
		__d_free(dentry);
	else
		call_rcu(&dentry->d_u.d_rcu, d_callback);
}
98
99
100
101
102
/*
 * Release the dentry's inode, using the filesystem's d_iput() operation if
 * one is defined.  Called with dentry->d_lock and dcache_lock held; both
 * are dropped before the (possibly sleeping) iput path runs.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
	}
}
124
125
126
127
128static void dentry_lru_add(struct dentry *dentry)
129{
130 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
131 dentry->d_sb->s_nr_dentry_unused++;
132 dentry_stat.nr_unused++;
133}
134
135static void dentry_lru_add_tail(struct dentry *dentry)
136{
137 list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
138 dentry->d_sb->s_nr_dentry_unused++;
139 dentry_stat.nr_unused++;
140}
141
142static void dentry_lru_del(struct dentry *dentry)
143{
144 if (!list_empty(&dentry->d_lru)) {
145 list_del(&dentry->d_lru);
146 dentry->d_sb->s_nr_dentry_unused--;
147 dentry_stat.nr_unused--;
148 }
149}
150
151static void dentry_lru_del_init(struct dentry *dentry)
152{
153 if (likely(!list_empty(&dentry->d_lru))) {
154 list_del_init(&dentry->d_lru);
155 dentry->d_sb->s_nr_dentry_unused--;
156 dentry_stat.nr_unused--;
157 }
158}
159
160
161
162
163
164
165
166
167
/*
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 *
 * The dentry must already be unhashed and removed from the LRU.
 * Called with both dcache_lock and dentry->d_lock held; dentry_iput()
 * drops both.  Returns the parent so the caller can continue pruning
 * upwards, or NULL if this was the root of the tree.
 */
static struct dentry *d_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct dentry *parent;

	list_del(&dentry->d_u.d_child);
	dentry_stat.nr_dentry--;	/* For d_free, below */
	/* drops the locks; at that point nobody else can reach this dentry */
	dentry_iput(dentry);
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	d_free(dentry);
	return parent;
}
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
/**
 * dput - release a dentry
 * @dentry: dentry to release (may be %NULL)
 *
 * Drop the usage count.  When it reaches zero, either park the dentry
 * on the LRU as a cache candidate, or — if the filesystem's d_delete()
 * asks for it, or the dentry is already unhashed — kill it outright.
 * Killing a dentry may drop the last reference on its parent, so loop.
 *
 * no dcache lock, please.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (atomic_read(&dentry->d_count) == 1)
		might_sleep();
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count)) {
		/* somebody re-grabbed it between the dec and the lock */
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		return;
	}

	/*
	 * ->d_delete() is _NOT_ allowed to block here: we hold spinlocks.
	 * A non-zero return means the fs wants this dentry gone now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;
	/* Otherwise cache it: mark recently used and put it on the LRU */
	if (list_empty(&dentry->d_lru)) {
		dentry->d_flags |= DCACHE_REFERENCED;
		dentry_lru_add(dentry);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	__d_drop(dentry);
kill_it:
	/* if dentry was on the d_lru list delete it from there */
	dentry_lru_del(dentry);
	/* d_kill drops the locks and returns the parent (or NULL) */
	dentry = d_kill(dentry);
	if (dentry)
		goto repeat;
}
260
261
262
263
264
265
266
267
268
269
270
271
272
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be possible.  If there
 * are other dentries that can be reached through this one we can't delete
 * it and we return -EBUSY.  On success we return 0.
 *
 * no dcache lock.
 */
int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it for fear of somebody
	 * re-populating it with children (even though dropping it would
	 * make it unreachable from the root, we might still populate it
	 * if it was a working directory or similar).
	 */
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return 0;
}
317
318
319
/* This should be called _only_ with dcache_lock held */
static inline struct dentry * __dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	dentry_lru_del_init(dentry);	/* a referenced dentry must leave the LRU */
	return dentry;
}

/* Grab a reference while the caller already holds dcache_lock. */
struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
/*
 * Search the inode's alias list for a suitable dentry and return it with
 * an elevated reference count, or NULL.  A hashed, connected alias is
 * preferred; a disconnected root alias (IS_ROOT + DCACHE_DISCONNECTED) is
 * remembered and returned only as a fallback, or when @want_discon is set.
 * For directories any alias qualifies (a directory has at most one).
 *
 * Called with dcache_lock held.
 */
static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
{
	struct list_head *head, *next, *tmp;
	struct dentry *alias, *discon_alias=NULL;

	head = &inode->i_dentry;
	next = inode->i_dentry.next;
	while (next != head) {
		tmp = next;
		next = tmp->next;
		prefetch(next);		/* hide list-walk cache-miss latency */
		alias = list_entry(tmp, struct dentry, d_alias);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED))
				discon_alias = alias;
			else if (!want_discon) {
				__dget_locked(alias);
				return alias;
			}
		}
	}
	if (discon_alias)
		__dget_locked(discon_alias);
	return discon_alias;
}
375
376struct dentry * d_find_alias(struct inode *inode)
377{
378 struct dentry *de = NULL;
379
380 if (!list_empty(&inode->i_dentry)) {
381 spin_lock(&dcache_lock);
382 de = __d_find_alias(inode, 0);
383 spin_unlock(&dcache_lock);
384 }
385 return de;
386}
387
388
389
390
391
/*
 *	Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&dcache_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!atomic_read(&dentry->d_count)) {
			/* take a ref so the dput() below performs the kill */
			__dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			dput(dentry);
			/* the alias list may have changed while unlocked */
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
}
411
412
413
414
415
416
417
418
419
/*
 * Throw away a dentry - free the inode, dput the parent.  Each path
 * component pins its parent, so killing a child can drop the parent's
 * refcount to zero, in which case the parent must be pruned as well.
 *
 * Called with dcache_lock, drops it and then regains.
 * Called with dentry->d_lock held, drops it.
 */
static void prune_one_dentry(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
	__acquires(dcache_lock)
{
	__d_drop(dentry);
	dentry = d_kill(dentry);	/* drops both locks, returns parent */

	/*
	 * Prune ancestors.  Locking is simpler than in dput(),
	 * because dcache_lock needs to be taken anyway.
	 */
	spin_lock(&dcache_lock);
	while (dentry) {
		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
			return;

		if (dentry->d_op && dentry->d_op->d_delete)
			dentry->d_op->d_delete(dentry);
		dentry_lru_del_init(dentry);
		__d_drop(dentry);
		dentry = d_kill(dentry);	/* drops both locks again */
		spin_lock(&dcache_lock);
	}
}
445
446
447
448
449
450
451
452
453
/*
 * Shrink the dentry LRU on a given superblock.
 * @sb   : superblock to shrink dentry LRU.
 * @count: If count is NULL, we prune all dentries on superblock.
 * @flags: If flags is non-zero, we need to do special processing based on
 * which flags are set.  This keeps us from needing multiple near-identical
 * copies of this loop.
 */
static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
{
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);
	struct dentry *dentry;
	int cnt = 0;

	BUG_ON(!sb);
	BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
	spin_lock(&dcache_lock);
	if (count != NULL)
		/* called from prune_dcache() and shrink_dcache_parent() */
		cnt = *count;
restart:
	if (count == NULL)
		list_splice_init(&sb->s_dentry_lru, &tmp);
	else {
		/* collect up to cnt victims from the cold end of the LRU */
		while (!list_empty(&sb->s_dentry_lru)) {
			dentry = list_entry(sb->s_dentry_lru.prev,
					struct dentry, d_lru);
			BUG_ON(dentry->d_sb != sb);

			spin_lock(&dentry->d_lock);
			/*
			 * If we are honouring the DCACHE_REFERENCED flag and
			 * the dentry has this flag set, don't free it.  Clear
			 * the flag and put it back on the LRU.
			 */
			if ((flags & DCACHE_REFERENCED)
				&& (dentry->d_flags & DCACHE_REFERENCED)) {
				dentry->d_flags &= ~DCACHE_REFERENCED;
				list_move(&dentry->d_lru, &referenced);
				spin_unlock(&dentry->d_lock);
			} else {
				list_move_tail(&dentry->d_lru, &tmp);
				spin_unlock(&dentry->d_lock);
				cnt--;
				if (!cnt)
					break;
			}
			cond_resched_lock(&dcache_lock);
		}
	}
	while (!list_empty(&tmp)) {
		dentry = list_entry(tmp.prev, struct dentry, d_lru);
		dentry_lru_del_init(dentry);
		spin_lock(&dentry->d_lock);
		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup.  Do not free
		 * it - just keep it off the LRU list.
		 */
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		prune_one_dentry(dentry);
		/* dentry->d_lock was dropped in prune_one_dentry() */
		cond_resched_lock(&dcache_lock);
	}
	/* unconditional prune: new entries may have arrived while unlocked */
	if (count == NULL && !list_empty(&sb->s_dentry_lru))
		goto restart;
	if (count != NULL)
		*count = cnt;	/* report how many we did NOT manage to free */
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lock);
}
522
523
524
525
526
527
528
529
530
531
/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try to free
 *
 * Shrink the dcache.  This is done when we need more memory, or simply
 * when we need to unmount something (at which point we need to unuse all
 * dentries).
 *
 * This function may fail to free any resources if all the dentries are
 * in use.
 */
static void prune_dcache(int count)
{
	struct super_block *sb;
	int w_count;
	int unused = dentry_stat.nr_unused;
	int prune_ratio;
	int pruned;

	if (unused == 0 || count == 0)
		return;
	spin_lock(&dcache_lock);
restart:
	if (count >= unused)
		prune_ratio = 1;
	else
		prune_ratio = unused / count;
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_nr_dentry_unused == 0)
			continue;
		sb->s_count++;	/* pin the sb while we work on it unlocked */
		/*
		 * Reclaim fairly: ask each superblock for a share of the
		 * work proportional to its number of unused dentries, so
		 * no single filesystem is drained preferentially.
		 */
		spin_unlock(&sb_lock);
		if (prune_ratio != 1)
			w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
		else
			w_count = sb->s_nr_dentry_unused;
		pruned = w_count;
		/*
		 * We need to be sure this filesystem isn't being unmounted,
		 * otherwise we could race with generic_shutdown_super() and
		 * end up holding a reference to an inode while the filesystem
		 * is unmounted.  So we try to get s_umount and make sure
		 * s_root isn't NULL.
		 */
		if (down_read_trylock(&sb->s_umount)) {
			if ((sb->s_root != NULL) &&
			    (!list_empty(&sb->s_dentry_lru))) {
				spin_unlock(&dcache_lock);
				__shrink_dcache_sb(sb, &w_count,
						DCACHE_REFERENCED);
				pruned -= w_count;	/* w_count = leftovers */
				spin_lock(&dcache_lock);
			}
			up_read(&sb->s_umount);
		}
		spin_lock(&sb_lock);
		count -= pruned;
		/*
		 * restart only when sb is no longer on the list and
		 * we have more work to get done.
		 */
		if (__put_super_and_need_restart(sb) && count > 0) {
			spin_unlock(&sb_lock);
			goto restart;
		}
	}
	spin_unlock(&sb_lock);
	spin_unlock(&dcache_lock);
}
600
601
602
603
604
605
606
607
608
/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block.  This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block * sb)
{
	__shrink_dcache_sb(sb, NULL, 0);
}
613
614
615
616
617
618
/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;
	unsigned detached = 0;

	BUG_ON(!IS_ROOT(dentry));

	/* detach this root from the system */
	spin_lock(&dcache_lock);
	dentry_lru_del_init(dentry);
	__d_drop(dentry);
	spin_unlock(&dcache_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs)) {
			struct dentry *loop;

			/* this is a branch with children - detach all of them
			 * from the system in one go */
			spin_lock(&dcache_lock);
			list_for_each_entry(loop, &dentry->d_subdirs,
					    d_u.d_child) {
				dentry_lru_del_init(loop);
				__d_drop(loop);
				cond_resched_lock(&dcache_lock);
			}
			spin_unlock(&dcache_lock);

			/* move to the first child */
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);
		}

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			if (atomic_read(&dentry->d_count) != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       atomic_read(&dentry->d_count),
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			if (IS_ROOT(dentry))
				parent = NULL;
			else {
				parent = dentry->d_parent;
				atomic_dec(&parent->d_count);
			}

			list_del(&dentry->d_u.d_child);
			detached++;

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				goto out;

			dentry = parent;

		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
out:
	/* several dentries were freed, need to correct nr_dentry */
	spin_lock(&dcache_lock);
	dentry_stat.nr_dentry -= detached;
	spin_unlock(&dcache_lock);
}
714
715
716
717
718
719
720
721
722
723
724
725
/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock, and only need dcache_lock when
 *   removing the dentry from the system lists and hashes because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	/* s_umount must be write-held by the caller */
	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	atomic_dec(&dentry->d_count);	/* drop the reference held by s_root */
	shrink_dcache_for_umount_subtree(dentry);

	/* anonymous (NFS-export) dentries are roots of their own subtrees */
	while (!hlist_empty(&sb->s_anon)) {
		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point.  Walks the whole subtree under dcache_lock
 * using the classic repeat/resume goto pattern (no recursion).
 */
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry))
			goto positive;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0; /* No mount points found in tree */
positive:
	spin_unlock(&dcache_lock);
	return 1;
}
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
/*
 * Search the dentry child list for the specified parent, and move any
 * unused dentries to the end of the unused list for prune_dcache().
 * We descend to the next level whenever the d_subdirs list is non-empty
 * and continue searching.
 *
 * It returns zero iff there are no unused children, otherwise it returns
 * the number of children moved to the end of the unused list.  This may
 * not be the total number of unused children, because select_parent can
 * drop the lock and return early due to latency constraints.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		dentry_lru_del_init(dentry);
		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!atomic_read(&dentry->d_count)) {
			dentry_lru_add_tail(dentry);
			found++;
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress).  We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched())
			goto out;

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);
	return found;
}
863
864
865
866
867
868
869
870
871void shrink_dcache_parent(struct dentry * parent)
872{
873 struct super_block *sb = parent->d_sb;
874 int found;
875
876 while ((found = select_parent(parent)) != 0)
877 __shrink_dcache_sb(sb, &found, 0);
878}
879
880
881
882
883
884
885
886
887
888
889
890
891
/*
 * Scan `nr' dentries and return the number which remain.
 *
 * We need to avoid reentering the filesystem if the caller is performing a
 * GFP_NOFS allocation attempt, since pruning can call back into the
 * filesystem (dput -> dentry_iput -> iput -> fs code) and deadlock on
 * locks the allocator's caller already holds.  In this case we return -1
 * to tell the caller that we baled.
 */
static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr);
	}
	/* report remaining reclaimable objects, scaled by cache pressure */
	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dcache_shrinker = {
	.shrink = shrink_dcache_memory,
	.seeks = DEFAULT_SEEKS,
};
906
907
908
909
910
911
912
913
914
915
916
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry.  It returns %NULL if there is insufficient memory
 * available.  On success the dentry is returned.  The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/* long names need a separate allocation; short ones fit in d_iname */
	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;	/* qstr names are kept NUL-terminated */

	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = DCACHE_UNHASHED;	/* invisible until d_rehash() */
	spin_lock_init(&dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	dentry->d_mounted = 0;
	INIT_HLIST_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);

	if (parent) {
		dentry->d_parent = dget(parent);	/* child pins its parent */
		dentry->d_sb = parent->d_sb;
	} else {
		INIT_LIST_HEAD(&dentry->d_u.d_child);
	}

	spin_lock(&dcache_lock);
	if (parent)
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	dentry_stat.nr_dentry++;
	spin_unlock(&dcache_lock);

	return dentry;
}
971
972struct dentry *d_alloc_name(struct dentry *parent, const char *name)
973{
974 struct qstr q;
975
976 q.name = name;
977 q.len = strlen(name);
978 q.hash = full_name_hash(q.name, q.len);
979 return d_alloc(parent, &q);
980}
981
982
/* the caller must hold dcache_lock */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	if (inode)
		list_add(&dentry->d_alias, &inode->i_dentry);
	dentry->d_inode = inode;
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	spin_lock(&dcache_lock);
	__d_instantiate(entry, inode);
	spin_unlock(&dcache_lock);
	security_d_instantiate(entry, inode);
}
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
/*
 * Instantiate @entry with @inode unless an equivalent alias (same name,
 * length and parent) already hangs off the inode; in that case return the
 * existing alias with an extra reference and leave @entry untouched.
 * Returns NULL when @entry was instantiated.  Caller holds dcache_lock
 * and should hold the parent directory's i_mutex to keep renames out.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		/* an alias matches only if hash, parent, length and name agree */
		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (qstr->len != len)
			continue;
		if (memcmp(qstr->name, name, len))
			continue;
		dget_locked(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}
1062
/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.  On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to the inode.
 */
struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	spin_lock(&dcache_lock);
	result = __d_instantiate_unique(entry, inode);
	spin_unlock(&dcache_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);	/* the alias already holds a reference; drop ours */
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094struct dentry * d_alloc_root(struct inode * root_inode)
1095{
1096 struct dentry *res = NULL;
1097
1098 if (root_inode) {
1099 static const struct qstr name = { .name = "/", .len = 1 };
1100
1101 res = d_alloc(NULL, &name);
1102 if (res) {
1103 res->d_sb = root_inode->i_sb;
1104 res->d_parent = res;
1105 d_instantiate(res, root_inode);
1106 }
1107 }
1108 return res;
1109}
1110
/*
 * Map a (parent, name-hash) pair to its hash chain.  The parent pointer is
 * mixed in (divided by L1_CACHE_BYTES since its low bits are always zero)
 * so that equal name hashes under different parents land on different
 * chains.
 */
static inline struct hlist_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open-by-handle operations.  The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  To ease use in export operations a %NULL or IS_ERR inode may
 * be passed in; the error is propagated to the return value, with a %NULL
 * @inode mapped to ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_alias(inode);
	if (res)
		goto out_iput;

	tmp = d_alloc(NULL, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}
	tmp->d_parent = tmp; /* make sure dput doesn't croak */

	spin_lock(&dcache_lock);
	res = __d_find_alias(inode, 0);	/* recheck: did someone race us? */
	if (res) {
		spin_unlock(&dcache_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry on the superblock's anon list */
	spin_lock(&tmp->d_lock);
	tmp->d_sb = inode->i_sb;
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	tmp->d_flags &= ~DCACHE_UNHASHED;
	list_add(&tmp->d_alias, &inode->i_dentry);
	hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon);
	spin_unlock(&tmp->d_lock);

	spin_unlock(&dcache_lock);
	return tmp;

 out_iput:
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT
 * and DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is
 * exportable (via knfsd) so that we can build dcache paths to directories
 * effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&dcache_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&dcache_lock);
			security_d_instantiate(new, inode);
			d_rehash(dentry);
			d_move(new, dentry);
			iput(inode);	/* alias already pins the inode */
		} else {
			/* already holding dcache_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode:  the inode case-insensitive lookup has found
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names for the
 * same inode; only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * If the case-exact dentry already exists in the dcache, use it and return
 * it.  Otherwise allocate a new dentry with the exact case and return the
 * spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	int error;
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			error = -ENOMEM;
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);	/* splice used an existing alias */
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative, use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * Negative dentry: instantiate it unless the inode is a directory
	 * and already has a dentry.
	 */
	spin_lock(&dcache_lock);
	if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
		__d_instantiate(found, inode);
		spin_unlock(&dcache_lock);
		security_d_instantiate(found, inode);
		return found;
	}

	/*
	 * In case a directory already has a (disconnected) entry grab a
	 * reference to it, move it in place and use it.
	 */
	new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
	dget_locked(new);
	spin_unlock(&dcache_lock);
	security_d_instantiate(found, inode);
	d_move(new, found);
	iput(inode);
	dput(found);
	return new;

err_out:
	iput(inode);
	return ERR_PTR(error);
}
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
1347{
1348 struct dentry * dentry = NULL;
1349 unsigned long seq;
1350
1351 do {
1352 seq = read_seqbegin(&rename_lock);
1353 dentry = __d_lookup(parent, name);
1354 if (dentry)
1355 break;
1356 } while (read_seqretry(&rename_lock, seq));
1357 return dentry;
1358}
1359
/*
 * Lockless (RCU) hash-chain lookup.  Walks the chain for (@parent, @name)
 * under rcu_read_lock() only; each candidate is re-validated under its
 * d_lock before the refcount is raised.  Because rename_lock is not taken
 * here, a concurrent rename can produce a false negative; d_lookup() wraps
 * this function in a seqlock retry loop to close that race.
 */
struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_head *head = d_hash(parent,hash);
	struct dentry *found = NULL;
	struct hlist_node *node;
	struct dentry *dentry;

	rcu_read_lock();

	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		struct qstr *qstr;

		/* cheap unlocked filters first */
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;

		spin_lock(&dentry->d_lock);

		/*
		 * Recheck the dentry after taking the lock - d_move may have
		 * changed things.  Don't bother checking the hash because
		 * we're about to compare the whole name anyway.
		 */
		if (dentry->d_parent != parent)
			goto next;

		/* non-existing due to RCU? */
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		qstr = &dentry->d_name;
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, qstr, name))
				goto next;
		} else {
			if (qstr->len != len)
				goto next;
			if (memcmp(qstr->name, str, len))
				goto next;
		}

		atomic_inc(&dentry->d_count);
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
1420
1421
1422
1423
1424
1425
1426
1427
1428struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
1429{
1430 struct dentry *dentry = NULL;
1431
1432
1433
1434
1435
1436
1437 name->hash = full_name_hash(name->name, name->len);
1438 if (dir->d_op && dir->d_op->d_hash) {
1439 if (dir->d_op->d_hash(dir, name) < 0)
1440 goto out;
1441 }
1442 dentry = d_lookup(dir, name);
1443out:
1444 return dentry;
1445}
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
/**
 * d_validate - verify dentry provided from insecure source
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry; here we verify it and dget()
 * it.  Zero is returned if the dentry is invalid, one if it checks out
 * (in which case a reference has been taken).
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct hlist_head *base;
	struct hlist_node *lhp;

	/* Check whether the ptr might be valid at all.. */
	if (!kmem_ptr_validate(dentry_cache, dentry))
		goto out;

	if (dentry->d_parent != dparent)
		goto out;

	/* the dentry must actually be present on its hash chain */
	spin_lock(&dcache_lock);
	base = d_hash(dparent, dentry->d_name.hash);
	hlist_for_each(lhp,base) {
		/* hlist_for_each_entry_rcu() not required for d_hash list
		 * as it is parsed under dcache_lock
		 */
		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			return 1;
		}
	}
	spin_unlock(&dcache_lock);
out:
	return 0;
}
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * When a file is deleted we have two options:
 *  - turn this dentry into a negative dentry (if nobody else uses it)
 *  - unhash this dentry and free it later when it has no users
 */
void d_delete(struct dentry * dentry)
{
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	spin_lock(&dentry->d_lock);
	isdir = S_ISDIR(dentry->d_inode->i_mode);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_iput(dentry);	/* drops both locks */
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	/* others hold references: just make it unreachable */
	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);

	fsnotify_nameremove(dentry, isdir);
}
1530
/* insert @entry on the given hash chain; caller supplies the chain */
static void __d_rehash(struct dentry * entry, struct hlist_head *list)
{
	/* clearing DCACHE_UNHASHED makes the entry visible to lookups */
	entry->d_flags &= ~DCACHE_UNHASHED;
	hlist_add_head_rcu(&entry->d_hash, list);
}

/* caller holds dcache_lock and entry->d_lock */
static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&dcache_lock);
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
	spin_unlock(&dcache_lock);
}
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway.  As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on.
 */
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal.
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.  Just copy target to dentry.
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			dentry->d_name.len = target->d_name.len;
			return;
		}
	}
	swap(dentry->d_name.len, target->d_name.len);
}
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
/*
 * d_move_locked - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name.  Negative
 * dcache entries should not be moved in this way.  Caller must hold
 * dcache_lock; the rename_lock seqlock is bumped so lockless lookups
 * can detect the rename and retry.
 */
static void d_move_locked(struct dentry * dentry, struct dentry * target)
{
	struct hlist_head *list;

	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	write_seqlock(&rename_lock);
	/*
	 * Take the two d_locks in address order to avoid an ABBA
	 * deadlock against a concurrent d_move the other way round.
	 */
	if (target < dentry) {
		spin_lock(&target->d_lock);
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&dentry->d_lock);
		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
	}

	/* Move the dentry to the target hash queue, if on different bucket */
	if (d_unhashed(dentry))
		goto already_unhashed;

	hlist_del_rcu(&dentry->d_hash);

already_unhashed:
	list = d_hash(target->d_parent, target->d_name.hash);
	__d_rehash(dentry, list);

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	swap(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		swap(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
	write_sequnlock(&rename_lock);
}
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name.  Negative
 * dcache entries should not be moved in this way.
 */
void d_move(struct dentry * dentry, struct dentry * target)
{
	spin_lock(&dcache_lock);
	d_move_locked(dentry, target);
	spin_unlock(&dcache_lock);
}
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
1714{
1715 struct dentry *p;
1716
1717 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
1718 if (p->d_parent == p1)
1719 return p;
1720 }
1721 return NULL;
1722}
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex and the dcache_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
	__releases(dcache_lock)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* Check for loops */
	ret = ERR_PTR(-ELOOP);
	if (d_ancestor(alias, dentry))
		goto out_err;

	/* See lock_rename(); trylocks only, since we hold spinlocks */
	ret = ERR_PTR(-EBUSY);
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	d_move_locked(alias, dentry);
	ret = alias;
out_err:
	spin_unlock(&dcache_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
1767
1768
1769
1770
1771
/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as
 * a named dentry in place of the dentry to be replaced.  Swaps names and
 * parentage between @dentry and @anon and clears the DISCONNECTED flag.
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent, *aparent;

	switch_names(dentry, anon);
	swap(dentry->d_name.hash, anon->d_name.hash);

	dparent = dentry->d_parent;
	aparent = anon->d_parent;

	/* a self-parented (root) dentry stays its own parent after the swap */
	dentry->d_parent = (aparent == anon) ? dentry : aparent;
	list_del(&dentry->d_u.d_child);
	if (!IS_ROOT(dentry))
		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&dentry->d_u.d_child);

	anon->d_parent = (dparent == dentry) ? anon : dparent;
	list_del(&anon->d_u.d_child);
	if (!IS_ROOT(anon))
		list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&anon->d_u.d_child);

	anon->d_flags &= ~DCACHE_DISCONNECTED;
}
1798
1799
1800
1801
1802
1803
1804
1805
1806
/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one.  Returns NULL when
 * @dentry itself was used, or the (referenced) pre-existing alias that
 * was used instead; in the latter case the passed-in inode reference is
 * consumed via iput().
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	spin_lock(&dcache_lock);

	/* Negative dentry: just instantiate (as negative) and hash it. */
	if (!inode) {
		actual = dentry;
		__d_instantiate(dentry, NULL);
		goto found_lock;
	}

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist for this directory? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			/* Is this a disconnected root we can splice into
			 * our tree wholesale? */
			if (IS_ROOT(alias)) {
				spin_lock(&alias->d_lock);
				__d_materialise_dentry(dentry, alias);
				__d_drop(alias);
				goto found;
			}
			/* Nope, but we must(!) avoid directory aliasing:
			 * try to move the existing alias into place. */
			actual = __d_unalias(dentry, alias);
			if (IS_ERR(actual))
				dput(alias);
			/* __d_unalias dropped dcache_lock for us */
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else if (unlikely(!d_unhashed(actual)))
		goto shouldnt_be_hashed;

found_lock:
	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&dcache_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	/* Returned an existing alias: the caller's inode ref is ours to drop */
	iput(inode);
	return actual;

shouldnt_be_hashed:
	spin_unlock(&dcache_lock);
	BUG();
}
1870
/*
 * Copy @namelen bytes of @str immediately before *@buffer, moving both
 * *@buffer and *@buflen back by @namelen.  Returns 0 on success or
 * -ENAMETOOLONG when the string does not fit.  Note that *@buflen is
 * decremented (and may go negative) even on failure.
 */
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	char *dst;

	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;

	dst = *buffer - namelen;
	memcpy(dst, str, namelen);
	*buffer = dst;
	return 0;
}
1880
/* Prepend a qstr's name bytes (no trailing NUL) via prepend(). */
static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	return prepend(buffer, buflen, name->name, name->len);
}
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry (may be modified by this function)
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.  If the entry has been
 * deleted the string " (deleted)" is appended (note that this is
 * ambiguous).  The name is built backwards from the end of @buffer.
 *
 * Returns a pointer into the buffer or an ERR_PTR if the path was too
 * long.  @buflen should be positive; the caller holds dcache_lock.
 *
 * If @path is not reachable from the supplied root, then the value of
 * @root is changed (without modifying refcounts) to the global root
 * that was actually reached.
 */
char *__d_path(const struct path *path, struct path *root,
	       char *buffer, int buflen)
{
	struct dentry *dentry = path->dentry;
	struct vfsmount *vfsmnt = path->mnt;
	char *end = buffer + buflen;
	char *retval;

	spin_lock(&vfsmount_lock);
	/* Reserve the trailing NUL up front. */
	prepend(&end, &buflen, "\0", 1);
	if (d_unlinked(dentry) &&
		(prepend(&end, &buflen, " (deleted)", 10) != 0))
			goto Elong;

	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	for (;;) {
		struct dentry * parent;

		if (dentry == root->dentry && vfsmnt == root->mnt)
			break;
		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			if (vfsmnt->mnt_parent == vfsmnt) {
				goto global_root;
			}
			/* Cross the mountpoint into the parent mount. */
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			continue;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
		    (prepend(&end, &buflen, "/", 1) != 0))
			goto Elong;
		retval = end;
		dentry = parent;
	}

out:
	spin_unlock(&vfsmount_lock);
	return retval;

global_root:
	retval += 1;	/* skip the '/' we already wrote */
	if (prepend_name(&retval, &buflen, &dentry->d_name) != 0)
		goto Elong;
	/* Report back the root we actually reached. */
	root->mnt = vfsmnt;
	root->dentry = dentry;
	goto out;

Elong:
	retval = ERR_PTR(-ENAMETOOLONG);
	goto out;
}
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980char *d_path(const struct path *path, char *buf, int buflen)
1981{
1982 char *res;
1983 struct path root;
1984 struct path tmp;
1985
1986
1987
1988
1989
1990
1991
1992
1993 if (path->dentry->d_op && path->dentry->d_op->d_dname)
1994 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
1995
1996 read_lock(¤t->fs->lock);
1997 root = current->fs->root;
1998 path_get(&root);
1999 read_unlock(¤t->fs->lock);
2000 spin_lock(&dcache_lock);
2001 tmp = root;
2002 res = __d_path(path, &tmp, buf, buflen);
2003 spin_unlock(&dcache_lock);
2004 path_put(&root);
2005 return res;
2006}
2007
2008
2009
2010
/*
 * Helper function for dentry_operations.d_dname() members: format a
 * synthetic name with printf semantics and place it (NUL included)
 * flush against the end of @buffer.  Returns a pointer to the name or
 * ERR_PTR(-ENAMETOOLONG) if it does not fit.
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list ap;
	char scratch[64];
	int needed;

	/* Render into a small on-stack scratch buffer first. */
	va_start(ap, fmt);
	needed = vsnprintf(scratch, sizeof(scratch), fmt, ap) + 1;
	va_end(ap);

	/* Reject names that overflowed the scratch space or the caller's
	 * buffer. */
	if (needed > sizeof(scratch) || needed > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	/* Copy name + NUL to the tail of the caller's buffer. */
	return memcpy(buffer + buflen - needed, scratch, needed);
}
2028
2029
2030
2031
/*
 * dentry_path - build the pathname of a dentry relative to its own
 * filesystem root (no vfsmount crossing, unlike __d_path).  The name is
 * built backwards from the end of @buf; "//deleted" is appended for
 * unlinked dentries.  Returns a pointer into @buf or an ERR_PTR on
 * overflow.
 */
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	spin_lock(&dcache_lock);
	/* Reserve the trailing NUL up front. */
	prepend(&end, &buflen, "\0", 1);
	if (d_unlinked(dentry) &&
		(prepend(&end, &buflen, "//deleted", 9) != 0))
			goto Elong;
	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	/* Walk parent pointers up to the filesystem root, prepending one
	 * "/name" component per step. */
	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
		    (prepend(&end, &buflen, "/", 1) != 0))
			goto Elong;

		retval = end;
		dentry = parent;
	}
	spin_unlock(&dcache_lock);
	return retval;
Elong:
	spin_unlock(&dcache_lock);
	return ERR_PTR(-ENAMETOOLONG);
}
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
2085{
2086 int error;
2087 struct path pwd, root;
2088 char *page = (char *) __get_free_page(GFP_USER);
2089
2090 if (!page)
2091 return -ENOMEM;
2092
2093 read_lock(¤t->fs->lock);
2094 pwd = current->fs->pwd;
2095 path_get(&pwd);
2096 root = current->fs->root;
2097 path_get(&root);
2098 read_unlock(¤t->fs->lock);
2099
2100 error = -ENOENT;
2101 spin_lock(&dcache_lock);
2102 if (!d_unlinked(pwd.dentry)) {
2103 unsigned long len;
2104 struct path tmp = root;
2105 char * cwd;
2106
2107 cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE);
2108 spin_unlock(&dcache_lock);
2109
2110 error = PTR_ERR(cwd);
2111 if (IS_ERR(cwd))
2112 goto out;
2113
2114 error = -ERANGE;
2115 len = PAGE_SIZE + page - cwd;
2116 if (len <= size) {
2117 error = len;
2118 if (copy_to_user(buf, cwd, len))
2119 error = -EFAULT;
2120 }
2121 } else
2122 spin_unlock(&dcache_lock);
2123
2124out:
2125 path_put(&pwd);
2126 path_put(&root);
2127 free_page((unsigned long) page);
2128 return error;
2129}
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2148{
2149 int result;
2150 unsigned long seq;
2151
2152 if (new_dentry == old_dentry)
2153 return 1;
2154
2155
2156
2157
2158
2159 rcu_read_lock();
2160 do {
2161
2162 seq = read_seqbegin(&rename_lock);
2163 if (d_ancestor(old_dentry, new_dentry))
2164 result = 1;
2165 else
2166 result = 0;
2167 } while (read_seqretry(&rename_lock, seq));
2168 rcu_read_unlock();
2169
2170 return result;
2171}
2172
/*
 * d_genocide - walk the subtree below @root depth-first and drop one
 * d_count reference from every hashed, positive dentry found (and from
 * each traversed directory on the way back up).  Runs entirely under
 * dcache_lock.
 */
void d_genocide(struct dentry *root)
{
	struct dentry *this_parent = root;
	struct list_head *next;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* Skip unhashed or negative dentries. */
		if (d_unhashed(dentry)||!dentry->d_inode)
			continue;
		/* Descend into a non-empty subdirectory first. */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		atomic_dec(&dentry->d_count);
	}
	/* Ascend: drop the reference on the directory just finished and
	 * resume iterating its parent's child list. */
	if (this_parent != root) {
		next = this_parent->d_u.d_child.next;
		atomic_dec(&this_parent->d_count);
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
}
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2218{
2219 struct dentry * dentry;
2220 ino_t ino = 0;
2221
2222 dentry = d_hash_and_lookup(dir, name);
2223 if (dentry) {
2224 if (dentry->d_inode)
2225 ino = dentry->d_inode->i_ino;
2226 dput(dentry);
2227 }
2228 return ino;
2229}
2230
/* "dhash_entries=" boot parameter: requested dentry hash table size. */
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	/* No value supplied: report the option as unhandled. */
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
2240
/* Early-boot allocation of the dentry hash table (HASH_EARLY path). */
static void __init dcache_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}
2264
/* Set up the dentry slab cache, register the shrinker, and (on hashdist
 * systems) allocate the dentry hash table that dcache_init_early skipped. */
static void __init dcache_init(void)
{
	int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	register_shrinker(&dcache_shrinker);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}
2296
2297
/* SLAB cache for PATH_MAX-sized pathname buffers (created in
 * vfs_caches_init() below). */
struct kmem_cache *names_cachep __read_mostly;

EXPORT_SYMBOL(d_genocide);
2301
/* Early-boot VFS cache setup: dentry and inode hash tables. */
void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}
2307
/* Main VFS cache initialization: names cache, dcache, icache, files,
 * mounts, block-device and chrdev caches. */
void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to 150%
	 * of current kernel size */
	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}
2328
/* Public dcache entry points exported to modules. */
EXPORT_SYMBOL(d_alloc);
EXPORT_SYMBOL(d_alloc_root);
EXPORT_SYMBOL(d_delete);
EXPORT_SYMBOL(d_find_alias);
EXPORT_SYMBOL(d_instantiate);
EXPORT_SYMBOL(d_invalidate);
EXPORT_SYMBOL(d_lookup);
EXPORT_SYMBOL(d_move);
EXPORT_SYMBOL_GPL(d_materialise_unique);
EXPORT_SYMBOL(d_path);
EXPORT_SYMBOL(d_prune_aliases);
EXPORT_SYMBOL(d_rehash);
EXPORT_SYMBOL(d_splice_alias);
EXPORT_SYMBOL(d_add_ci);
EXPORT_SYMBOL(d_validate);
EXPORT_SYMBOL(dget_locked);
EXPORT_SYMBOL(dput);
EXPORT_SYMBOL(find_inode_number);
EXPORT_SYMBOL(have_submounts);
EXPORT_SYMBOL(names_cachep);
EXPORT_SYMBOL(shrink_dcache_parent);
EXPORT_SYMBOL(shrink_dcache_sb);
2351