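/*
 * inode.c - core VFS inode handling: allocation and initialisation,
 * the inode hash table, per-superblock inode lists, the inode LRU,
 * and eviction.
 */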
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/ima.h>
#include <linux/cred.h>
#include <linux/buffer_head.h>	/* for inode_has_buffers */
#include "internal.h"
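/*
 * Inode locking rules (reconstructed from the lock usage below):
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * inode->i_sb->s_inode_lru_lock protects:
 *   inode->i_sb->s_inode_lru, inode->i_sb->s_nr_inodes_unused
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     inode->i_sb->s_inode_lru_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */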
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
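/*
 * Empty aops.  Can be used for the cases where the user does not
 * define any of the address_space operations.
 */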
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);
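/*
 * Statistics gathering.
 */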
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned int, nr_inodes);
static DEFINE_PER_CPU(unsigned int, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static int get_nr_inodes(void)
{
        int i;
        int sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_inodes, i);
        return sum < 0 ? 0 : sum;
}

static inline int get_nr_inodes_unused(void)
{
        int i;
        int sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_unused, i);
        return sum < 0 ? 0 : sum;
}

int get_nr_dirty_inodes(void)
{
        /* not actually dirty inodes, but a wild approximation */
        int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
        return nr_dirty > 0 ? nr_dirty : 0;
}
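/*
 * Handle nr_inode sysctl
 */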
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(ctl_table *table, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos)
{
        inodes_stat.nr_inodes = get_nr_inodes();
        inodes_stat.nr_unused = get_nr_inodes_unused();
        return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif
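/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */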
int inode_init_always(struct super_block *sb, struct inode *inode)
{
        static const struct inode_operations empty_iops;
        static const struct file_operations empty_fops;
        struct address_space *const mapping = &inode->i_data;

        inode->i_sb = sb;
        inode->i_blkbits = sb->s_blocksize_bits;
        inode->i_flags = 0;
        atomic_set(&inode->i_count, 1);
        inode->i_op = &empty_iops;
        inode->i_fop = &empty_fops;
        inode->__i_nlink = 1;
        inode->i_opflags = 0;
        inode->i_uid = 0;
        inode->i_gid = 0;
        atomic_set(&inode->i_writecount, 0);
        inode->i_size = 0;
        inode->i_blocks = 0;
        inode->i_bytes = 0;
        inode->i_generation = 0;
#ifdef CONFIG_QUOTA
        memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
        inode->i_pipe = NULL;
        inode->i_bdev = NULL;
        inode->i_cdev = NULL;
        inode->i_rdev = 0;
        inode->dirtied_when = 0;

        if (security_inode_alloc(inode))
                goto out;
        spin_lock_init(&inode->i_lock);
        lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

        mutex_init(&inode->i_mutex);
        lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

        atomic_set(&inode->i_dio_count, 0);

        mapping->a_ops = &empty_aops;
        mapping->host = inode;
        mapping->flags = 0;
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->assoc_mapping = NULL;
        mapping->backing_dev_info = &default_backing_dev_info;
        mapping->writeback_index = 0;

        /*
         * If the block_device provides a backing_dev_info for client
         * inodes then use that.  Otherwise the inode shares the bdev's
         * backing_dev_info.
         */
        if (sb->s_bdev) {
                struct backing_dev_info *bdi;

                bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
                mapping->backing_dev_info = bdi;
        }
        inode->i_private = NULL;
        inode->i_mapping = mapping;
#ifdef CONFIG_FS_POSIX_ACL
        inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
        inode->i_fsnotify_mask = 0;
#endif

        this_cpu_inc(nr_inodes);

        return 0;
out:
        return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
        struct inode *inode;

        if (sb->s_op->alloc_inode)
                inode = sb->s_op->alloc_inode(sb);
        else
                inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

        if (!inode)
                return NULL;

        if (unlikely(inode_init_always(sb, inode))) {
                if (inode->i_sb->s_op->destroy_inode)
                        inode->i_sb->s_op->destroy_inode(inode);
                else
                        kmem_cache_free(inode_cachep, inode);
                return NULL;
        }

        return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
        kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
        BUG_ON(inode_has_buffers(inode));
        security_inode_free(inode);
        fsnotify_inode_delete(inode);
#ifdef CONFIG_FS_POSIX_ACL
        if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
                posix_acl_release(inode->i_acl);
        if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
                posix_acl_release(inode->i_default_acl);
#endif
        this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        INIT_LIST_HEAD(&inode->i_dentry);
        kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
        BUG_ON(!list_empty(&inode->i_lru));
        __destroy_inode(inode);
        if (inode->i_sb->s_op->destroy_inode)
                inode->i_sb->s_op->destroy_inode(inode);
        else
                call_rcu(&inode->i_rcu, i_callback);
}

void address_space_init_once(struct address_space *mapping)
{
        memset(mapping, 0, sizeof(*mapping));
        INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
        spin_lock_init(&mapping->tree_lock);
        mutex_init(&mapping->i_mmap_mutex);
        INIT_LIST_HEAD(&mapping->private_list);
        spin_lock_init(&mapping->private_lock);
        INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
        INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);
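/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab aware of that.
 */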
void inode_init_once(struct inode *inode)
{
        memset(inode, 0, sizeof(*inode));
        INIT_HLIST_NODE(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_dentry);
        INIT_LIST_HEAD(&inode->i_devices);
        INIT_LIST_HEAD(&inode->i_wb_list);
        INIT_LIST_HEAD(&inode->i_lru);
        address_space_init_once(&inode->i_data);
        i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
        INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
        struct inode *inode = (struct inode *) foo;

        inode_init_once(inode);
}
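/*
 * inode->i_lock must be held
 */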
void __iget(struct inode *inode)
{
        atomic_inc(&inode->i_count);
}
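/*
 * get additional reference to inode; caller must already hold one.
 */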
void ihold(struct inode *inode)
{
        WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
        spin_lock(&inode->i_sb->s_inode_lru_lock);
        if (list_empty(&inode->i_lru)) {
                list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
                inode->i_sb->s_nr_inodes_unused++;
                this_cpu_inc(nr_unused);
        }
        spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

static void inode_lru_list_del(struct inode *inode)
{
        spin_lock(&inode->i_sb->s_inode_lru_lock);
        if (!list_empty(&inode->i_lru)) {
                list_del_init(&inode->i_lru);
                inode->i_sb->s_nr_inodes_unused--;
                this_cpu_dec(nr_unused);
        }
        spin_unlock(&inode->i_sb->s_inode_lru_lock);
}
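/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */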
void inode_sb_list_add(struct inode *inode)
{
        spin_lock(&inode_sb_list_lock);
        list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
        spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
        if (!list_empty(&inode->i_sb_list)) {
                spin_lock(&inode_sb_list_lock);
                list_del_init(&inode->i_sb_list);
                spin_unlock(&inode_sb_list_lock);
        }
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
        unsigned long tmp;

        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
                        L1_CACHE_BYTES;
        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
        return tmp & i_hash_mask;
}
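/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */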
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
        struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_add_head(&inode->i_hash, b);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
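/**
 *	__remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock's inode hash.
 */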
void __remove_inode_hash(struct inode *inode)
{
        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_del_init(&inode->i_hash);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void end_writeback(struct inode *inode)
{
        might_sleep();
        /*
         * We have to cycle tree_lock here because reclaim can be still in the
         * process of removing the last page (in __delete_from_page_cache())
         * and we must not free mapping under it.
         */
        spin_lock_irq(&inode->i_data.tree_lock);
        BUG_ON(inode->i_data.nrpages);
        spin_unlock_irq(&inode->i_data.tree_lock);
        BUG_ON(!list_empty(&inode->i_data.private_list));
        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(inode->i_state & I_CLEAR);
        inode_sync_wait(inode);
        /* don't need i_lock here, no concurrent mods to i_state */
        inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(end_writeback);
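/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */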
static void evict(struct inode *inode)
{
        const struct super_operations *op = inode->i_sb->s_op;

        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(!list_empty(&inode->i_lru));

        if (!list_empty(&inode->i_wb_list))
                inode_wb_list_del(inode);

        inode_sb_list_del(inode);

        if (op->evict_inode) {
                op->evict_inode(inode);
        } else {
                if (inode->i_data.nrpages)
                        truncate_inode_pages(&inode->i_data, 0);
                end_writeback(inode);
        }
        if (S_ISBLK(inode->i_mode) && inode->i_bdev)
                bd_forget(inode);
        if (S_ISCHR(inode->i_mode) && inode->i_cdev)
                cd_forget(inode);

        remove_inode_hash(inode);

        spin_lock(&inode->i_lock);
        wake_up_bit(&inode->i_state, __I_NEW);
        BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
        spin_unlock(&inode->i_lock);

        destroy_inode(inode);
}
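/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */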
static void dispose_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct inode *inode;

                inode = list_first_entry(head, struct inode, i_lru);
                list_del_init(&inode->i_lru);

                evict(inode);
        }
}
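/**
 * evict_inodes	- evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */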
void evict_inodes(struct super_block *sb)
{
        struct inode *inode, *next;
        LIST_HEAD(dispose);

        spin_lock(&inode_sb_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                if (atomic_read(&inode->i_count))
                        continue;

                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);
        }
        spin_unlock(&inode_sb_list_lock);

        dispose_list(&dispose);
}
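/**
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */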
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
        int busy = 0;
        struct inode *inode, *next;
        LIST_HEAD(dispose);

        spin_lock(&inode_sb_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_state & I_DIRTY && !kill_dirty) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }
                if (atomic_read(&inode->i_count)) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);
        }
        spin_unlock(&inode_sb_list_lock);

        dispose_list(&dispose);

        return busy;
}

static int can_unuse(struct inode *inode)
{
        if (inode->i_state & ~I_REFERENCED)
                return 0;
        if (inode_has_buffers(inode))
                return 0;
        if (atomic_read(&inode->i_count))
                return 0;
        if (inode->i_data.nrpages)
                return 0;
        return 1;
}
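/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed. If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering.
 */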
void prune_icache_sb(struct super_block *sb, int nr_to_scan)
{
        LIST_HEAD(freeable);
        int nr_scanned;
        unsigned long reap = 0;

        spin_lock(&sb->s_inode_lru_lock);
        for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) {
                struct inode *inode;

                if (list_empty(&sb->s_inode_lru))
                        break;

                inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);

                /*
                 * We are inverting the sb->s_inode_lru_lock/inode->i_lock
                 * order here, so use a trylock. If we fail to get the lock,
                 * just skip the inode and move it to the back of the LRU.
                 */
                if (!spin_trylock(&inode->i_lock)) {
                        list_move_tail(&inode->i_lru, &sb->s_inode_lru);
                        continue;
                }

                /*
                 * Referenced or dirty inodes are still in use. Give them
                 * another pass through the LRU as we cannot reclaim them now.
                 */
                if (atomic_read(&inode->i_count) ||
                    (inode->i_state & ~I_REFERENCED)) {
                        list_del_init(&inode->i_lru);
                        spin_unlock(&inode->i_lock);
                        sb->s_nr_inodes_unused--;
                        this_cpu_dec(nr_unused);
                        continue;
                }

                /* recently referenced inodes get one more pass */
                if (inode->i_state & I_REFERENCED) {
                        inode->i_state &= ~I_REFERENCED;
                        list_move(&inode->i_lru, &sb->s_inode_lru);
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                        __iget(inode);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&sb->s_inode_lru_lock);
                        if (remove_inode_buffers(inode))
                                reap += invalidate_mapping_pages(&inode->i_data,
                                                                0, -1);
                        iput(inode);
                        spin_lock(&sb->s_inode_lru_lock);

                        if (inode != list_entry(sb->s_inode_lru.next,
                                                struct inode, i_lru))
                                continue;       /* wrong inode or list_empty */
                        /* avoid lock inversions with trylock */
                        if (!spin_trylock(&inode->i_lock))
                                continue;
                        if (!can_unuse(inode)) {
                                spin_unlock(&inode->i_lock);
                                continue;
                        }
                }
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state |= I_FREEING;
                spin_unlock(&inode->i_lock);

                list_move(&inode->i_lru, &freeable);
                sb->s_nr_inodes_unused--;
                this_cpu_dec(nr_unused);
        }
        if (current_is_kswapd())
                __count_vm_events(KSWAPD_INODESTEAL, reap);
        else
                __count_vm_events(PGINODESTEAL, reap);
        spin_unlock(&sb->s_inode_lru_lock);

        dispose_list(&freeable);
}

static void __wait_on_freeing_inode(struct inode *inode);
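/*
 * Called with the inode_hash_lock held.
 */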
static struct inode *find_inode(struct super_block *sb,
                                struct hlist_head *head,
                                int (*test)(struct inode *, void *),
                                void *data)
{
        struct hlist_node *node;
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, node, head, i_hash) {
                spin_lock(&inode->i_lock);
                if (inode->i_sb != sb) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (!test(inode, data)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}
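/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */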
static struct inode *find_inode_fast(struct super_block *sb,
                                struct hlist_head *head, unsigned long ino)
{
        struct hlist_node *node;
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, node, head, i_hash) {
                spin_lock(&inode->i_lock);
                if (inode->i_ino != ino) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_sb != sb) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}
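/*
 * Inode number allocation for pseudo filesystems.
 *
 * Each CPU owns a range of LAST_INO_BATCH numbers; the shared counter
 * 'shared_last_ino' is only touched once every LAST_INO_BATCH
 * allocations, to hand out a fresh range, which keeps cacheline
 * bouncing to a minimum.  Every CPU can waste at most LAST_INO_BATCH-1
 * unused numbers, so the overflow rate of the 32-bit space is only
 * marginally increased.
 *
 * On a 32-bit, non-LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in the target struct field, so a 32-bit
 * counter is used here to attempt to avoid that.
 */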
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
        unsigned int *p = &get_cpu_var(last_ino);
        unsigned int res = *p;

#ifdef CONFIG_SMP
        if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
                static atomic_t shared_last_ino;
                int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

                res = next - LAST_INO_BATCH;
        }
#endif

        *p = ++res;
        put_cpu_var(last_ino);
        return res;
}
EXPORT_SYMBOL(get_next_ino);
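/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * Inode won't be chained in superblock's s_inodes list, which means:
 * - the fs can't be unmounted
 * - quotas, fsnotify, writeback can't work
 */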
struct inode *new_inode_pseudo(struct super_block *sb)
{
        struct inode *inode = alloc_inode(sb);

        if (inode) {
                spin_lock(&inode->i_lock);
                inode->i_state = 0;
                spin_unlock(&inode->i_lock);
                INIT_LIST_HEAD(&inode->i_sb_list);
        }
        return inode;
}
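/**
 * new_inode 	- obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */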
struct inode *new_inode(struct super_block *sb)
{
        struct inode *inode;

        spin_lock_prefetch(&inode_sb_list_lock);

        inode = new_inode_pseudo(sb);
        if (inode)
                inode_sb_list_add(inode);
        return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
        if (S_ISDIR(inode->i_mode)) {
                struct file_system_type *type = inode->i_sb->s_type;

                /* Set new key only if filesystem hasn't already changed it */
                if (!lockdep_match_class(&inode->i_mutex,
                    &type->i_mutex_key)) {
                        /*
                         * ensure nobody is actually holding i_mutex
                         */
                        mutex_destroy(&inode->i_mutex);
                        mutex_init(&inode->i_mutex);
                        lockdep_set_class(&inode->i_mutex,
                                          &type->i_mutex_dir_key);
                }
        }
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif
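/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */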
void unlock_new_inode(struct inode *inode)
{
        lockdep_annotate_inode_mutex_key(inode);
        spin_lock(&inode->i_lock);
        WARN_ON(!(inode->i_state & I_NEW));
        inode->i_state &= ~I_NEW;
        wake_up_bit(&inode->i_state, __I_NEW);
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
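/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present it is returned with an increased reference count. This is
 * a generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */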
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *),
                int (*set)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        if (inode) {
                wait_on_inode(inode);
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode(sb, head, test, data);
                if (!old) {
                        if (set(inode, data))
                                goto set_failed;

                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
        }
        return inode;

set_failed:
        spin_unlock(&inode_hash_lock);
        destroy_inode(inode);
        return NULL;
}
EXPORT_SYMBOL(iget5_locked);
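/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */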
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);
        if (inode) {
                wait_on_inode(inode);
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode_fast(sb, head, ino);
                if (!old) {
                        inode->i_ino = ino;
                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
        }
        return inode;
}
EXPORT_SYMBOL(iget_locked);
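/*
 * Illustrative caller sketch (not part of this file): a filesystem's
 * lookup path would typically pair iget_locked() with
 * unlock_new_inode(), with foofs_read_inode() standing in for the
 * fs-specific routine that fills the inode from disk:
 *
 *	struct inode *inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		foofs_read_inode(inode);
 *		unlock_new_inode(inode);
 *	}
 *	return inode;
 */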
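/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */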
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *b = inode_hashtable + hash(sb, ino);
        struct hlist_node *node;
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, node, b, i_hash) {
                if (inode->i_ino == ino && inode->i_sb == sb) {
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
        }
        spin_unlock(&inode_hash_lock);

        return 1;
}
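/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */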
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
        /*
         * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
         * error if st_ino won't fit in target struct field. Use 32bit counter
         * here to attempt to avoid that.
         */
        static DEFINE_SPINLOCK(iunique_lock);
        static unsigned int counter;
        ino_t res;

        spin_lock(&iunique_lock);
        do {
                if (counter <= max_reserved)
                        counter = max_reserved + 1;
                res = counter++;
        } while (!test_inode_iunique(sb, res));
        spin_unlock(&iunique_lock);

        return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                /*
                 * Handle the case where s_op->clear_inode has not been
                 * called yet, and somebody is calling igrab
                 * while the inode is getting freed.
                 */
                inode = NULL;
        }
        return inode;
}
EXPORT_SYMBOL(igrab);
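/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */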
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
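/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */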
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

        if (inode)
                wait_on_inode(inode);
        return inode;
}
EXPORT_SYMBOL(ilookup5);
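/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */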
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);

        if (inode)
                wait_on_inode(inode);
        return inode;
}
EXPORT_SYMBOL(ilookup);

int insert_inode_locked(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        ino_t ino = inode->i_ino;
        struct hlist_head *head = inode_hashtable + hash(sb, ino);

        while (1) {
                struct hlist_node *node;
                struct inode *old = NULL;
                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, node, head, i_hash) {
                        if (old->i_ino != ino)
                                continue;
                        if (old->i_sb != sb)
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!node)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct super_block *sb = inode->i_sb;
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);

        while (1) {
                struct hlist_node *node;
                struct inode *old = NULL;

                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, node, head, i_hash) {
                        if (old->i_sb != sb)
                                continue;
                        if (!test(old, data))
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!node)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked4);

int generic_delete_inode(struct inode *inode)
{
        return 1;
}
EXPORT_SYMBOL(generic_delete_inode);
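/*
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
 */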
int generic_drop_inode(struct inode *inode)
{
        return !inode->i_nlink || inode_unhashed(inode);
}
EXPORT_SYMBOL_GPL(generic_drop_inode);
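/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict inode, do so.  Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */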
static void iput_final(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        const struct super_operations *op = inode->i_sb->s_op;
        int drop;

        WARN_ON(inode->i_state & I_NEW);

        if (op->drop_inode)
                drop = op->drop_inode(inode);
        else
                drop = generic_drop_inode(inode);

        if (!drop && (sb->s_flags & MS_ACTIVE)) {
                inode->i_state |= I_REFERENCED;
                if (!(inode->i_state & (I_DIRTY|I_SYNC)))
                        inode_lru_list_add(inode);
                spin_unlock(&inode->i_lock);
                return;
        }

        if (!drop) {
                inode->i_state |= I_WILL_FREE;
                spin_unlock(&inode->i_lock);
                write_inode_now(inode, 1);
                spin_lock(&inode->i_lock);
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state &= ~I_WILL_FREE;
        }

        inode->i_state |= I_FREEING;
        if (!list_empty(&inode->i_lru))
                inode_lru_list_del(inode);
        spin_unlock(&inode->i_lock);

        evict(inode);
}
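/**
 * iput	- put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */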
void iput(struct inode *inode)
{
        if (inode) {
                BUG_ON(inode->i_state & I_CLEAR);

                if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
                        iput_final(inode);
        }
}
EXPORT_SYMBOL(iput);
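/**
 * bmap	- find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */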
sector_t bmap(struct inode *inode, sector_t block)
{
        sector_t res = 0;
        if (inode->i_mapping->a_ops->bmap)
                res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
        return res;
}
EXPORT_SYMBOL(bmap);
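/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */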
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
                             struct timespec now)
{
        if (!(mnt->mnt_flags & MNT_RELATIME))
                return 1;
        /*
         * Is mtime younger than atime? If yes, update atime:
         */
        if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
                return 1;
        /*
         * Is ctime younger than atime? If yes, update atime:
         */
        if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
                return 1;

        /*
         * Is the previous atime value older than a day? If yes,
         * update atime:
         */
        if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
                return 1;
        /*
         * Good, we can skip the atime update:
         */
        return 0;
}
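/**
 * touch_atime	-	update the access time
 * @mnt: mount the inode is accessed on
 * @dentry: dentry accessed
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */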
void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;
        struct timespec now;

        if (inode->i_flags & S_NOATIME)
                return;
        if (IS_NOATIME(inode))
                return;
        if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
                return;

        if (mnt->mnt_flags & MNT_NOATIME)
                return;
        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
                return;

        now = current_fs_time(inode->i_sb);

        if (!relatime_need_update(mnt, inode, now))
                return;

        if (timespec_equal(&inode->i_atime, &now))
                return;

        if (mnt_want_write(mnt))
                return;

        inode->i_atime = now;
        mark_inode_dirty_sync(inode);
        mnt_drop_write(mnt);
}
EXPORT_SYMBOL(touch_atime);
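/**
 * file_update_time	-	update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback.  Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore update via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystem where these
 * timestamps are handled by the server.
 */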
void file_update_time(struct file *file)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct timespec now;
        enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;

        /* First try to exhaust all avenues to not sync */
        if (IS_NOCMTIME(inode))
                return;

        now = current_fs_time(inode->i_sb);
        if (!timespec_equal(&inode->i_mtime, &now))
                sync_it = S_MTIME;

        if (!timespec_equal(&inode->i_ctime, &now))
                sync_it |= S_CTIME;

        if (IS_I_VERSION(inode))
                sync_it |= S_VERSION;

        if (!sync_it)
                return;

        /* Finally allowed to write? Takes lock. */
        if (mnt_want_write_file(file))
                return;

        /* Only change inode inside the lock region */
        if (sync_it & S_VERSION)
                inode_inc_iversion(inode);
        if (sync_it & S_CTIME)
                inode->i_ctime = now;
        if (sync_it & S_MTIME)
                inode->i_mtime = now;
        mark_inode_dirty_sync(inode);
        mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(file_update_time);

int inode_needs_sync(struct inode *inode)
{
        if (IS_SYNC(inode))
                return 1;
        if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
                return 1;
        return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

int inode_wait(void *word)
{
        schedule();
        return 0;
}
EXPORT_SYMBOL(inode_wait);
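/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */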
static void __wait_on_freeing_inode(struct inode *inode)
{
        wait_queue_head_t *wq;
        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
        wq = bit_waitqueue(&inode->i_state, __I_NEW);
        prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
        schedule();
        finish_wait(wq, &wait.wait);
        spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
        if (!str)
                return 0;
        ihash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("ihash_entries=", set_ihash_entries);
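/*
 * Initialize the waitqueues and inode hash table.
 */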
void __init inode_init_early(void)
{
        int loop;

        /* If hashes are distributed across NUMA nodes, defer
         * hash allocation until vmalloc space is available.
         */
        if (hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        HASH_EARLY,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0);

        for (loop = 0; loop < (1 << i_hash_shift); loop++)
                INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
        int loop;

        /* inode slab cache */
        inode_cachep = kmem_cache_create("inode_cache",
                                         sizeof(struct inode),
                                         0,
                                         (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
                                         SLAB_MEM_SPREAD),
                                         init_once);

        /* Hash may have been set up in inode_init_early */
        if (!hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        0,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0);

        for (loop = 0; loop < (1 << i_hash_shift); loop++)
                INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
        inode->i_mode = mode;
        if (S_ISCHR(mode)) {
                inode->i_fop = &def_chr_fops;
                inode->i_rdev = rdev;
        } else if (S_ISBLK(mode)) {
                inode->i_fop = &def_blk_fops;
                inode->i_rdev = rdev;
        } else if (S_ISFIFO(mode))
                inode->i_fop = &def_fifo_fops;
        else if (S_ISSOCK(mode))
                inode->i_fop = &bad_sock_fops;
        else
                printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
                                  " inode %s:%lu\n", mode, inode->i_sb->s_id,
                                  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);
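/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */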
void inode_init_owner(struct inode *inode, const struct inode *dir,
                        mode_t mode)
{
        inode->i_uid = current_fsuid();
        if (dir && dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;

                /* Directories are special, and always inherit S_ISGID */
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else
                inode->i_gid = current_fsgid();
        inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
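/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER to the inode, or
 * owns the file.
 */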
bool inode_owner_or_capable(const struct inode *inode)
{
        struct user_namespace *ns = inode_userns(inode);

        if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
                return true;
        if (ns_capable(ns, CAP_FOWNER))
                return true;
        return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);