/*
 * fs/inode.c - inode allocation, hashing and lifetime management
 * for the VFS inode cache.
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"
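
/*
 * Inode locking rules (as implemented below):
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */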
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
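
/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */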
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);
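
/*
 * Statistics gathering.
 */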
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}
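
/*
 * Handle nr_inode sysctl
 */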
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}
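
/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */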
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode->i_flctx);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}
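
/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any direct
 * filesystem manipulation of i_nlink.  A decrement to zero means an
 * imminent removal from the filesystem, so the superblock's
 * pending-removal count (s_remove_count) is bumped.
 */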
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);
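
/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any direct
 * filesystem manipulation of i_nlink.  See drop_nlink() for why we
 * care about s_remove_count.
 */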
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);
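
/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any direct
 * filesystem manipulation of i_nlink.
 */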
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);
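
/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any direct
 * filesystem manipulation of i_nlink.
 */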
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
}
EXPORT_SYMBOL(address_space_init_once);
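
/*
 * These are initializations that only need to be done once, because
 * the fields are idempotent across use of the inode, so let the slab
 * be aware of that.
 */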
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}
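
/*
 * inode->i_lock must be held
 */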
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}
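
/*
 * get additional reference to inode; caller must already hold one.
 */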
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}
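
/*
 * Add inode to the superblock LRU if needed (i.e. the inode is unused
 * and clean, and the filesystem is still active).
 *
 * Needs inode->i_lock held.
 */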
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
				I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}
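
/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */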
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}
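
/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */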
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
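
/**
 *	__remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock's inode hash.
 */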
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free the mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrshadows);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);
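
/*
 * Free the inode passed in, removing it from the lists it is still
 * connected to.  We remove any pages still attached to the inode and
 * wait for any IO that is still in progress before finally destroying
 * the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode
 * being moved back onto lists if we race with other code that
 * manipulates the lists (e.g. writeback_single_inode).  The caller is
 * responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being
 * evicted from the cache.  This should occur atomically with setting
 * the I_FREEING state flag, so no inodes here should ever be on the
 * LRU when being evicted.
 */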
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_wb_list))
		inode_wb_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for the flusher thread to be done with the inode so that the
	 * filesystem does not start destroying it while writeback is still
	 * running.  Since the inode has I_FREEING set, the flusher thread
	 * won't start new work on it; we just have to wait for running
	 * writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}
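
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it
 * doesn't need to worry about list corruption and SMP locks.
 */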
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}
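
/**
 * evict_inodes	- evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */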
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}
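
/**
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were
 * any busy inodes return a non-zero value, else zero.  If @kill_dirty
 * is set, discard dirty inodes too, otherwise treat them as busy.
 */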
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}
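
/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache
 * have their pagecache removed.  If the inode has metadata buffers
 * attached to mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it
 * has been used recently - the flag is set in iput_final().  When we
 * encounter such an inode, clear the flag and move it to the back of
 * the LRU so it gets another pass through the LRU before it gets
 * reclaimed.  This is necessary because we do lazy LRU updates to
 * minimise lock contention, so the LRU does not have strict ordering
 * and inodes with this flag set are the ones that are out of order.
 */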
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}
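
/*
 * Walk the superblock inode LRU for freeable inodes and attempt to
 * free them.  This is called from the superblock shrinker function
 * with a number of inodes to trim from the LRU.  Inodes to be freed
 * are moved to a temporary list and then freed outside the LRU lock
 * by dispose_list().
 */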
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
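
/*
 * Called with inode_hash_lock held.
 */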
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}
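
/*
 * find_inode_fast is the fast path version of find_inode, see the
 * comment at iget_locked for details.
 */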
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}
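
/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH
 * allocations, to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU
 * can consume at most LAST_INO_BATCH-1 unused inode numbers, so the
 * wastage across the whole 2^32 range is bounded by
 * NR_CPUS * (LAST_INO_BATCH-1) in the worst case.
 */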
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
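
/**
 *	new_inode_pseudo	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for given superblock.
 *	The inode won't be chained into the superblock's s_inodes list,
 *	which means:
 *	- fs can't be unmounted
 *	- quotas, fsnotify, writeback can't work
 */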
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}
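
/**
 *	new_inode	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for given superblock.  The default gfp_mask
 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 *	for the page cache are not reclaimable or migratable,
 *	mapping_set_gfp_mask() must be called with suitable flags on the
 *	newly created inode's mapping.
 */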
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif
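
/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of
 * the inode and wake up anyone waiting for the inode to finish
 * initialisation.
 */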
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
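
/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.  Zero, one or
 * two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */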
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_lock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);
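
/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */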
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_unlock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
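
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, and if present return it with an increased reference count.
 * This is a generalized version of iget_locked() for file systems
 * where the inode number is not sufficient for unique identification
 * of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it
 * locked, hashed, and with the I_NEW flag set.  The file system gets
 * to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held,
 * so can't sleep.
 */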
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
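
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if
 * present return it with an increased reference count.  This is for
 * file systems where the inode number is sufficient for unique
 * identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it
 * locked, hashed, and with the I_NEW flag set.  The file system gets
 * to fill it in before unlocking it via unlock_new_inode().
 */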
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
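
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */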
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}
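
/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock.  This is used by file systems that have no natural
 *	permanent inode numbering system.  An inode number is returned
 *	that is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this
 *	function currently becomes quite slow.
 */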
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in the target struct field.  Use a 32bit
	 * counter here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);
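
/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache.  If the inode is in the cache, the inode is returned with an
 * incremented reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what
 * you do with the returned inode.  You probably should be using
 * ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't
 * sleep.
 */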
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
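
/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, and if the inode is in the cache, return it with an
 * incremented reference count.  Waits on I_NEW before returning the
 * inode.
 *
 * This is a generalized version of ilookup() for file systems where
 * the inode number is not sufficient for unique identification of an
 * inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */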
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);
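
/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is
 * in the cache, the inode is returned with an incremented reference
 * count.
 */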
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);
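
/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @match:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped.  The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1.  It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() for when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction.  The tradeoff is that the @match function must be
 * very carefully implemented.
 */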
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);

int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);
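
/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict the inode, do so.  Otherwise, retain the
 * inode in cache if the fs is alive, sync and evict it
 * if the fs is shutting down.
 */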
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}
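
/**
 *	iput	- put an inode
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count.  If the inode use count
 *	hits zero, the inode is then freed and may also be destroyed.
 *
 *	Consequently, iput() can sleep.
 */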
void iput(struct inode *inode)
{
	if (!inode)
		return;
	BUG_ON(inode->i_state & I_CLEAR);
retry:
	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
			atomic_inc(&inode->i_count);
			inode->i_state &= ~I_DIRTY_TIME;
			spin_unlock(&inode->i_lock);
			trace_writeback_lazytime_iput(inode);
			mark_inode_dirty_sync(inode);
			goto retry;
		}
		iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);
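
/**
 *	bmap	- find a block number in a file
 *	@inode: inode of file
 *	@block: block to find
 *
 *	Returns the block number on the device holding the inode that
 *	is the disk block number for the block of the file requested.
 *	That is, asked for block 4 of inode 1 the function will return
 *	the disk block relative to the disk start that holds that block
 *	of the file.
 */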
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
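
/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */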
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
				struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
	int iflags = I_DIRTY_TIME;

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
		iflags |= I_DIRTY_SYNC;
	__mark_inode_dirty(inode, iflags);
	return 0;
}
EXPORT_SYMBOL(generic_update_time);
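
/*
 * This does the actual work of updating an inode's time or version.
 * The caller must hold a write reference on the mount
 * (mnt_want_write()) before calling this.
 */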
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	int (*update_time)(struct inode *, struct timespec *, int);

	update_time = inode->i_op->update_time ? inode->i_op->update_time :
						 generic_update_time;

	return update_time(inode, time, flags);
}
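
/**
 *	touch_atime	-	update the access time
 *	@path: the &struct path to update
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and
 *	media, as well as the "noatime" flag and inode specific
 *	"noatime" markers.
 */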
void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = path->dentry->d_inode;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	if (mnt->mnt_flags & MNT_NOATIME)
		return;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return;

	if (timespec_equal(&inode->i_atime, &now))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt))
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);
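
/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */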
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = dentry->d_inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark;
	 * leave it alone.  If some exec bits are set, it's a real sgid;
	 * kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

static int __remove_suid(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(dentry, &newattrs, NULL);
}

int file_remove_suid(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int killsuid;
	int killpriv;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	killsuid = should_remove_suid(dentry);
	killpriv = security_inode_need_killpriv(dentry);

	if (killpriv < 0)
		return killpriv;
	if (killpriv)
		error = security_inode_killpriv(dentry);
	if (!error && killsuid)
		error = __remove_suid(dentry, killsuid);
	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
		inode->i_flags |= S_NOSEC;

	return error;
}
EXPORT_SYMBOL(file_remove_suid);
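
/**
 *	file_update_time	-	update mtime and ctime time
 *	@file: file accessed
 *
 *	Update the mtime and ctime members of an inode and mark the inode
 *	for writeback.  Note that this function is meant exclusively for
 *	usage in the file write path of filesystems, and filesystems may
 *	choose to explicitly ignore updates via this function with the
 *	S_NOCMTIME inode flag, e.g. for network filesystems where these
 *	timestamps are handled by the server.  This may also fail if the
 *	file system supports freeze protection, and we get -EROFS.
 *	Returns 0 on success, -errno on failure.
 */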
int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);
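
/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash
 * list will DTRT.
 */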
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);
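
/*
 * Initialize the waitqueues and inode hash table.
 */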
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);
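
/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */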
void inode_init_owner(struct inode *inode, const struct inode *dir,
		      umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
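
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */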
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);
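
/*
 * Direct i/o helper functions
 */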
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}
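
/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */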
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);
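
/*
 * inode_dio_done - signal finish of a direct I/O request
 * @inode: inode the direct I/O happens on
 *
 * This is called once we've finished processing a direct I/O request,
 * and is used to wake up callers waiting for direct I/O to be
 * quiesced.
 */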
void inode_dio_done(struct inode *inode)
{
	if (atomic_dec_and_test(&inode->i_dio_count))
		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
EXPORT_SYMBOL(inode_dio_done);
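
/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The cmpxchg() loop guards against
 * code paths that modify i_flags without following this rule, out of
 * an abundance of caution.
 */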
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);