/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h>	/* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any actual address space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode->i_flctx);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
				I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode->i_sb->s_inode_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode->i_sb->s_inode_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 *	__remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free the mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrshadows);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto the dirty/io lists by concurrent writeback, and so that no
 * new users can pick it up while it is being torn down.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode.  We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it
 * doesn't need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		cond_resched();
	}
}

/**
 * evict_inodes	- evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory, check to see if we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes instead of treating them
 * as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);

	return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);

/*
 * Called with the inode hash lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH inodes.
 * But on a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
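
/*
 * Usage sketch (illustrative, not part of this file): pseudo
 * filesystems such as tmpfs, which have no stable on-disk inode
 * numbers, typically stamp a freshly allocated inode like this:
 *
 *	struct inode *inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 */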

/**
 *	new_inode_pseudo 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for given superblock.
 *	The inode will not be chained into the superblock's s_inodes list.
 *	This means:
 *	- the fs can't be unmounted
 *	- quotas, fsnotify and writeback can't work on it
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 *	new_inode 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for given superblock. The default gfp_mask
 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 *	for the page cache are not reclaimable or migratable,
 *	mapping_set_gfp_mask() must be called with suitable flags on the
 *	newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&sb->s_inode_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);
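
/*
 * Illustrative note: if the pages backing i_mapping must not come from
 * highmem or must stay unmovable, the caller is expected to override
 * the default mask right after allocation, e.g. (sketch):
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 */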

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_lock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);
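
/*
 * Usage sketch (illustrative): callers operating on a pair of
 * possibly-identical, possibly-NULL non-directory inodes, such as a
 * rename locking its source and target, pair the two helpers:
 *
 *	lock_two_nondirectories(source, target);
 *	... update both inodes ...
 *	unlock_two_nondirectories(source, target);
 *
 * Ordering the locks by inode address prevents an ABBA deadlock
 * between two concurrent callers that pass the inodes in opposite
 * order.
 */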

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_unlock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(unlock_two_nondirectories);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present it is returned with an increased reference count. This is
 * a generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so
 * they can't sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
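
/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical).
 * A filesystem whose inodes are not uniquely identified by i_ino
 * supplies comparison and initialisation callbacks; both run under
 * inode_hash_lock and so must not sleep:
 *
 *	static int foo_test(struct inode *inode, void *data)
 *	{
 *		return FOO_I(inode)->unique_id == *(u64 *)data;
 *	}
 *
 *	static int foo_set(struct inode *inode, void *data)
 *	{
 *		FOO_I(inode)->unique_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hash_64(id, 64), foo_test, foo_set, &id);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		... fill in the rest of the inode ...
 *		unlock_new_inode(inode);
 *	}
 */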

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
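
/*
 * Usage sketch (illustrative; foo_iget is hypothetical): the classic
 * pattern for a disk filesystem's inode lookup routine:
 *
 *	struct inode *foo_iget(struct super_block *sb, unsigned long ino)
 *	{
 *		struct inode *inode = iget_locked(sb, ino);
 *
 *		if (!inode)
 *			return ERR_PTR(-ENOMEM);
 *		if (!(inode->i_state & I_NEW))
 *			return inode;	(cache hit, already set up)
 *		... read the on-disk inode, set i_mode, i_op, i_fop ...
 *		unlock_new_inode(inode);
 *		return inode;
 *	}
 */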

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
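
/*
 * Illustrative note: iunique() suits filesystems that fabricate inode
 * numbers and hash their inodes, e.g. (sketch, hypothetical caller):
 *
 *	inode->i_ino = iunique(sb, FOO_MAX_RESERVED_INO);
 *
 * Uniqueness is only checked against inodes currently present in the
 * inode hash table, so callers that never hash their inodes should
 * prefer get_next_ino() instead.
 */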

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @match:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped.  The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1.  It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() for when the
 * function must never block (find_inode() can block in
 * __wait_on_freeing_inode()), or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction.  The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);
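
/*
 * Usage sketch (illustrative): when creating a brand new on-disk
 * inode, a filesystem typically sets i_ino and then publishes the
 * inode, handling the unlikely race with a concurrent lookup:
 *
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		... someone else instantiated this ino first ...
 *		... drop our copy and fail the create ...
 *	}
 *	... initialise the inode, then unlock_new_inode(inode) ...
 */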

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);

/*
 * Default ->drop_inode() policy: always tell iput_final() to evict the
 * inode instead of caching it on the LRU.
 */
int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict inode, do so.  Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 *	iput	- put an inode
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count. If the inode use count hits
 *	zero, the inode is then freed and may also be destroyed.
 *
 *	Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (!inode)
		return;
	BUG_ON(inode->i_state & I_CLEAR);
retry:
	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
			atomic_inc(&inode->i_count);
			inode->i_state &= ~I_DIRTY_TIME;
			spin_unlock(&inode->i_lock);
			trace_writeback_lazytime_iput(inode);
			mark_inode_dirty_sync(inode);
			goto retry;
		}
		iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 *	bmap	- find a block number in a file
 *	@inode: inode of file
 *	@block: block to find
 *
 *	Returns the block number on the device holding the inode that
 *	is the disk block number for the block of the file requested.
 *	That is, asked for block 4 of inode 1 the function will return the
 *	disk block relative to the disk start that holds that block of the
 *	file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
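
/*
 * Illustrative note: bmap() is the helper behind the legacy FIBMAP
 * ioctl. A sketch of the mapping it performs, assuming a filesystem
 * that implements ->bmap:
 *
 *	sector_t phys = bmap(inode, 4);
 *
 * phys is now the disk block backing block 4 of the file, or 0 if the
 * block is a hole or the filesystem provides no ->bmap method.
 */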

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{

	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
	int iflags = I_DIRTY_TIME;

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
		iflags |= I_DIRTY_SYNC;
	__mark_inode_dirty(inode, iflags);
	return 0;
}
EXPORT_SYMBOL(generic_update_time);

/*
 * This does the actual work of updating an inode's time or version.
 * The caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	int (*update_time)(struct inode *, struct timespec *, int);

	update_time = inode->i_op->update_time ? inode->i_op->update_time :
		generic_update_time;

	return update_time(inode, time, flags);
}

/**
 *	touch_atime	-	update the access time
 *	@path: the &struct path to update
 *	@inode: inode to update
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */
bool atime_needs_update(const struct path *path, struct inode *inode)
{
	struct vfsmount *mnt = path->mnt;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return false;
	if (IS_NOATIME(inode))
		return false;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	if (mnt->mnt_flags & MNT_NOATIME)
		return false;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return false;

	if (timespec_equal(&inode->i_atime, &now))
		return false;

	return true;
}

void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = d_inode(path->dentry);
	struct timespec now;

	if (!atime_needs_update(path, inode))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt) != 0)
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	now = current_fs_time(inode->i_sb);
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = d_inode(dentry)->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

/*
 * Return mask of changes for notify_change() that need to be done as a
 * response to write or truncate. Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int mask = 0;
	int ret;

	if (IS_NOSEC(inode))
		return 0;

	mask = should_remove_suid(dentry);
	ret = security_inode_need_killpriv(dentry);
	if (ret < 0)
		return ret;
	if (ret)
		mask |= ATTR_KILL_PRIV;
	return mask;
}
EXPORT_SYMBOL(dentry_needs_remove_privs);

static int __remove_privs(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(dentry, &newattrs, NULL);
}

/*
 * Remove special file privileges (suid, capabilities) when file is written
 * to or truncated.
 */
int file_remove_privs(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	int kill;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	kill = file_needs_remove_privs(file);
	if (kill < 0)
		return kill;
	if (kill)
		error = __remove_privs(dentry, kill);
	if (!error)
		inode_has_no_xattr(inode);

	return error;
}
EXPORT_SYMBOL(file_remove_privs);

/**
 *	file_update_time	-	update mtime and ctime time
 *	@file: file accessed
 *
 *	Update the mtime and ctime members of an inode and mark the inode
 *	for writeback.  Note that this function is meant exclusively for
 *	usage in the file write path of filesystems, and filesystems may
 *	choose to explicitly ignore updates via this function with the
 *	S_NOCMTIME inode flag, e.g. for network filesystems where these
 *	timestamps are handled by the server.  This may also fail if the
 *	filesystem supports freeze operation.
 */
int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);
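
/*
 * Usage sketch (illustrative): write paths call this before modifying
 * file data so that mtime/ctime reflect the change; a ->page_mkwrite
 * handler, for example, might do:
 *
 *	sb_start_pagefault(inode->i_sb);
 *	file_update_time(vma->vm_file);
 *	... mark the page writable ...
 *	sb_end_pagefault(inode->i_sb);
 */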

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 *
 * This is called with inode_hash_lock held.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;

		/* Directories are special, and always inherit S_ISGID */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);
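
/*
 * Usage sketch (illustrative): truncate-like paths serialize against
 * in-flight direct I/O before shrinking a file, roughly:
 *
 *	mutex_lock(&inode->i_mutex);
 *	inode_dio_wait(inode);
 *	... truncate page cache and blocks ...
 *	mutex_unlock(&inode->i_mutex);
 *
 * New direct I/O submitters take references via inode_dio_begin(), so
 * the wait cannot miss a request that started before the lock was
 * taken.
 */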

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The cmpxchg() loop guards against
 * code paths that modify i_flags without holding i_mutex, so we use
 * it out of an abundance of caution.
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);
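
/*
 * Usage sketch (illustrative; the foo_* names are hypothetical): a
 * filesystem propagating its on-disk flags into the generic i_flags
 * bits might do:
 *
 *	void foo_set_inode_flags(struct inode *inode)
 *	{
 *		unsigned int new_fl = 0;
 *
 *		if (FOO_I(inode)->flags & FOO_SYNC_FL)
 *			new_fl |= S_SYNC;
 *		if (FOO_I(inode)->flags & FOO_NOATIME_FL)
 *			new_fl |= S_NOATIME;
 *		inode_set_flags(inode, new_fl, S_SYNC | S_NOATIME);
 *	}
 */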