linux/fs/inode.c
/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
        int i;
        long sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_inodes, i);
        return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
        int i;
        long sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_unused, i);
        return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
        /* not actually dirty inodes, but a wild approximation */
        long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
        return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos)
{
        inodes_stat.nr_inodes = get_nr_inodes();
        inodes_stat.nr_unused = get_nr_inodes_unused();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

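/*
 * Default ->open() for inodes that have no file operations of their own:
 * opening such a file simply fails with -ENXIO.
 */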
static int no_open(struct inode *inode, struct file *file)
{
        return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
        static const struct inode_operations empty_iops;
        static const struct file_operations no_open_fops = {.open = no_open};
        struct address_space *const mapping = &inode->i_data;

        inode->i_sb = sb;
        inode->i_blkbits = sb->s_blocksize_bits;
        inode->i_flags = 0;
        atomic_set(&inode->i_count, 1);
        inode->i_op = &empty_iops;
        inode->i_fop = &no_open_fops;
        inode->__i_nlink = 1;
        inode->i_opflags = 0;
        if (sb->s_xattr)
                inode->i_opflags |= IOP_XATTR;
        i_uid_write(inode, 0);
        i_gid_write(inode, 0);
        atomic_set(&inode->i_writecount, 0);
        inode->i_size = 0;
        inode->i_write_hint = WRITE_LIFE_NOT_SET;
        inode->i_blocks = 0;
        inode->i_bytes = 0;
        inode->i_generation = 0;
        inode->i_pipe = NULL;
        inode->i_bdev = NULL;
        inode->i_cdev = NULL;
        inode->i_link = NULL;
        inode->i_dir_seq = 0;
        inode->i_rdev = 0;
        inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
        inode->i_wb_frn_winner = 0;
        inode->i_wb_frn_avg_time = 0;
        inode->i_wb_frn_history = 0;
#endif

        if (security_inode_alloc(inode))
                goto out;
        spin_lock_init(&inode->i_lock);
        lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

        init_rwsem(&inode->i_rwsem);
        lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

        atomic_set(&inode->i_dio_count, 0);

        mapping->a_ops = &empty_aops;
        mapping->host = inode;
        mapping->flags = 0;
        atomic_set(&mapping->i_mmap_writable, 0);
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;
        inode->i_private = NULL;
        inode->i_mapping = mapping;
        INIT_HLIST_HEAD(&inode->i_dentry);      /* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
        inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
        inode->i_fsnotify_mask = 0;
#endif
        inode->i_flctx = NULL;
        this_cpu_inc(nr_inodes);

        return 0;
out:
        return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
        struct inode *inode;

        if (sb->s_op->alloc_inode)
                inode = sb->s_op->alloc_inode(sb);
        else
                inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

        if (!inode)
                return NULL;

        if (unlikely(inode_init_always(sb, inode))) {
                if (inode->i_sb->s_op->destroy_inode)
                        inode->i_sb->s_op->destroy_inode(inode);
                else
                        kmem_cache_free(inode_cachep, inode);
                return NULL;
        }

        return inode;
}

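/*
 * Free an inode directly from the slab cache, without waiting for an RCU
 * grace period. Only safe when the caller knows no RCU-walk lookup can
 * still reference the inode.
 */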
void free_inode_nonrcu(struct inode *inode)
{
        kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
        BUG_ON(inode_has_buffers(inode));
        inode_detach_wb(inode);
        security_inode_free(inode);
        fsnotify_inode_delete(inode);
        locks_free_lock_context(inode);
        if (!inode->i_nlink) {
                WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
                atomic_long_dec(&inode->i_sb->s_remove_count);
        }

#ifdef CONFIG_FS_POSIX_ACL
        if (inode->i_acl && !is_uncached_acl(inode->i_acl))
                posix_acl_release(inode->i_acl);
        if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
                posix_acl_release(inode->i_default_acl);
#endif
        this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

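/*
 * RCU callback that does the actual slab free once a grace period has
 * elapsed, so RCU-walk path lookups can no longer see the inode.
 */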
static void i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
        BUG_ON(!list_empty(&inode->i_lru));
        __destroy_inode(inode);
        if (inode->i_sb->s_op->destroy_inode)
                inode->i_sb->s_op->destroy_inode(inode);
        else
                call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
        WARN_ON(inode->i_nlink == 0);
        inode->__i_nlink--;
        if (!inode->i_nlink)
                atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
        if (inode->i_nlink) {
                inode->__i_nlink = 0;
                atomic_long_inc(&inode->i_sb->s_remove_count);
        }
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
        if (!nlink) {
                clear_nlink(inode);
        } else {
                /* Yes, some filesystems do change nlink from zero to one */
                if (inode->i_nlink == 0)
                        atomic_long_dec(&inode->i_sb->s_remove_count);

                inode->__i_nlink = nlink;
        }
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
        if (unlikely(inode->i_nlink == 0)) {
                WARN_ON(!(inode->i_state & I_LINKABLE));
                atomic_long_dec(&inode->i_sb->s_remove_count);
        }

        inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

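/*
 * One-time initialisation of an address_space, called from
 * inode_init_once() and available (via EXPORT_SYMBOL below) to other
 * users that embed an address_space in a longer-lived structure.
 */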
void address_space_init_once(struct address_space *mapping)
{
        memset(mapping, 0, sizeof(*mapping));
        INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
        spin_lock_init(&mapping->tree_lock);
        init_rwsem(&mapping->i_mmap_rwsem);
        INIT_LIST_HEAD(&mapping->private_list);
        spin_lock_init(&mapping->private_lock);
        mapping->i_mmap = RB_ROOT_CACHED;
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache know about that.
 */
void inode_init_once(struct inode *inode)
{
        memset(inode, 0, sizeof(*inode));
        INIT_HLIST_NODE(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_devices);
        INIT_LIST_HEAD(&inode->i_io_list);
        INIT_LIST_HEAD(&inode->i_wb_list);
        INIT_LIST_HEAD(&inode->i_lru);
        address_space_init_once(&inode->i_data);
        i_size_ordered_init(inode);
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
        struct inode *inode = (struct inode *) foo;

        inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
        atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
        WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

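/*
 * Add the inode to its superblock's LRU list; if it is already there,
 * just mark it referenced so the LRU isolation pass gives it another go.
 */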
static void inode_lru_list_add(struct inode *inode)
{
        if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
                this_cpu_inc(nr_unused);
        else
                inode->i_state |= I_REFERENCED;
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
        if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
                                I_FREEING | I_WILL_FREE)) &&
            !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
                inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
        if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
                this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
        spin_lock(&inode->i_sb->s_inode_list_lock);
        list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
        spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
        if (!list_empty(&inode->i_sb_list)) {
                spin_lock(&inode->i_sb->s_inode_list_lock);
                list_del_init(&inode->i_sb_list);
                spin_unlock(&inode->i_sb->s_inode_list_lock);
        }
}

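/*
 * Mix the superblock pointer into the hash so that inodes with the same
 * inode number on different filesystems land in different hash chains.
 */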
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
        unsigned long tmp;

        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
                        L1_CACHE_BYTES;
        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
        return tmp & i_hash_mask;
}

/**
 *      __insert_inode_hash - hash an inode
 *      @inode: unhashed inode
 *      @hashval: unsigned long value used to locate this object in the
 *              inode_hashtable.
 *
 *      Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
        struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_add_head(&inode->i_hash, b);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 *      __remove_inode_hash - remove an inode from the hash
 *      @inode: inode to unhash
 *
 *      Remove an inode from the inode hash table.
 */
void __remove_inode_hash(struct inode *inode)
{
        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_del_init(&inode->i_hash);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

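/*
 * Final teardown of inode state before the inode is destroyed. Callers
 * (typically ->evict_inode() implementations) must already have truncated
 * away the page cache; the BUG_ON()s below enforce that.
 */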
void clear_inode(struct inode *inode)
{
        might_sleep();
        /*
         * We have to cycle tree_lock here because reclaim can still be in
         * the process of removing the last page (in
         * __delete_from_page_cache()) and we must not free the mapping
         * under it.
         */
        spin_lock_irq(&inode->i_data.tree_lock);
        BUG_ON(inode->i_data.nrpages);
        BUG_ON(inode->i_data.nrexceptional);
        spin_unlock_irq(&inode->i_data.tree_lock);
        BUG_ON(!list_empty(&inode->i_data.private_list));
        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(inode->i_state & I_CLEAR);
        BUG_ON(!list_empty(&inode->i_wb_list));
        /* don't need i_lock here, no concurrent mods to i_state */
        inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
        const struct super_operations *op = inode->i_sb->s_op;

        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(!list_empty(&inode->i_lru));

        if (!list_empty(&inode->i_io_list))
                inode_io_list_del(inode);

        inode_sb_list_del(inode);

        /*
         * Wait for the flusher thread to be done with the inode so that the
         * filesystem does not start destroying it while writeback is still
         * running. Since the inode has I_FREEING set, the flusher thread
         * won't start new work on the inode.  We just have to wait for
         * running writeback to finish.
         */
        inode_wait_for_writeback(inode);

        if (op->evict_inode) {
                op->evict_inode(inode);
        } else {
                truncate_inode_pages_final(&inode->i_data);
                clear_inode(inode);
        }
        if (S_ISBLK(inode->i_mode) && inode->i_bdev)
                bd_forget(inode);
        if (S_ISCHR(inode->i_mode) && inode->i_cdev)
                cd_forget(inode);

        remove_inode_hash(inode);

        spin_lock(&inode->i_lock);
        wake_up_bit(&inode->i_state, __I_NEW);
        BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
        spin_unlock(&inode->i_lock);

        destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct inode *inode;

                inode = list_first_entry(head, struct inode, i_lru);
                list_del_init(&inode->i_lru);

                evict(inode);
                cond_resched();
        }
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb:         superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after the MS_ACTIVE flag has been
 * removed, so any inode reaching zero refcount during or after that
 * call will be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
        struct inode *inode, *next;
        LIST_HEAD(dispose);

again:
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                if (atomic_read(&inode->i_count))
                        continue;

                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);

                /*
                 * We can have a ton of inodes to evict at unmount time given
                 * enough memory, check to see if we need to go to sleep for a
                 * bit so we don't livelock.
                 */
                if (need_resched()) {
                        spin_unlock(&sb->s_inode_list_lock);
                        cond_resched();
                        dispose_list(&dispose);
                        goto again;
                }
        }
        spin_unlock(&sb->s_inode_list_lock);

        dispose_list(&dispose);
}
EXPORT_SYMBOL_GPL(evict_inodes);

/**
 * invalidate_inodes    - attempt to free all inodes on a superblock
 * @sb:         superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
        int busy = 0;
        struct inode *inode, *next;
        LIST_HEAD(dispose);

        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }
                if (atomic_read(&inode->i_count)) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);
        }
        spin_unlock(&sb->s_inode_list_lock);

        dispose_list(&dispose);

        return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because we
 * are doing lazy LRU updates to minimise lock contention, so the LRU does not
 * have strict ordering. Hence we don't want to reclaim inodes with this flag
 * set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
                struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
        struct list_head *freeable = arg;
        struct inode    *inode = container_of(item, struct inode, i_lru);

        /*
         * we are inverting the lru lock/inode->i_lock here, so use a trylock.
         * If we fail to get the lock, just skip it.
         */
        if (!spin_trylock(&inode->i_lock))
                return LRU_SKIP;

        /*
         * Referenced or dirty inodes are still in use. Give them another pass
         * through the LRU as we cannot reclaim them now.
         */
        if (atomic_read(&inode->i_count) ||
            (inode->i_state & ~I_REFERENCED)) {
                list_lru_isolate(lru, &inode->i_lru);
                spin_unlock(&inode->i_lock);
                this_cpu_dec(nr_unused);
                return LRU_REMOVED;
        }

        /* recently referenced inodes get one more pass */
        if (inode->i_state & I_REFERENCED) {
                inode->i_state &= ~I_REFERENCED;
                spin_unlock(&inode->i_lock);
                return LRU_ROTATE;
        }

        if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(lru_lock);
                if (remove_inode_buffers(inode)) {
                        unsigned long reap;
                        reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
                        if (current_is_kswapd())
                                __count_vm_events(KSWAPD_INODESTEAL, reap);
                        else
                                __count_vm_events(PGINODESTEAL, reap);
                        if (current->reclaim_state)
                                current->reclaim_state->reclaimed_slab += reap;
                }
                iput(inode);
                spin_lock(lru_lock);
                return LRU_RETRY;
        }

        WARN_ON(inode->i_state & I_NEW);
        inode->i_state |= I_FREEING;
        list_lru_isolate_move(lru, &inode->i_lru, freeable);
        spin_unlock(&inode->i_lock);

        this_cpu_dec(nr_unused);
        return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
        LIST_HEAD(freeable);
        long freed;

        freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
                                     inode_lru_isolate, &freeable);
        dispose_list(&freeable);
        return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
                                struct hlist_head *head,
                                int (*test)(struct inode *, void *),
                                void *data)
{
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_sb != sb)
                        continue;
                if (!test(inode, data))
                        continue;
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
                                struct hlist_head *head, unsigned long ino)
{
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_ino != ino)
                        continue;
                if (inode->i_sb != sb)
                        continue;
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
        unsigned int *p = &get_cpu_var(last_ino);
        unsigned int res = *p;

#ifdef CONFIG_SMP
        if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
                static atomic_t shared_last_ino;
                int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

                res = next - LAST_INO_BATCH;
        }
#endif

        res++;
        /* get_next_ino should not provide a 0 inode number */
        if (unlikely(!res))
                res++;
        *p = res;
        put_cpu_var(last_ino);
        return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 *      new_inode_pseudo        - obtain an inode
 *      @sb: superblock
 *
 *      Allocates a new inode for given superblock.
 *      The inode will not be chained into the superblock's s_inodes list.
 *      This means:
 *      - the fs can't be unmounted
 *      - quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
        struct inode *inode = alloc_inode(sb);

        if (inode) {
                spin_lock(&inode->i_lock);
                inode->i_state = 0;
                spin_unlock(&inode->i_lock);
                INIT_LIST_HEAD(&inode->i_sb_list);
        }
        return inode;
}

/**
 *      new_inode       - obtain an inode
 *      @sb: superblock
 *
 *      Allocates a new inode for given superblock. The default gfp_mask
 *      for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 *      If HIGHMEM pages are unsuitable or it is known that pages allocated
 *      for the page cache are not reclaimable or migratable,
 *      mapping_set_gfp_mask() must be called with suitable flags on the
 *      newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
        struct inode *inode;

        spin_lock_prefetch(&sb->s_inode_list_lock);

        inode = new_inode_pseudo(sb);
        if (inode)
                inode_sb_list_add(inode);
        return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
        if (S_ISDIR(inode->i_mode)) {
                struct file_system_type *type = inode->i_sb->s_type;

                /* Set new key only if filesystem hasn't already changed it */
                if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
                        /*
                         * ensure nobody is actually holding i_mutex
                         */
                        // mutex_destroy(&inode->i_mutex);
                        init_rwsem(&inode->i_rwsem);
                        lockdep_set_class(&inode->i_rwsem,
                                          &type->i_mutex_dir_key);
                }
        }
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:      new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
        lockdep_annotate_inode_mutex_key(inode);
        spin_lock(&inode->i_lock);
        WARN_ON(!(inode->i_state & I_NEW));
        inode->i_state &= ~I_NEW;
        smp_mb();
        wake_up_bit(&inode->i_state, __I_NEW);
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
        if (inode1 > inode2)
                swap(inode1, inode2);

        if (inode1 && !S_ISDIR(inode1->i_mode))
                inode_lock(inode1);
        if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
                inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
        if (inode1 && !S_ISDIR(inode1->i_mode))
                inode_unlock(inode1);
        if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
                inode_unlock(inode2);
}
EXPORT_SYMBOL(unlock_two_nondirectories);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:         super block of file system
 * @hashval:    hash value (usually inode number) to get
 * @test:       callback used for comparisons between inodes
 * @set:        callback used to initialize a new struct inode
 * @data:       opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *),
                int (*set)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;
again:
        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode(sb, head, test, data);
                if (!old) {
                        if (set(inode, data))
                                goto set_failed;

                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;

set_failed:
        spin_unlock(&inode_hash_lock);
        destroy_inode(inode);
        return NULL;
}
EXPORT_SYMBOL(iget5_locked);

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:         super block of file system
 * @ino:        inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;
again:
        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);
        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode_fast(sb, head, ino);
                if (!old) {
                        inode->i_ino = ino;
                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(iget_locked);

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *b = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, b, i_hash) {
                if (inode->i_ino == ino && inode->i_sb == sb) {
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
        }
        spin_unlock(&inode_hash_lock);

        return 1;
}

/**
 *      iunique - get a unique inode number
 *      @sb: superblock
 *      @max_reserved: highest reserved inode number
 *
 *      Obtain an inode number that is unique on the system for a given
 *      superblock. This is used by file systems that have no natural
 *      permanent inode numbering system. An inode number is returned that
 *      is higher than the reserved limit but unique.
 *
 *      BUGS:
 *      With a large number of inodes live on the file system this function
 *      currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
        /*
         * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
         * error if st_ino won't fit in target struct field. Use 32bit counter
         * here to attempt to avoid that.
         */
        static DEFINE_SPINLOCK(iunique_lock);
        static unsigned int counter;
        ino_t res;

        spin_lock(&iunique_lock);
        do {
                if (counter <= max_reserved)
                        counter = max_reserved + 1;
                res = counter++;
        } while (!test_inode_iunique(sb, res));
        spin_unlock(&iunique_lock);

        return res;
}
EXPORT_SYMBOL(iunique);

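/**
 * igrab - grab a reference on an inode if it is still live
 * @inode: inode to grab
 *
 * Returns the inode with an incremented reference count, or NULL if the
 * inode is already on its way out (I_FREEING or I_WILL_FREE is set).
 */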
struct inode *igrab(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                /*
                 * Handle the case where s_op->clear_inode has not been
                 * called yet, and somebody is calling igrab
                 * while the inode is getting freed.
                 */
                inode = NULL;
        }
        return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:         super block of file system to search
 * @hashval:    hash value (usually inode number) to search for
 * @test:       callback used for comparisons between inodes
 * @data:       opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:         super block of file system to search
 * @hashval:    hash value (usually inode number) to search for
 * @test:       callback used for comparisons between inodes
 * @data:       opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct inode *inode;
again:
        inode = ilookup5_nowait(sb, hashval, test, data);
        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:         super block of file system to search
 * @ino:        inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;
again:
        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);

        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb:         super block of file system to search
 * @hashval:    hash value (usually inode number) to search for
 * @match:      callback used for comparisons between inodes
 * @data:       opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped.  The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1.  It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction.  The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
                                unsigned long hashval,
                                int (*match)(struct inode *, unsigned long,
                                             void *),
                                void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode, *ret_inode = NULL;
        int mval;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_sb != sb)
                        continue;
                mval = match(inode, hashval, data);
                if (mval == 0)
                        continue;
                if (mval == 1)
                        ret_inode = inode;
                goto out;
        }
out:
        spin_unlock(&inode_hash_lock);
        return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

int insert_inode_locked(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        ino_t ino = inode->i_ino;
        struct hlist_head *head = inode_hashtable + hash(sb, ino);

        while (1) {
                struct inode *old = NULL;
                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_ino != ino)
                                continue;
                        if (old->i_sb != sb)
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!old)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct super_block *sb = inode->i_sb;
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);

        while (1) {
                struct inode *old = NULL;

                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_sb != sb)
                                continue;
                        if (!test(old, data))
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!old)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked4);

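/*
 * A ->drop_inode() helper that always says "evict". Filesystems that
 * never cache unreferenced inodes can use this so that iput_final()
 * evicts the inode immediately on the last iput().
 */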
int generic_delete_inode(struct inode *inode)
{
        return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict the inode, do so.  Otherwise, retain the
 * inode in cache if the fs is alive, or sync and evict
 * it if the fs is shutting down.
 */
static void iput_final(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        const struct super_operations *op = inode->i_sb->s_op;
        int drop;

        WARN_ON(inode->i_state & I_NEW);

        if (op->drop_inode)
                drop = op->drop_inode(inode);
        else
                drop = generic_drop_inode(inode);

        if (!drop && (sb->s_flags & MS_ACTIVE)) {
                inode_add_lru(inode);
                spin_unlock(&inode->i_lock);
                return;
        }

        if (!drop) {
                inode->i_state |= I_WILL_FREE;
                spin_unlock(&inode->i_lock);
                write_inode_now(inode, 1);
                spin_lock(&inode->i_lock);
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state &= ~I_WILL_FREE;
        }

        inode->i_state |= I_FREEING;
        if (!list_empty(&inode->i_lru))
                inode_lru_list_del(inode);
        spin_unlock(&inode->i_lock);

        evict(inode);
}

/**
 *      iput    - put an inode
 *      @inode: inode to put
 *
 *      Puts an inode, dropping its usage count. If the inode use count hits
 *      zero, the inode is then freed and may also be destroyed.
 *
 *      Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
        if (!inode)
                return;
        BUG_ON(inode->i_state & I_CLEAR);
retry:
        if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
                if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
                        atomic_inc(&inode->i_count);
                        inode->i_state &= ~I_DIRTY_TIME;
                        spin_unlock(&inode->i_lock);
                        trace_writeback_lazytime_iput(inode);
                        mark_inode_dirty_sync(inode);
                        goto retry;
                }
                iput_final(inode);
        }
}
EXPORT_SYMBOL(iput);
1546
1547/**
1548 *      bmap    - find a block number in a file
1549 *      @inode: inode of file
1550 *      @block: block to find
1551 *
1552 *      Returns the block number on the device holding the inode that
1553 *      corresponds to the requested block of the file.  That is, asked
1554 *      for block 4 of inode 1, the function will return the disk block,
1555 *      relative to the start of the disk, that holds that block of the
1556 *      file.  Returns 0 if the filesystem does not implement ->bmap.
1557 */
1558sector_t bmap(struct inode *inode, sector_t block)
1559{
1560        sector_t res = 0;
1561        if (inode->i_mapping->a_ops->bmap)
1562                res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
1563        return res;
1564}
1565EXPORT_SYMBOL(bmap);
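/*
 * Illustrative sketch: this is essentially what the FIBMAP ioctl does
 * on behalf of userspace.  A result of 0 means the block is unmapped
 * or the filesystem has no ->bmap operation:
 *
 *        sector_t phys = bmap(inode, 4);
 *        if (!phys)
 *                pr_debug("block 4 of the file has no known mapping\n");
 */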
1566
1567/*
1568 * Update times in an overlaid inode from the underlying real inode
1569 */
1570static void update_ovl_inode_times(struct dentry *dentry, struct inode *inode,
1571                               bool rcu)
1572{
1573        struct dentry *upperdentry;
1574
1575        /*
1576         * Nothing to do in RCU-walk mode or if the dentry is not on overlayfs
1577         */
1578        if (rcu || likely(!(dentry->d_flags & DCACHE_OP_REAL)))
1579                return;
1580
1581        upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER);
1582
1583        /*
1584         * If the file is on a lower layer then we can't update atime, so no
1585         * worries about stale mtime/ctime.
1586         */
1587        if (upperdentry) {
1588                struct inode *realinode = d_inode(upperdentry);
1589
1590                if ((!timespec_equal(&inode->i_mtime, &realinode->i_mtime) ||
1591                     !timespec_equal(&inode->i_ctime, &realinode->i_ctime))) {
1592                        inode->i_mtime = realinode->i_mtime;
1593                        inode->i_ctime = realinode->i_ctime;
1594                }
1595        }
1596}
1597
1598/*
1599 * With relative atime, only update atime if the previous atime is
1600 * earlier than either the ctime or mtime or if at least a day has
1601 * passed since the last atime update.
1602 */
1603static int relatime_need_update(const struct path *path, struct inode *inode,
1604                                struct timespec now, bool rcu)
1605{
1606
1607        if (!(path->mnt->mnt_flags & MNT_RELATIME))
1608                return 1;
1609
1610        update_ovl_inode_times(path->dentry, inode, rcu);
1611        /*
1612         * Is mtime younger than atime? If yes, update atime:
1613         */
1614        if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1615                return 1;
1616        /*
1617         * Is ctime younger than atime? If yes, update atime:
1618         */
1619        if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1620                return 1;
1621
1622        /*
1623         * Is the previous atime value older than a day? If yes,
1624         * update atime:
1625         */
1626        if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1627                return 1;
1628        /*
1629         * Good, we can skip the atime update:
1630         */
1631        return 0;
1632}
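/*
 * Worked example: with MNT_RELATIME, if atime = Mon 10:00 and
 * mtime = Mon 12:00, the next read updates atime because mtime is not
 * older than atime.  After that, with no further writes, reads leave
 * atime untouched until at least 24 hours have passed since it was
 * last updated.
 */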
1633
1634int generic_update_time(struct inode *inode, struct timespec *time, int flags)
1635{
1636        int iflags = I_DIRTY_TIME;
1637
1638        if (flags & S_ATIME)
1639                inode->i_atime = *time;
1640        if (flags & S_VERSION)
1641                inode_inc_iversion(inode);
1642        if (flags & S_CTIME)
1643                inode->i_ctime = *time;
1644        if (flags & S_MTIME)
1645                inode->i_mtime = *time;
1646
1647        if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
1648                iflags |= I_DIRTY_SYNC;
1649        __mark_inode_dirty(inode, iflags);
1650        return 0;
1651}
1652EXPORT_SYMBOL(generic_update_time);
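/*
 * Illustrative sketch (hypothetical "exfs" names, including the
 * made-up exfs_is_frozen() helper): a filesystem can supply its own
 * ->update_time to veto updates while reusing the generic dirtying
 * logic for the common case:
 *
 *        static int exfs_update_time(struct inode *inode,
 *                                    struct timespec *time, int flags)
 *        {
 *                if (exfs_is_frozen(inode->i_sb))
 *                        return -EROFS;
 *                return generic_update_time(inode, time, flags);
 *        }
 */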
1653
1654/*
1655 * This does the actual work of updating an inode's time or version.  The
1656 * caller must have called mnt_want_write() before calling this.
1657 */
1658static int update_time(struct inode *inode, struct timespec *time, int flags)
1659{
1660        int (*update_time)(struct inode *, struct timespec *, int);
1661
1662        update_time = inode->i_op->update_time ? inode->i_op->update_time :
1663                generic_update_time;
1664
1665        return update_time(inode, time, flags);
1666}
1667
1668/**
1669 *      __atime_needs_update    -       check whether atime needs updating
1670 *      @path: the &struct path being accessed
1671 *      @inode: inode backing the path
1672 *      @rcu: true if we are in RCU-walk mode and must not block
1673 *
1674 *      Decide whether an atime update is needed, honouring the "noatime"
1675 *      and "relatime" mount options and per-inode "noatime" markers.
1676 */
1677bool __atime_needs_update(const struct path *path, struct inode *inode,
1678                          bool rcu)
1679{
1680        struct vfsmount *mnt = path->mnt;
1681        struct timespec now;
1682
1683        if (inode->i_flags & S_NOATIME)
1684                return false;
1685
1686        /* Atime updates will likely cause i_uid and i_gid to be written
1687         * back improperly if their true value is unknown to the vfs.
1688         */
1689        if (HAS_UNMAPPED_ID(inode))
1690                return false;
1691
1692        if (IS_NOATIME(inode))
1693                return false;
1694        if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
1695                return false;
1696
1697        if (mnt->mnt_flags & MNT_NOATIME)
1698                return false;
1699        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1700                return false;
1701
1702        now = current_time(inode);
1703
1704        if (!relatime_need_update(path, inode, now, rcu))
1705                return false;
1706
1707        if (timespec_equal(&inode->i_atime, &now))
1708                return false;
1709
1710        return true;
1711}
1712
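/**
 *      touch_atime     -       update the access time
 *      @path: the &struct path to update
 *
 *      Update the accessed time on an inode and mark it for writeback.
 *      This function automatically handles read only filesystems and media,
 *      as well as the "noatime" flag and inode specific "noatime" markers.
 */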
1713void touch_atime(const struct path *path)
1714{
1715        struct vfsmount *mnt = path->mnt;
1716        struct inode *inode = d_inode(path->dentry);
1717        struct timespec now;
1718
1719        if (!__atime_needs_update(path, inode, false))
1720                return;
1721
1722        if (!sb_start_write_trylock(inode->i_sb))
1723                return;
1724
1725        if (__mnt_want_write(mnt) != 0)
1726                goto skip_update;
1727        /*
1728         * File systems can error out when updating inodes if they need to
1729         * allocate new space to modify an inode (such is the case for
1730         * Btrfs), but since we touch atime while walking down the path we
1731         * really don't care if we failed to update the atime of the file,
1732         * so just ignore the return value.
1733         * We may also fail on filesystems that have the ability to make parts
1734         * of the fs read only, e.g. subvolumes in Btrfs.
1735         */
1736        now = current_time(inode);
1737        update_time(inode, &now, S_ATIME);
1738        __mnt_drop_write(mnt);
1739skip_update:
1740        sb_end_write(inode->i_sb);
1741}
1742EXPORT_SYMBOL(touch_atime);
1743
1744/*
1745 * The logic we want is
1746 *
1747 *      if suid or (sgid and xgrp)
1748 *              remove privs
1749 */
1750int should_remove_suid(struct dentry *dentry)
1751{
1752        umode_t mode = d_inode(dentry)->i_mode;
1753        int kill = 0;
1754
1755        /* suid must always be killed */
1756        if (unlikely(mode & S_ISUID))
1757                kill = ATTR_KILL_SUID;
1758
1759        /*
1760         * sgid without the group-execute bit is just a mandatory locking
1761         * mark; leave it alone.  If group-exec is set, it's a real sgid; kill it.
1762         */
1763        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1764                kill |= ATTR_KILL_SGID;
1765
1766        if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1767                return kill;
1768
1769        return 0;
1770}
1771EXPORT_SYMBOL(should_remove_suid);
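/*
 * Worked example: a write to a mode 06755 file (suid + sgid with group
 * exec) by an unprivileged user yields ATTR_KILL_SUID|ATTR_KILL_SGID,
 * while mode 02644 (sgid without group exec, the mandatory locking
 * marker) yields 0 and is left alone.
 */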
1772
1773/*
1774 * Return mask of changes for notify_change() that need to be done as a
1775 * response to write or truncate. Return 0 if nothing has to be changed.
1776 * Negative value on error (change should be denied).
1777 */
1778int dentry_needs_remove_privs(struct dentry *dentry)
1779{
1780        struct inode *inode = d_inode(dentry);
1781        int mask = 0;
1782        int ret;
1783
1784        if (IS_NOSEC(inode))
1785                return 0;
1786
1787        mask = should_remove_suid(dentry);
1788        ret = security_inode_need_killpriv(dentry);
1789        if (ret < 0)
1790                return ret;
1791        if (ret)
1792                mask |= ATTR_KILL_PRIV;
1793        return mask;
1794}
1795
1796static int __remove_privs(struct dentry *dentry, int kill)
1797{
1798        struct iattr newattrs;
1799
1800        newattrs.ia_valid = ATTR_FORCE | kill;
1801        /*
1802         * Note we call this on write, so notify_change will not
1803         * encounter any conflicting delegations:
1804         */
1805        return notify_change(dentry, &newattrs, NULL);
1806}
1807
1808/*
1809 * Remove special file privileges (suid, capabilities) when the file is written
1810 * to or truncated.
1811 */
1812int file_remove_privs(struct file *file)
1813{
1814        struct dentry *dentry = file_dentry(file);
1815        struct inode *inode = file_inode(file);
1816        int kill;
1817        int error = 0;
1818
1819        /* Fast path for nothing security related */
1820        if (IS_NOSEC(inode))
1821                return 0;
1822
1823        kill = dentry_needs_remove_privs(dentry);
1824        if (kill < 0)
1825                return kill;
1826        if (kill)
1827                error = __remove_privs(dentry, kill);
1828        if (!error)
1829                inode_has_no_xattr(inode);
1830
1831        return error;
1832}
1833EXPORT_SYMBOL(file_remove_privs);
1834
1835/**
1836 *      file_update_time        -       update mtime and ctime
1837 *      @file: file accessed
1838 *
1839 *      Update the mtime and ctime members of an inode and mark the inode
1840 *      for writeback.  Note that this function is meant exclusively for
1841 *      usage in the file write path of filesystems, and filesystems may
1842 *      choose to explicitly ignore updates via this function with the
1843 *      S_NOCMTIME inode flag, e.g. for network filesystems where these
1844 *      timestamps are handled by the server.  This can return an error for
1845 *      filesystems that need to allocate space in order to update an inode.
1846 */
1848int file_update_time(struct file *file)
1849{
1850        struct inode *inode = file_inode(file);
1851        struct timespec now;
1852        int sync_it = 0;
1853        int ret;
1854
1855        /* First try to exhaust all avenues to not sync */
1856        if (IS_NOCMTIME(inode))
1857                return 0;
1858
1859        now = current_time(inode);
1860        if (!timespec_equal(&inode->i_mtime, &now))
1861                sync_it = S_MTIME;
1862
1863        if (!timespec_equal(&inode->i_ctime, &now))
1864                sync_it |= S_CTIME;
1865
1866        if (IS_I_VERSION(inode))
1867                sync_it |= S_VERSION;
1868
1869        if (!sync_it)
1870                return 0;
1871
1872        /* Finally allowed to write? Takes lock. */
1873        if (__mnt_want_write_file(file))
1874                return 0;
1875
1876        ret = update_time(inode, &now, sync_it);
1877        __mnt_drop_write_file(file);
1878
1879        return ret;
1880}
1881EXPORT_SYMBOL(file_update_time);
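/*
 * Illustrative sketch: the usual pairing in a filesystem write path,
 * mirroring what the generic write helpers do before copying data
 * (privileges are dropped first, then the timestamps are bumped):
 *
 *        int err = file_remove_privs(file);
 *        if (!err)
 *                err = file_update_time(file);
 *        if (err)
 *                return err;
 */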
1882
1883int inode_needs_sync(struct inode *inode)
1884{
1885        if (IS_SYNC(inode))
1886                return 1;
1887        if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
1888                return 1;
1889        return 0;
1890}
1891EXPORT_SYMBOL(inode_needs_sync);
1892
1893/*
1894 * If we try to find an inode in the inode hash while it is being
1895 * deleted, we have to wait until the filesystem completes its
1896 * deletion before reporting that it isn't found.  This function waits
1897 * until the deletion _might_ have completed.  Callers are responsible
1898 * for rechecking inode state.
1899 *
1900 * It doesn't matter if I_NEW is not set initially; a call to
1901 * wake_up_bit(&inode->i_state, __I_NEW) after removing the inode from
1902 * the hash list will do the right thing.
1903 */
1904static void __wait_on_freeing_inode(struct inode *inode)
1905{
1906        wait_queue_head_t *wq;
1907        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
1908        wq = bit_waitqueue(&inode->i_state, __I_NEW);
1909        prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
1910        spin_unlock(&inode->i_lock);
1911        spin_unlock(&inode_hash_lock);
1912        schedule();
1913        finish_wait(wq, &wait.wq_entry);
1914        spin_lock(&inode_hash_lock);
1915}
1916
1917static __initdata unsigned long ihash_entries;
1918static int __init set_ihash_entries(char *str)
1919{
1920        if (!str)
1921                return 0;
1922        ihash_entries = simple_strtoul(str, &str, 0);
1923        return 1;
1924}
1925__setup("ihash_entries=", set_ihash_entries);
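/*
 * Example: the number of hash buckets can be overridden at boot with a
 * kernel command line parameter such as:
 *
 *        ihash_entries=131072
 */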
1926
1927/*
1928 * Initialize the inode hash table.
1929 */
1930void __init inode_init_early(void)
1931{
1932        /* If hashes are distributed across NUMA nodes, defer
1933         * hash allocation until vmalloc space is available.
1934         */
1935        if (hashdist)
1936                return;
1937
1938        inode_hashtable =
1939                alloc_large_system_hash("Inode-cache",
1940                                        sizeof(struct hlist_head),
1941                                        ihash_entries,
1942                                        14,
1943                                        HASH_EARLY | HASH_ZERO,
1944                                        &i_hash_shift,
1945                                        &i_hash_mask,
1946                                        0,
1947                                        0);
1948}
1949
1950void __init inode_init(void)
1951{
1952        /* inode slab cache */
1953        inode_cachep = kmem_cache_create("inode_cache",
1954                                         sizeof(struct inode),
1955                                         0,
1956                                         (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
1957                                         SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1958                                         init_once);
1959
1960        /* Hash may have been set up in inode_init_early */
1961        if (!hashdist)
1962                return;
1963
1964        inode_hashtable =
1965                alloc_large_system_hash("Inode-cache",
1966                                        sizeof(struct hlist_head),
1967                                        ihash_entries,
1968                                        14,
1969                                        HASH_ZERO,
1970                                        &i_hash_shift,
1971                                        &i_hash_mask,
1972                                        0,
1973                                        0);
1974}
1975
1976void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
1977{
1978        inode->i_mode = mode;
1979        if (S_ISCHR(mode)) {
1980                inode->i_fop = &def_chr_fops;
1981                inode->i_rdev = rdev;
1982        } else if (S_ISBLK(mode)) {
1983                inode->i_fop = &def_blk_fops;
1984                inode->i_rdev = rdev;
1985        } else if (S_ISFIFO(mode))
1986                inode->i_fop = &pipefifo_fops;
1987        else if (S_ISSOCK(mode))
1988                ;       /* leave it no_open_fops */
1989        else
1990                printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
1991                                  " inode %s:%lu\n", mode, inode->i_sb->s_id,
1992                                  inode->i_ino);
1993}
1994EXPORT_SYMBOL(init_special_inode);
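/*
 * Illustrative sketch: a filesystem's ->mknod typically ends with this
 * call once the inode is allocated and i_mode is known:
 *
 *        init_special_inode(inode, inode->i_mode, rdev);
 *        mark_inode_dirty(inode);
 *
 * Character/block devices then have def_chr_fops/def_blk_fops and
 * i_rdev set, FIFOs get pipefifo_fops, and sockets keep no_open_fops.
 */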
1995
1996/**
1997 * inode_init_owner - Init uid, gid, mode for a new inode according to POSIX standards
1998 * @inode: New inode
1999 * @dir: Directory inode
2000 * @mode: mode of the new inode
2001 */
2002void inode_init_owner(struct inode *inode, const struct inode *dir,
2003                        umode_t mode)
2004{
2005        inode->i_uid = current_fsuid();
2006        if (dir && dir->i_mode & S_ISGID) {
2007                inode->i_gid = dir->i_gid;
2008                if (S_ISDIR(mode))
2009                        mode |= S_ISGID;
2010        } else
2011                inode->i_gid = current_fsgid();
2012        inode->i_mode = mode;
2013}
2014EXPORT_SYMBOL(inode_init_owner);
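/*
 * Worked example: creating an inode in a setgid directory (S_ISGID set
 * in dir->i_mode, dir->i_gid == 100) gives the new inode i_gid 100,
 * and if the new inode is itself a directory, S_ISGID is propagated
 * into its mode as well.
 */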
2015
2016/**
2017 * inode_owner_or_capable - check current task permissions to inode
2018 * @inode: inode being checked
2019 *
2020 * Return true if current either has CAP_FOWNER in a namespace with the
2021 * inode owner uid mapped, or owns the file.
2022 */
2023bool inode_owner_or_capable(const struct inode *inode)
2024{
2025        struct user_namespace *ns;
2026
2027        if (uid_eq(current_fsuid(), inode->i_uid))
2028                return true;
2029
2030        ns = current_user_ns();
2031        if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER))
2032                return true;
2033        return false;
2034}
2035EXPORT_SYMBOL(inode_owner_or_capable);
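/*
 * Illustrative sketch: a typical permission gate in an ioctl that
 * modifies inode attributes (the exact error code varies by caller):
 *
 *        if (!inode_owner_or_capable(inode))
 *                return -EPERM;
 */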
2036
2037/*
2038 * Direct I/O helper functions
2039 */
2040static void __inode_dio_wait(struct inode *inode)
2041{
2042        wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
2043        DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
2044
2045        do {
2046                prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
2047                if (atomic_read(&inode->i_dio_count))
2048                        schedule();
2049        } while (atomic_read(&inode->i_dio_count));
2050        finish_wait(wq, &q.wq_entry);
2051}
2052
2053/**
2054 * inode_dio_wait - wait for outstanding DIO requests to finish
2055 * @inode: inode to wait for
2056 *
2057 * Waits for all pending direct I/O requests to finish so that we can
2058 * proceed with a truncate or equivalent operation.
2059 *
2060 * Must be called under a lock that serializes taking new references
2061 * to i_dio_count, usually by inode->i_mutex.
2062 */
2063void inode_dio_wait(struct inode *inode)
2064{
2065        if (atomic_read(&inode->i_dio_count))
2066                __inode_dio_wait(inode);
2067}
2068EXPORT_SYMBOL(inode_dio_wait);
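/*
 * Illustrative sketch: a truncate-style path drains direct I/O before
 * shrinking the file, holding the inode lock so no new DIO can start:
 *
 *        inode_lock(inode);
 *        inode_dio_wait(inode);
 *        truncate_setsize(inode, newsize);
 *        inode_unlock(inode);
 */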
2069
2070/*
2071 * inode_set_flags - atomically set some inode flags
2072 *
2073 * Note: the caller should be holding i_mutex, or else be sure that
2074 * they have exclusive access to the inode structure (i.e., while the
2075 * inode is being instantiated).  The cmpxchg() loop wouldn't be
2076 * necessary if all code paths that modify i_flags actually
2077 * followed this rule, but there is at least one code path that
2078 * doesn't today, so we use cmpxchg() out of an abundance
2079 * of caution.
2080 *
2081 * In the long run, i_mutex is overkill, and we should probably look
2082 * at using the i_lock spinlock to protect i_flags, and then make sure
2083 * it is so documented in include/linux/fs.h and that all code follows
2084 * the locking convention!!
2085 */
2086void inode_set_flags(struct inode *inode, unsigned int flags,
2087                     unsigned int mask)
2088{
2089        unsigned int old_flags, new_flags;
2090
2091        WARN_ON_ONCE(flags & ~mask);
2092        do {
2093                old_flags = ACCESS_ONCE(inode->i_flags);
2094                new_flags = (old_flags & ~mask) | flags;
2095        } while (unlikely(cmpxchg(&inode->i_flags, old_flags,
2096                                  new_flags) != old_flags));
2097}
2098EXPORT_SYMBOL(inode_set_flags);
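/*
 * Example: set S_NOATIME without disturbing other i_flags bits:
 *
 *        inode_set_flags(inode, S_NOATIME, S_NOATIME);
 *
 * and clear it again:
 *
 *        inode_set_flags(inode, 0, S_NOATIME);
 */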
2099
2100void inode_nohighmem(struct inode *inode)
2101{
2102        mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2103}
2104EXPORT_SYMBOL(inode_nohighmem);
2105
2106/**
2107 * current_time - Return FS time
2108 * @inode: inode.
2109 *
2110 * Return the current time truncated to the time granularity supported by
2111 * the fs.
2112 *
2113 * Note that inode and inode->i_sb cannot be NULL.
2114 * Otherwise, the function warns and returns the time without truncation.
2115 */
2116struct timespec current_time(struct inode *inode)
2117{
2118        struct timespec now = current_kernel_time();
2119
2120        if (unlikely(!inode->i_sb)) {
2121                WARN(1, "current_time() called with uninitialized super_block in the inode");
2122                return now;
2123        }
2124
2125        return timespec_trunc(now, inode->i_sb->s_time_gran);
2126}
2127EXPORT_SYMBOL(current_time);
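/*
 * Example: with the default one-second granularity
 * (sb->s_time_gran == NSEC_PER_SEC), current_time() returns the
 * current kernel time with tv_nsec truncated to 0; a filesystem that
 * sets s_time_gran = 1 (e.g. ext4) keeps full nanosecond resolution.
 */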
2128