linux/fs/xfs/xfs_icache.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
        struct xfs_mount        *mp,
        xfs_ino_t               ino)
{
        struct xfs_inode        *ip;

        /*
         * if this didn't occur in transactions, we could use
         * KM_MAYFAIL and return NULL here on ENOMEM. Set the
         * code up to do this anyway.
         */
        ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
        if (!ip)
                return NULL;
        if (inode_init_always(mp->m_super, VFS_I(ip))) {
                kmem_zone_free(xfs_inode_zone, ip);
                return NULL;
        }

        /* VFS doesn't initialise i_mode! */
        VFS_I(ip)->i_mode = 0;

        XFS_STATS_INC(mp, vn_active);
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!xfs_isiflocked(ip));
        ASSERT(ip->i_ino == 0);

        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

        /* initialise the xfs inode */
        ip->i_ino = ino;
        ip->i_mount = mp;
        memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
        ip->i_afp = NULL;
        memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
        ip->i_flags = 0;
        ip->i_delayed_blks = 0;
        memset(&ip->i_d, 0, sizeof(ip->i_d));

        return ip;
}
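
/*
 * For illustration only (not functionality of this file): the comment in
 * xfs_inode_alloc() above means that, outside of transaction context, the
 * allocation could hypothetically be relaxed to
 *
 *        ip = kmem_zone_alloc(xfs_inode_zone, KM_MAYFAIL);
 *        if (!ip)
 *                return NULL;    (ENOMEM - the caller must cope)
 *
 * which is exactly the failure path the existing NULL check anticipates.
 */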

STATIC void
xfs_inode_free_callback(
        struct rcu_head         *head)
{
        struct inode            *inode = container_of(head, struct inode, i_rcu);
        struct xfs_inode        *ip = XFS_I(inode);

        switch (VFS_I(ip)->i_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
                xfs_idestroy_fork(ip, XFS_DATA_FORK);
                break;
        }

        if (ip->i_afp)
                xfs_idestroy_fork(ip, XFS_ATTR_FORK);

        if (ip->i_itemp) {
                ASSERT(!test_bit(XFS_LI_IN_AIL,
                                 &ip->i_itemp->ili_item.li_flags));
                xfs_inode_item_destroy(ip);
                ip->i_itemp = NULL;
        }

        kmem_zone_free(xfs_inode_zone, ip);
}

static void
__xfs_inode_free(
        struct xfs_inode        *ip)
{
        /* asserts to verify all state is correct here */
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        XFS_STATS_DEC(ip->i_mount, vn_active);

        call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
        struct xfs_inode        *ip)
{
        ASSERT(!xfs_isiflocked(ip));

        /*
         * Because we use RCU freeing we need to ensure the inode always
         * appears to be reclaimed with an invalid inode number when in the
         * free state. The ip->i_flags_lock provides the barrier against lookup
         * races.
         */
        spin_lock(&ip->i_flags_lock);
        ip->i_flags = XFS_IRECLAIM;
        ip->i_ino = 0;
        spin_unlock(&ip->i_flags_lock);

        __xfs_inode_free(ip);
}
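
/*
 * For illustration: the lookup side of the barrier described above is in
 * xfs_iget_cache_hit(), which revalidates the inode number under the same
 * ip->i_flags_lock before trusting an RCU-found inode:
 *
 *        spin_lock(&ip->i_flags_lock);
 *        if (ip->i_ino != ino)
 *                (treat as a recycled inode and retry with -EAGAIN)
 */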

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
        struct xfs_mount        *mp)
{
        rcu_read_lock();
        if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
                queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
                        msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
        }
        rcu_read_unlock();
}
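
/*
 * For illustration: with the default xfs_syncd_centisecs of 3000 (the 30s
 * periodic sync interval mentioned above), the delay requested here works
 * out to 3000 / 6 * 10 = 5000ms, i.e. the 5s reclaim pass interval.
 */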

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
        struct work_struct *work)
{
        struct xfs_mount *mp = container_of(to_delayed_work(work),
                                        struct xfs_mount, m_reclaim_work);

        xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
        xfs_reclaim_work_queue(mp);
}

static void
xfs_perag_set_reclaim_tag(
        struct xfs_perag        *pag)
{
        struct xfs_mount        *mp = pag->pag_mount;

        lockdep_assert_held(&pag->pag_ici_lock);
        if (pag->pag_ici_reclaimable++)
                return;

        /* propagate the reclaim tag up into the perag radix tree */
        spin_lock(&mp->m_perag_lock);
        radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
                           XFS_ICI_RECLAIM_TAG);
        spin_unlock(&mp->m_perag_lock);

        /* schedule periodic background inode reclaim */
        xfs_reclaim_work_queue(mp);

        trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

static void
xfs_perag_clear_reclaim_tag(
        struct xfs_perag        *pag)
{
        struct xfs_mount        *mp = pag->pag_mount;

        lockdep_assert_held(&pag->pag_ici_lock);
        if (--pag->pag_ici_reclaimable)
                return;

        /* clear the reclaim tag from the perag radix tree */
        spin_lock(&mp->m_perag_lock);
        radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
                             XFS_ICI_RECLAIM_TAG);
        spin_unlock(&mp->m_perag_lock);
        trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_perag        *pag;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);

        radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
                           XFS_ICI_RECLAIM_TAG);
        xfs_perag_set_reclaim_tag(pag);
        __xfs_iflags_set(ip, XFS_IRECLAIMABLE);

        spin_unlock(&ip->i_flags_lock);
        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}

STATIC void
xfs_inode_clear_reclaim_tag(
        struct xfs_perag        *pag,
        xfs_ino_t               ino)
{
        radix_tree_tag_clear(&pag->pag_ici_root,
                             XFS_INO_TO_AGINO(pag->pag_mount, ino),
                             XFS_ICI_RECLAIM_TAG);
        xfs_perag_clear_reclaim_tag(pag);
}

static void
xfs_inew_wait(
        struct xfs_inode        *ip)
{
        wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
        DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

        do {
                prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
                if (!xfs_iflags_test(ip, XFS_INEW))
                        break;
                schedule();
        } while (true);
        finish_wait(wq, &wait.wait);
}
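
/*
 * For illustration: the wake side of this waitqueue is in
 * xfs_iget_cache_hit(), which clears XFS_INEW and then calls
 *
 *        wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
 *
 * so every waiter above re-checks the flag before sleeping again.
 */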

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
        struct xfs_mount        *mp,
        struct inode            *inode)
{
        int             error;
        uint32_t        nlink = inode->i_nlink;
        uint32_t        generation = inode->i_generation;
        uint64_t        version = inode->i_version;
        umode_t         mode = inode->i_mode;
        dev_t           dev = inode->i_rdev;

        error = inode_init_always(mp->m_super, inode);

        set_nlink(inode, nlink);
        inode->i_generation = generation;
        inode->i_version = version;
        inode->i_mode = mode;
        inode->i_rdev = dev;
        return error;
}

/*
 * If we are allocating a new inode, then check that what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check that we didn't find a free inode.
 *
 * Returns:
 *      0               if the inode free state matches the lookup context
 *      -ENOENT         if the inode is free and we are not allocating
 *      -EFSCORRUPTED   if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
        struct xfs_inode        *ip,
        int                     flags)
{
        if (flags & XFS_IGET_CREATE) {
                /* should be a free inode */
                if (VFS_I(ip)->i_mode != 0) {
                        xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
                                ip->i_ino, VFS_I(ip)->i_mode);
                        return -EFSCORRUPTED;
                }

                if (ip->i_d.di_nblocks != 0) {
                        xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
                                ip->i_ino);
                        return -EFSCORRUPTED;
                }
                return 0;
        }

        /* should be an allocated inode */
        if (VFS_I(ip)->i_mode == 0)
                return -ENOENT;

        return 0;
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
        struct xfs_perag        *pag,
        struct xfs_inode        *ip,
        xfs_ino_t               ino,
        int                     flags,
        int                     lock_flags) __releases(RCU)
{
        struct inode            *inode = VFS_I(ip);
        struct xfs_mount        *mp = ip->i_mount;
        int                     error;

        /*
         * check for re-use of an inode within an RCU grace period due to the
         * radix tree nodes not being updated yet. We monitor for this by
         * setting the inode number to zero before freeing the inode structure.
         * If the inode has been reallocated and set up, then the inode number
         * will not match, so check for that, too.
         */
        spin_lock(&ip->i_flags_lock);
        if (ip->i_ino != ino) {
                trace_xfs_iget_skip(ip);
                XFS_STATS_INC(mp, xs_ig_frecycle);
                error = -EAGAIN;
                goto out_error;
        }

        /*
         * If we are racing with another cache hit that is currently
         * instantiating this inode or currently recycling it out of
         * reclaimable state, wait for the initialisation to complete
         * before continuing.
         *
         * XXX(hch): eventually we should do something equivalent to
         *           wait_on_inode to wait for these flags to be cleared
         *           instead of polling for it.
         */
        if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
                trace_xfs_iget_skip(ip);
                XFS_STATS_INC(mp, xs_ig_frecycle);
                error = -EAGAIN;
                goto out_error;
        }

        /*
         * Check the inode free state is valid. This also detects lookup
         * racing with unlinks.
         */
        error = xfs_iget_check_free_state(ip, flags);
        if (error)
                goto out_error;

        /*
         * If IRECLAIMABLE is set, we've torn down the VFS inode already.
         * Need to carefully get it back into usable state.
         */
        if (ip->i_flags & XFS_IRECLAIMABLE) {
                trace_xfs_iget_reclaim(ip);

                if (flags & XFS_IGET_INCORE) {
                        error = -EAGAIN;
                        goto out_error;
                }

                /*
                 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
                 * from stomping over us while we recycle the inode.  We can't
                 * clear the radix tree reclaimable tag yet as it requires
                 * pag_ici_lock to be held exclusive.
                 */
                ip->i_flags |= XFS_IRECLAIM;

                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();

                error = xfs_reinit_inode(mp, inode);
                if (error) {
                        bool wake;
                        /*
                         * Re-initializing the inode failed, and we are in deep
                         * trouble.  Try to re-add it to the reclaim list.
                         */
                        rcu_read_lock();
                        spin_lock(&ip->i_flags_lock);
                        wake = !!__xfs_iflags_test(ip, XFS_INEW);
                        ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
                        if (wake)
                                wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
                        ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
                        trace_xfs_iget_reclaim_fail(ip);
                        goto out_error;
                }

                spin_lock(&pag->pag_ici_lock);
                spin_lock(&ip->i_flags_lock);

                /*
                 * Clear the per-lifetime state in the inode as we are now
                 * effectively a new inode and need to return to the initial
                 * state before reuse occurs.
                 */
                ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
                ip->i_flags |= XFS_INEW;
                xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
                inode->i_state = I_NEW;

                ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
                mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

                spin_unlock(&ip->i_flags_lock);
                spin_unlock(&pag->pag_ici_lock);
        } else {
                /* If the VFS inode is being torn down, pause and try again. */
                if (!igrab(inode)) {
                        trace_xfs_iget_skip(ip);
                        error = -EAGAIN;
                        goto out_error;
                }

                /* We've got a live one. */
                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();
                trace_xfs_iget_hit(ip);
        }

        if (lock_flags != 0)
                xfs_ilock(ip, lock_flags);

        if (!(flags & XFS_IGET_INCORE))
                xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
        XFS_STATS_INC(mp, xs_ig_found);

        return 0;

out_error:
        spin_unlock(&ip->i_flags_lock);
        rcu_read_unlock();
        return error;
}

static int
xfs_iget_cache_miss(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        xfs_trans_t             *tp,
        xfs_ino_t               ino,
        struct xfs_inode        **ipp,
        int                     flags,
        int                     lock_flags)
{
        struct xfs_inode        *ip;
        int                     error;
        xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, ino);
        int                     iflags;

        ip = xfs_inode_alloc(mp, ino);
        if (!ip)
                return -ENOMEM;

        error = xfs_iread(mp, tp, ip, flags);
        if (error)
                goto out_destroy;

        if (!xfs_inode_verify_forks(ip)) {
                error = -EFSCORRUPTED;
                goto out_destroy;
        }

        trace_xfs_iget_miss(ip);

        /*
         * Check the inode free state is valid. This also detects lookup
         * racing with unlinks.
         */
        error = xfs_iget_check_free_state(ip, flags);
        if (error)
                goto out_destroy;

        /*
         * Preload the radix tree so we can insert safely under the
         * write spinlock. Note that we cannot sleep inside the preload
         * region. Since we can be called from transaction context, don't
         * recurse into the file system.
         */
        if (radix_tree_preload(GFP_NOFS)) {
                error = -EAGAIN;
                goto out_destroy;
        }

        /*
         * Because the inode hasn't been added to the radix-tree yet it can't
         * be found by another thread, so we can do the non-sleeping lock here.
         */
        if (lock_flags) {
                if (!xfs_ilock_nowait(ip, lock_flags))
                        BUG();
        }

        /*
         * These values must be set before inserting the inode into the radix
         * tree as the moment it is inserted a concurrent lookup (allowed by the
         * RCU locking mechanism) can find it and that lookup must see that this
         * is an inode currently under construction (i.e. that XFS_INEW is set).
         * The ip->i_flags_lock that protects the XFS_INEW flag forms the
         * memory barrier that ensures this detection works correctly at lookup
         * time.
         */
        iflags = XFS_INEW;
        if (flags & XFS_IGET_DONTCACHE)
                iflags |= XFS_IDONTCACHE;
        ip->i_udquot = NULL;
        ip->i_gdquot = NULL;
        ip->i_pdquot = NULL;
        xfs_iflags_set(ip, iflags);

        /* insert the new inode */
        spin_lock(&pag->pag_ici_lock);
        error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
        if (unlikely(error)) {
                WARN_ON(error != -EEXIST);
                XFS_STATS_INC(mp, xs_ig_dup);
                error = -EAGAIN;
                goto out_preload_end;
        }
        spin_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();

        *ipp = ip;
        return 0;

out_preload_end:
        spin_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();
        if (lock_flags)
                xfs_iunlock(ip, lock_flags);
out_destroy:
        __destroy_inode(VFS_I(ip));
        xfs_inode_free(ip);
        return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the per-AG inode caches.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
        uint            flags,
        uint            lock_flags,
        xfs_inode_t     **ipp)
{
        xfs_inode_t     *ip;
        int             error;
        xfs_perag_t     *pag;
        xfs_agino_t     agino;

        /*
         * xfs_reclaim_inode() uses the ILOCK to ensure an inode
         * doesn't get freed while it's being referenced during a
         * radix tree traversal here.  It assumes this function
         * acquires only the ILOCK (and therefore it has no need to
         * involve the IOLOCK in this synchronization).
         */
        ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

        /* reject inode numbers outside existing AGs */
        if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
                return -EINVAL;

        XFS_STATS_INC(mp, xs_ig_attempts);

        /* get the perag structure and ensure that it's inode capable */
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
        agino = XFS_INO_TO_AGINO(mp, ino);

again:
        error = 0;
        rcu_read_lock();
        ip = radix_tree_lookup(&pag->pag_ici_root, agino);

        if (ip) {
                error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        } else {
                rcu_read_unlock();
                if (flags & XFS_IGET_INCORE) {
                        error = -ENODATA;
                        goto out_error_or_again;
                }
                XFS_STATS_INC(mp, xs_ig_missed);

                error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
                                                        flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        }
        xfs_perag_put(pag);

        *ipp = ip;

        /*
         * If we have a real type for an on-disk inode, we can setup the inode
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
         */
        if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
                xfs_setup_existing_inode(ip);
        return 0;

out_error_or_again:
        if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
                delay(1);
                goto again;
        }
        xfs_perag_put(pag);
        return error;
}
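
/*
 * For illustration only, a hypothetical caller pattern (not taken from
 * this file) showing the lock and reference discipline around xfs_iget():
 *
 *        struct xfs_inode        *ip;
 *        int                     error;
 *
 *        error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *        if (error)
 *                return error;
 *        (... work with the locked, referenced inode ...)
 *        xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *        IRELE(ip);
 */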

/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, set *inuse to 1 if the inode is
 * allocated and 0 if it is not, then return 0.  For all other cases
 * (not in cache, being torn down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.  This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_ino_t               ino,
        bool                    *inuse)
{
        struct xfs_inode        *ip;
        int                     error;

        error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
        if (error)
                return error;

        *inuse = !!(VFS_I(ip)->i_mode);
        IRELE(ip);
        return 0;
}
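
/*
 * For illustration only, a hypothetical scrub-style caller (the AGI
 * locking requirement described above is assumed to be handled elsewhere):
 *
 *        bool    inuse;
 *        int     error;
 *
 *        error = xfs_icache_inode_is_allocated(mp, tp, ino, &inuse);
 *        if (error == -EAGAIN)
 *                (inode is in flux - back off and retry)
 *        else if (!error && !inuse)
 *                (inode is cached but free on disk)
 */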

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH        32

STATIC int
xfs_inode_ag_walk_grab(
        struct xfs_inode        *ip,
        int                     flags)
{
        struct inode            *inode = VFS_I(ip);
        bool                    newinos = !!(flags & XFS_AGITER_INEW_WAIT);

        ASSERT(rcu_read_lock_held());

        /*
         * check for stale RCU freed inode
         *
         * If the inode has been reallocated, it doesn't matter if it's not in
         * the AG we are walking - we are walking for writeback, so if it
         * passes all the "valid inode" checks and is dirty, then we'll write
         * it back anyway.  If it has been reallocated and is still being
         * initialised, the XFS_INEW check below will catch it.
         */
        spin_lock(&ip->i_flags_lock);
        if (!ip->i_ino)
                goto out_unlock_noent;

        /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
        if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
            __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
                goto out_unlock_noent;
        spin_unlock(&ip->i_flags_lock);

        /* nothing to sync during shutdown */
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EFSCORRUPTED;

        /* If we can't grab the inode, it must be on its way to reclaim. */
        if (!igrab(inode))
                return -ENOENT;

        /* inode is valid */
        return 0;

out_unlock_noent:
        spin_unlock(&ip->i_flags_lock);
        return -ENOENT;
}

STATIC int
xfs_inode_ag_walk(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        int                     (*execute)(struct xfs_inode *ip, int flags,
                                           void *args),
        int                     flags,
        void                    *args,
        int                     tag,
        int                     iter_flags)
{
        uint32_t                first_index;
        int                     last_error = 0;
        int                     skipped;
        int                     done;
        int                     nr_found;

restart:
        done = 0;
        skipped = 0;
        first_index = 0;
        nr_found = 0;
        do {
                struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                int             error = 0;
                int             i;

                rcu_read_lock();

                if (tag == -1)
                        nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH);
                else
                        nr_found = radix_tree_gang_lookup_tag(
                                        &pag->pag_ici_root,
                                        (void **) batch, first_index,
                                        XFS_LOOKUP_BATCH, tag);

                if (!nr_found) {
                        rcu_read_unlock();
                        break;
                }

                /*
                 * Grab the inodes before we drop the lock. If we found
                 * nothing, nr == 0 and the loop will be skipped.
                 */
                for (i = 0; i < nr_found; i++) {
                        struct xfs_inode *ip = batch[i];

                        if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
                                batch[i] = NULL;

                        /*
                         * Update the index for the next lookup. Catch
                         * overflows into the next AG range which can occur if
                         * we have inodes in the last block of the AG and we
                         * are currently pointing to the last inode.
                         *
                         * Because we may see inodes that are from the wrong AG
                         * due to RCU freeing and reallocation, only update the
                         * index if it lies in this AG. It was a race that led
                         * us to see this inode, so another lookup from the
                         * same index will not find it again.
                         */
                        if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
                                continue;
                        first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
                        if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
                                done = 1;
                }

                /* unlock now we've grabbed the inodes. */
                rcu_read_unlock();

                for (i = 0; i < nr_found; i++) {
                        if (!batch[i])
                                continue;
                        if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
                            xfs_iflags_test(batch[i], XFS_INEW))
                                xfs_inew_wait(batch[i]);
                        error = execute(batch[i], flags, args);
                        IRELE(batch[i]);
                        if (error == -EAGAIN) {
                                skipped++;
                                continue;
                        }
                        if (error && last_error != -EFSCORRUPTED)
                                last_error = error;
                }

                /* bail out if the filesystem is corrupted.  */
                if (error == -EFSCORRUPTED)
                        break;

                cond_resched();

        } while (nr_found && !done);

        if (skipped) {
                delay(1);
                goto restart;
        }
        return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
void
xfs_queue_eofblocks(
        struct xfs_mount *mp)
{
        rcu_read_lock();
        if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
                queue_delayed_work(mp->m_eofblocks_workqueue,
                                   &mp->m_eofblocks_work,
                                   msecs_to_jiffies(xfs_eofb_secs * 1000));
        rcu_read_unlock();
}

void
xfs_eofblocks_worker(
        struct work_struct *work)
{
        struct xfs_mount *mp = container_of(to_delayed_work(work),
                                struct xfs_mount, m_eofblocks_work);
        xfs_icache_free_eofblocks(mp, NULL);
        xfs_queue_eofblocks(mp);
}

int
xfs_inode_ag_iterator_flags(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip, int flags,
                                           void *args),
        int                     flags,
        void                    *args,
        int                     iter_flags)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;

        ag = 0;
        while ((pag = xfs_perag_get(mp, ag))) {
                ag = pag->pag_agno + 1;
                error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
                                          iter_flags);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == -EFSCORRUPTED)
                                break;
                }
        }
        return last_error;
}

int
xfs_inode_ag_iterator(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip, int flags,
                                           void *args),
        int                     flags,
        void                    *args)
{
        return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
}
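
/*
 * For illustration only: the shape of a hypothetical "execute" callback
 * consumed by the iterators above (a real one, xfs_inode_free_eofblocks(),
 * appears further down in this file):
 *
 *        static int
 *        example_execute_cb(struct xfs_inode *ip, int flags, void *args)
 *        {
 *                (per-inode work; ip holds a reference taken by igrab())
 *                return 0;
 *        }
 *
 * Returning -EAGAIN marks the inode as skipped so the AG walk restarts
 * after a short delay; returning -EFSCORRUPTED aborts the walk.
 */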

int
xfs_inode_ag_iterator_tag(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip, int flags,
                                           void *args),
        int                     flags,
        void                    *args,
        int                     tag)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;

        ag = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
                ag = pag->pag_agno + 1;
                error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
                                          0);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == -EFSCORRUPTED)
                                break;
                }
        }
        return last_error;
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
        struct xfs_inode        *ip,
        int                     flags)
{
        ASSERT(rcu_read_lock_held());

        /* quick check for stale RCU freed inode */
        if (!ip->i_ino)
                return 1;

        /*
         * If we are asked for non-blocking operation, do unlocked checks to
         * see if the inode already is being flushed or in reclaim to avoid
         * lock traffic.
         */
        if ((flags & SYNC_TRYLOCK) &&
            __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
                return 1;

        /*
         * The radix tree lock here protects a thread in xfs_iget from racing
         * with us starting reclaim on the inode.  Once we have the
         * XFS_IRECLAIM flag set it will not touch us.
         *
         * Due to RCU lookup, we may find inodes that have been freed and only
         * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
         * aren't candidates for reclaim at all, so we must check that
         * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
         */
        spin_lock(&ip->i_flags_lock);
        if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
            __xfs_iflags_test(ip, XFS_IRECLAIM)) {
                /* not a reclaim candidate. */
                spin_unlock(&ip->i_flags_lock);
                return 1;
        }
        __xfs_iflags_set(ip, XFS_IRECLAIM);
        spin_unlock(&ip->i_flags_lock);
        return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *      inode state          iflush ret         required action
 *      ---------------      ----------         ---------------
 *      bad                     -               reclaim
 *      shutdown                EIO             unpin and reclaim
 *      clean, unpinned         0               reclaim
 *      stale, unpinned         0               reclaim
 *      clean, pinned(*)        0               requeue
 *      stale, pinned           EAGAIN          requeue
 *      dirty, async            -               requeue
 *      dirty, sync             0               reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *      bad             => reclaim
 *      shutdown        => unpin and reclaim
 *      pinned, async   => requeue
 *      pinned, sync    => unpin
 *      stale           => reclaim
 *      clean           => reclaim
 *      dirty, async    => requeue
 *      dirty, sync     => flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     sync_mode)
{
        struct xfs_buf          *bp = NULL;
        xfs_ino_t               ino = ip->i_ino; /* for radix_tree_delete */
        int                     error;

restart:
        error = 0;
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iflock_nowait(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out;
                xfs_iflock(ip);
        }

        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                xfs_iunpin_wait(ip);
                /* xfs_iflush_abort() drops the flush lock */
                xfs_iflush_abort(ip, false);
                goto reclaim;
        }
        if (xfs_ipincount(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out_ifunlock;
                xfs_iunpin_wait(ip);
        }
        if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
                xfs_ifunlock(ip);
                goto reclaim;
        }

        /*
         * Never flush out dirty data during non-blocking reclaim, as it would
         * just contend with AIL pushing trying to do the same job.
         */
        if (!(sync_mode & SYNC_WAIT))
                goto out_ifunlock;

        /*
         * Now we have an inode that needs flushing.
         *
         * Note that xfs_iflush will never block on the inode buffer lock, as
         * xfs_ifree_cluster() can lock the inode buffer before it locks the
         * ip->i_lock, and we are doing the exact opposite here.  As a result,
         * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
         * result in an ABBA deadlock with xfs_ifree_cluster().
         *
         * As xfs_ifree_cluster() must gather all inodes that are active in the
         * cache to mark them stale, if we hit this case we don't actually want
         * to do IO here - we want the inode marked stale so we can simply
         * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
         * inode, back off and try again.  Hopefully the next pass through will
         * see the stale flag set on the inode.
         */
        error = xfs_iflush(ip, &bp);
        if (error == -EAGAIN) {
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                /* backoff longer than in xfs_ifree_cluster */
                delay(2);
                goto restart;
        }

        if (!error) {
                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
        }

reclaim:
        ASSERT(!xfs_isiflocked(ip));

        /*
         * Because we use RCU freeing we need to ensure the inode always appears
         * to be reclaimed with an invalid inode number when in the free state.
         * We do this as early as possible under the ILOCK so that
         * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
         * detect races with us here. By doing this, we guarantee that once
         * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
         * it will see either a valid inode that will serialise correctly, or it
         * will see an invalid inode that it can skip.
         */
        spin_lock(&ip->i_flags_lock);
        ip->i_flags = XFS_IRECLAIM;
        ip->i_ino = 0;
        spin_unlock(&ip->i_flags_lock);

        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
        /*
         * Remove the inode from the per-AG radix tree.
         *
         * Because radix_tree_delete won't complain even if the item was never
         * added to the tree, assert that it's been there before to catch
         * problems with the inode life time early on.
         */
        spin_lock(&pag->pag_ici_lock);
        if (!radix_tree_delete(&pag->pag_ici_root,
                                XFS_INO_TO_AGINO(ip->i_mount, ino)))
                ASSERT(0);
        xfs_perag_clear_reclaim_tag(pag);
        spin_unlock(&pag->pag_ici_lock);

        /*
         * Here we do an (almost) spurious inode lock in order to coordinate
         * with inode cache radix tree lookups.  This is because the lookup
         * can reference the inodes in the cache without taking references.
         *
         * We make that OK here by ensuring that we wait until the inode is
         * unlocked after the lookup before we go ahead and free it.
         */
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_qm_dqdetach(ip);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        __xfs_inode_free(ip);
        return error;

out_ifunlock:
        xfs_ifunlock(ip);
out:
        xfs_iflags_clear(ip, XFS_IRECLAIM);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        /*
         * We could return -EAGAIN here to make reclaim rescan the inode tree in
         * a short while. However, this just burns CPU time scanning the tree
         * waiting for IO to complete and the reclaim work never goes back to
         * the idle state. Instead, return 0 to let the next scheduled
         * background reclaim attempt to reclaim the inode again.
         */
        return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk will leak all the
 * unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
        struct xfs_mount        *mp,
        int                     flags,
        int                     *nr_to_scan)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;
        int                     trylock = flags & SYNC_TRYLOCK;
        int                     skipped;

restart:
        ag = 0;
        skipped = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                unsigned long   first_index = 0;
                int             done = 0;
                int             nr_found = 0;

                ag = pag->pag_agno + 1;

                if (trylock) {
                        if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
                                skipped++;
                                xfs_perag_put(pag);
                                continue;
                        }
                        first_index = pag->pag_ici_reclaim_cursor;
                } else
                        mutex_lock(&pag->pag_ici_reclaim_lock);

                do {
                        struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                        int     i;

                        rcu_read_lock();
                        nr_found = radix_tree_gang_lookup_tag(
                                        &pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH,
                                        XFS_ICI_RECLAIM_TAG);
                        if (!nr_found) {
                                done = 1;
                                rcu_read_unlock();
                                break;
                        }

                        /*
                         * Grab the inodes before we drop the lock. If we found
                         * nothing, nr == 0 and the loop will be skipped.
                         */
                        for (i = 0; i < nr_found; i++) {
                                struct xfs_inode *ip = batch[i];

                                if (done || xfs_reclaim_inode_grab(ip, flags))
                                        batch[i] = NULL;

                                /*
                                 * Update the index for the next lookup. Catch
                                 * overflows into the next AG range which can
                                 * occur if we have inodes in the last block of
                                 * the AG and we are currently pointing to the
                                 * last inode.
                                 *
                                 * Because we may see inodes that are from the
                                 * wrong AG due to RCU freeing and
                                 * reallocation, only update the index if it
                                 * lies in this AG. It was a race that led us
                                 * to see this inode, so another lookup from
                                 * the same index will not find it again.
                                 */
                                if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
                                                                pag->pag_agno)
                                        continue;
                                first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
                                if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
                                        done = 1;
                        }

                        /* unlock now we've grabbed the inodes. */
                        rcu_read_unlock();

                        for (i = 0; i < nr_found; i++) {
                                if (!batch[i])
                                        continue;
                                error = xfs_reclaim_inode(batch[i], pag, flags);
                                if (error && last_error != -EFSCORRUPTED)
                                        last_error = error;
                        }

                        *nr_to_scan -= XFS_LOOKUP_BATCH;

                        cond_resched();

                } while (nr_found && !done && *nr_to_scan > 0);

                if (trylock && !done)
                        pag->pag_ici_reclaim_cursor = first_index;
                else
                        pag->pag_ici_reclaim_cursor = 0;
                mutex_unlock(&pag->pag_ici_reclaim_lock);
                xfs_perag_put(pag);
        }

        /*
         * If we skipped any AG, and we still have scan count remaining, do
         * another pass this time using blocking reclaim semantics (i.e.
         * waiting on the reclaim locks and ignoring the reclaim cursors). This
         * ensures that when we get more reclaimers than AGs we block rather
         * than spin trying to execute reclaim.
         */
        if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
                trylock = 0;
                goto restart;
        }
        return last_error;
}

int
xfs_reclaim_inodes(
        xfs_mount_t     *mp,
        int             mode)
{
        int             nr_to_scan = INT_MAX;

        return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we throttle the speed of the caller by doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
xfs_reclaim_inodes_nr(
        struct xfs_mount        *mp,
        int                     nr_to_scan)
{
        /* kick background reclaimer and push the AIL */
        xfs_reclaim_work_queue(mp);
        xfs_ail_push_all(mp->m_ail);

        xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
        struct xfs_mount        *mp)
{
        struct xfs_perag        *pag;
        xfs_agnumber_t          ag = 0;
        int                     reclaimable = 0;

        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                ag = pag->pag_agno + 1;
                reclaimable += pag->pag_ici_reclaimable;
                xfs_perag_put(pag);
        }
        return reclaimable;
}
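
/*
 * For illustration, and assuming the usual wiring in fs/xfs/xfs_super.c
 * (not shown in this file): the superblock shrinker is expected to use
 * xfs_reclaim_inodes_count() for its object count and to call
 * xfs_reclaim_inodes_nr() above to free a batch, roughly:
 *
 *        nr = xfs_reclaim_inodes_count(mp);
 *        ...
 *        xfs_reclaim_inodes_nr(mp, nr_to_scan);
 */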

STATIC int
xfs_inode_match_id(
        struct xfs_inode        *ip,
        struct xfs_eofblocks    *eofb)
{
        if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
            !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
                return 0;

        if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
            !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
                return 0;

        if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
            xfs_get_projid(ip) != eofb->eof_prid)
                return 0;

        return 1;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC int
xfs_inode_match_id_union(
        struct xfs_inode        *ip,
        struct xfs_eofblocks    *eofb)
{
        if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
            uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
                return 1;

        if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
            gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
                return 1;

        if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
            xfs_get_projid(ip) == eofb->eof_prid)
                return 1;

        return 0;
}
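
/*
 * For illustration: with both XFS_EOF_FLAGS_UID and XFS_EOF_FLAGS_GID set,
 * an inode owned by a matching uid but a non-matching gid fails
 * xfs_inode_match_id() (all criteria must hold) yet passes
 * xfs_inode_match_id_union() (any one matching criterion suffices).
 */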

STATIC int
xfs_inode_free_eofblocks(
        struct xfs_inode        *ip,
        int                     flags,
        void                    *args)
{
        int ret = 0;
        struct xfs_eofblocks *eofb = args;
        int match;

        if (!xfs_can_free_eofblocks(ip, false)) {
                /* inode could be preallocated or append-only */
                trace_xfs_inode_free_eofblocks_invalid(ip);
                xfs_inode_clear_eofblocks_tag(ip);
                return 0;
        }

        /*
         * If the mapping is dirty the operation can block and wait for some
         * time. Unless we are waiting, skip it.
         */
        if (!(flags & SYNC_WAIT) &&
            mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
                return 0;

        if (eofb) {
                if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
                        match = xfs_inode_match_id_union(ip, eofb);
                else
                        match = xfs_inode_match_id(ip, eofb);
                if (!match)
                        return 0;

                /* skip the inode if the file size is too small */
                if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
                    XFS_ISIZE(ip) < eofb->eof_min_file_size)
                        return 0;
        }

        /*
         * If the caller is waiting, return -EAGAIN to keep the background
         * scanner moving and revisit the inode in a subsequent pass.
         */
        if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
                if (flags & SYNC_WAIT)
                        ret = -EAGAIN;
                return ret;
        }
        ret = xfs_free_eofblocks(ip);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);

        return ret;
}

int
xfs_icache_free_eofblocks(
        struct xfs_mount        *mp,
        struct xfs_eofblocks    *eofb)
{
        int flags = SYNC_TRYLOCK;

        if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
                flags = SYNC_WAIT;

        return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
                                         eofb, XFS_ICI_EOFBLOCKS_TAG);
}
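
/*
 * For illustration only, a hypothetical caller-built filter (the function
 * below, xfs_inode_free_quota_eofblocks(), constructs a real one):
 *
 *        struct xfs_eofblocks    eofb = {0};
 *
 *        eofb.eof_flags = XFS_EOF_FLAGS_SYNC | XFS_EOF_FLAGS_UID;
 *        eofb.eof_uid = uid;     (kuid_t of the owner whose files to trim)
 *        error = xfs_icache_free_eofblocks(mp, &eofb);
 */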

/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
int
xfs_inode_free_quota_eofblocks(
        struct xfs_inode *ip)
{
        int scan = 0;
        struct xfs_eofblocks eofb = {0};
        struct xfs_dquot *dq;

        /*
         * Run a sync scan to increase effectiveness and use the union filter to
         * cover all applicable quotas in a single scan.
         */
        eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

        if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
                dq = xfs_inode_dquot(ip, XFS_DQ_USER);
                if (dq && xfs_dquot_lowsp(dq)) {
                        eofb.eof_uid = VFS_I(ip)->i_uid;
                        eofb.eof_flags |= XFS_EOF_FLAGS_UID;
                        scan = 1;
                }
        }

        if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
                dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
                if (dq && xfs_dquot_lowsp(dq)) {
                        eofb.eof_gid = VFS_I(ip)->i_gid;
                        eofb.eof_flags |= XFS_EOF_FLAGS_GID;
                        scan = 1;
                }
        }

        if (scan)
                xfs_icache_free_eofblocks(ip->i_mount, &eofb);

        return scan;
}

void
xfs_inode_set_eofblocks_tag(
        xfs_inode_t     *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;
        int tagged;

        /*
         * Don't bother locking the AG and looking up in the radix trees
         * if we already know that we have the tag set.
         */
        if (ip->i_flags & XFS_IEOFBLOCKS)
                return;
        spin_lock(&ip->i_flags_lock);
        ip->i_flags |= XFS_IEOFBLOCKS;
        spin_unlock(&ip->i_flags_lock);

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        trace_xfs_inode_set_eofblocks_tag(ip);

        tagged = radix_tree_tagged(&pag->pag_ici_root,
                                   XFS_ICI_EOFBLOCKS_TAG);
        radix_tree_tag_set(&pag->pag_ici_root,
                           XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                           XFS_ICI_EOFBLOCKS_TAG);
        if (!tagged) {
                /* propagate the eofblocks tag up into the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_set(&ip->i_mount->m_perag_tree,
                                   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                   XFS_ICI_EOFBLOCKS_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);

                /* kick off background trimming */
                xfs_queue_eofblocks(ip->i_mount);

                trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
                                              -1, _RET_IP_);
        }

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
        xfs_inode_t     *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;

        spin_lock(&ip->i_flags_lock);
        ip->i_flags &= ~XFS_IEOFBLOCKS;
        spin_unlock(&ip->i_flags_lock);

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        trace_xfs_inode_clear_eofblocks_tag(ip);

        radix_tree_tag_clear(&pag->pag_ici_root,
                             XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                             XFS_ICI_EOFBLOCKS_TAG);
        if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
                /* clear the eofblocks tag from the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
                                     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                     XFS_ICI_EOFBLOCKS_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);
                trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
                                               -1, _RET_IP_);
        }

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}