linux/fs/xfs/xfs_icache.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));

	return ip;
}
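
/*
 * Note: the struct inode embedded in struct xfs_inode is what VFS_I()
 * returns, and XFS_I() maps back the other way (see the container_of()
 * use in xfs_inode_free_callback() below). That embedding is why
 * inode_init_always() above can initialise the VFS portion in place
 * rather than requiring a separate allocation.
 */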

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_zone_free(xfs_inode_zone, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}
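
/*
 * The zeroed i_ino above is what xfs_iget_cache_hit() compares against the
 * inode number it looked up: if an RCU lookup finds this inode after it has
 * been marked for reclaim, the mismatch forces the lookup to return -EAGAIN
 * and retry rather than resurrect a freed inode.
 */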

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
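
/*
 * Worked example of the delay above: with xfs_syncd_centisecs at its
 * documented default of 3000 (the 30s periodic sync interval), the
 * expression evaluates to 3000 / 6 * 10 = 5000ms, giving the 5s reclaim
 * cadence described in the comment before this function. The value is the
 * fs.xfs.xfssyncd_centisecs sysctl, so the cadence scales with it.
 */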

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

static void
xfs_perag_set_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
xfs_inode_clear_reclaim_tag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}

static void
xfs_inew_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wait);
}
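
/*
 * The open-coded wait above pairs with wake_up_bit(&ip->i_flags,
 * __XFS_INEW_BIT) issued when XFS_INEW is cleared (see for example the
 * recycle failure path in xfs_iget_cache_hit() below): by parking on the
 * shared bit waitqueue via prepare_to_wait()/schedule(), waiters sleep
 * until the flag is cleared instead of spinning on it.
 */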

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int		error;
	uint32_t	nlink = inode->i_nlink;
	uint32_t	generation = inode->i_generation;
	uint64_t	version = inode->i_version;
	umode_t		mode = inode->i_mode;
	dev_t		dev = inode->i_rdev;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode->i_version = version;
	inode->i_mode = mode;
	inode->i_rdev = dev;
	return error;
}

/*
 * If we are allocating a new inode, then check that what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check that we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_d.di_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}
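
/*
 * Both lookup paths rely on the check above: xfs_iget_cache_hit() uses it
 * to catch a lookup racing with an unlink, and xfs_iget_cache_miss() uses
 * it to validate the inode it just read off disk before inserting it into
 * the radix tree.
 */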

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		if (flags & XFS_IGET_INCORE) {
			error = -EAGAIN;
			goto out_error;
		}

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = xfs_reinit_inode(mp, inode);
		if (error) {
			bool wake;
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);
			wake = !!__xfs_iflags_test(ip, XFS_INEW);
			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			if (wake)
				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
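
/*
 * Illustrative caller pattern (a sketch, not taken from this file): a
 * read-only lookup that wants the inode locked shared might look like
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... use ip ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 *
 * The reference taken by the lookup is dropped with IRELE(), as
 * xfs_icache_inode_is_allocated() below demonstrates.
 */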

/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, return 1 if the inode is allocated
 * and 0 if it is not.  For all other cases (not in cache, being torn
 * down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.  This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	IRELE(ip);
	return 0;
}

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
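
/*
 * At 32 entries the on-stack batch array in the walkers below costs
 * 32 * sizeof(struct xfs_inode *) = 256 bytes on a 64-bit build, which is
 * the stack-versus-lookup-reduction trade off the comment above refers to.
 */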

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	bool			newinos = !!(flags & XFS_AGITER_INEW_WAIT);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return -ENOENT;

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return -ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag,
	int			iter_flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
			    xfs_iflags_test(batch[i], XFS_INEW))
				xfs_inew_wait(batch[i]);
			error = execute(batch[i], flags, args);
			IRELE(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}
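
/*
 * xfs_eofb_secs is the 'speculative_prealloc_lifetime' knob referred to
 * above; with the 5 minute default that comment mentions, the delay works
 * out to 300 * 1000 = 300000ms between background trim passes.
 */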

void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}

int
xfs_inode_ag_iterator_flags(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			iter_flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
					  iter_flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
}

int
xfs_inode_ag_iterator_tag(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
					  0);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *	---------------      ----------		---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		/* xfs_iflush_abort() drops the flush lock */
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto reclaim;
	}

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

reclaim:
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it has been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	__xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during the filesystem unmount reclaim walk would leak all
 * the unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != -EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return last_error;
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called, we make sure that there is a background (fast) inode reclaim
 * in progress, while we throttle the speed of reclaim by doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC int
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) != eofb->eof_prid)
		return 0;

	return 1;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC int
xfs_inode_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) == eofb->eof_prid)
		return 1;

	return 0;
}

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int ret = 0;
	struct xfs_eofblocks *eofb = args;
	int match;

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (flags & SYNC_WAIT)
			ret = -EAGAIN;
		return ret;
	}
	ret = xfs_free_eofblocks(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}

int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	int flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
					 eofb, XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode *ip)
{
	int scan = 0;
	struct xfs_eofblocks eofb = {0};
	struct xfs_dquot *dq;

	/*
	 * Run a sync scan to increase effectiveness and use the union filter to
	 * cover all applicable quotas in a single scan.
	 */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);

	return scan;
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & XFS_IEOFBLOCKS)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_IEOFBLOCKS;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_set_eofblocks_tag(ip);

	tagged = radix_tree_tagged(&pag->pag_ici_root,
				   XFS_ICI_EOFBLOCKS_TAG);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_EOFBLOCKS_TAG);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		xfs_queue_eofblocks(ip->i_mount);

		trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
					      -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~XFS_IEOFBLOCKS;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_clear_eofblocks_tag(ip);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			     XFS_ICI_EOFBLOCKS_TAG);
	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
					       -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}