linux/fs/xfs/linux-2.6/xfs_sync.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>
/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH        32
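
/*
 * Illustrative sizing, not from the original source: each walk keeps a
 * batch[XFS_LOOKUP_BATCH] array of inode pointers on the stack, so with
 * 64-bit pointers a batch of 32 costs 32 * 8 = 256 bytes per walk frame.
 * Doubling the batch would halve the number of radix tree lookups but
 * also double that stack footprint, which the reclaim path cannot afford.
 */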

STATIC int
xfs_inode_ag_walk_grab(
        struct xfs_inode        *ip)
{
        struct inode            *inode = VFS_I(ip);

        ASSERT(rcu_read_lock_held());

        /*
         * check for stale RCU freed inode
         *
         * If the inode has been reallocated, it doesn't matter if it's not in
         * the AG we are walking - we are walking for writeback, so if it
         * passes all the "valid inode" checks and is dirty, then we'll write
         * it back anyway.  If it has been reallocated and is still being
         * initialised, the XFS_INEW check below will catch it.
         */
        spin_lock(&ip->i_flags_lock);
        if (!ip->i_ino)
                goto out_unlock_noent;

        /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
        if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
                goto out_unlock_noent;
        spin_unlock(&ip->i_flags_lock);

        /* nothing to sync during shutdown */
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return EFSCORRUPTED;

        /* If we can't grab the inode, it must be on its way to reclaim. */
        if (!igrab(inode))
                return ENOENT;

        if (is_bad_inode(inode)) {
                IRELE(ip);
                return ENOENT;
        }

        /* inode is valid */
        return 0;

out_unlock_noent:
        spin_unlock(&ip->i_flags_lock);
        return ENOENT;
}

STATIC int
xfs_inode_ag_walk(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags),
        int                     flags)
{
        uint32_t                first_index;
        int                     last_error = 0;
        int                     skipped;
        int                     done;
        int                     nr_found;

restart:
        done = 0;
        skipped = 0;
        first_index = 0;
        nr_found = 0;
        do {
                struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                int             error = 0;
                int             i;

                rcu_read_lock();
                nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH);
                if (!nr_found) {
                        rcu_read_unlock();
                        break;
                }

                /*
                 * Grab the inodes before we drop the lock. If we found
                 * nothing, nr_found == 0 and the loop will be skipped.
                 */
                for (i = 0; i < nr_found; i++) {
                        struct xfs_inode *ip = batch[i];

                        if (done || xfs_inode_ag_walk_grab(ip))
                                batch[i] = NULL;

                        /*
                         * Update the index for the next lookup. Catch
                         * overflows into the next AG range which can occur if
                         * we have inodes in the last block of the AG and we
                         * are currently pointing to the last inode.
                         *
                         * Because we may see inodes that are from the wrong AG
                         * due to RCU freeing and reallocation, only update the
                         * index if it lies in this AG. It was a race that led
                         * us to see this inode, so another lookup from the
                         * same index will not find it again.
                         */
                        if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
                                continue;
                        first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
                        if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
                                done = 1;
                }

                /* unlock now that we've grabbed the inodes. */
                rcu_read_unlock();

                for (i = 0; i < nr_found; i++) {
                        if (!batch[i])
                                continue;
                        error = execute(batch[i], pag, flags);
                        IRELE(batch[i]);
                        if (error == EAGAIN) {
                                skipped++;
                                continue;
                        }
                        if (error && last_error != EFSCORRUPTED)
                                last_error = error;
                }

                /* bail out if the filesystem is corrupted.  */
                if (error == EFSCORRUPTED)
                        break;

        } while (nr_found && !done);

        if (skipped) {
                delay(1);
                goto restart;
        }
        return last_error;
}

int
xfs_inode_ag_iterator(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags),
        int                     flags)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;

        ag = 0;
        while ((pag = xfs_perag_get(mp, ag))) {
                ag = pag->pag_agno + 1;
                error = xfs_inode_ag_walk(mp, pag, execute, flags);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == EFSCORRUPTED)
                                break;
                }
        }
        return XFS_ERROR(last_error);
}
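
/*
 * A minimal sketch of an "execute" callback for xfs_inode_ag_iterator()
 * (hypothetical, for illustration only - the real callbacks in this file
 * are xfs_sync_inode_data and xfs_sync_inode_attr below). Returning
 * EAGAIN makes xfs_inode_ag_walk() count the inode as skipped and restart
 * the walk after a short delay; EFSCORRUPTED aborts the AG iteration.
 */
#if 0
STATIC int
xfs_example_execute(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     flags)
{
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return EFSCORRUPTED;
        /* per-inode work goes here; the walk holds an igrab() reference */
        return 0;
}
#endif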

STATIC int
xfs_sync_inode_data(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     flags)
{
        struct inode            *inode = VFS_I(ip);
        struct address_space    *mapping = inode->i_mapping;
        int                     error = 0;

        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                goto out_wait;

        if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
                if (flags & SYNC_TRYLOCK)
                        goto out_wait;
                xfs_ilock(ip, XFS_IOLOCK_SHARED);
        }

        error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
                                0 : XBF_ASYNC, FI_NONE);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
        if (flags & SYNC_WAIT)
                xfs_ioend_wait(ip);
        return error;
}

STATIC int
xfs_sync_inode_attr(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     flags)
{
        int                     error = 0;

        xfs_ilock(ip, XFS_ILOCK_SHARED);
        if (xfs_inode_clean(ip))
                goto out_unlock;
        if (!xfs_iflock_nowait(ip)) {
                if (!(flags & SYNC_WAIT))
                        goto out_unlock;
                xfs_iflock(ip);
        }

        if (xfs_inode_clean(ip)) {
                xfs_ifunlock(ip);
                goto out_unlock;
        }

        error = xfs_iflush(ip, flags);

 out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
        struct xfs_mount        *mp,
        int                     flags)
{
        int                     error;

        ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

        error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
        if (error)
                return XFS_ERROR(error);

        xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
        return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
STATIC int
xfs_sync_attr(
        struct xfs_mount        *mp,
        int                     flags)
{
        ASSERT((flags & ~SYNC_WAIT) == 0);

        return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
}

STATIC int
xfs_sync_fsdata(
        struct xfs_mount        *mp)
{
        struct xfs_buf          *bp;

        /*
         * If the buffer is pinned then push on the log so we won't get stuck
         * waiting in the write for someone, maybe ourselves, to flush the log.
         *
         * Even if our caller has just forced the log, we did not have the
         * superblock buffer locked at that point so it can become pinned in
         * between there and here.
         */
        bp = xfs_getsb(mp, 0);
        if (XFS_BUF_ISPINNED(bp))
                xfs_log_force(mp, 0);

        return xfs_bwrite(mp, bp);
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
        struct xfs_mount        *mp)
{
        int                     error, error2 = 0;

        /* push non-blocking */
        xfs_sync_data(mp, 0);
        xfs_qm_sync(mp, SYNC_TRYLOCK);

        /* push and block till complete */
        xfs_sync_data(mp, SYNC_WAIT);
        xfs_qm_sync(mp, SYNC_WAIT);

        /* write superblock and hoover up shutdown errors */
        error = xfs_sync_fsdata(mp);

        /* make sure all delwri buffers are written out */
        xfs_flush_buftarg(mp->m_ddev_targp, 1);

        /* mark the log as covered if needed */
        if (xfs_log_need_covered(mp))
                error2 = xfs_fs_log_dummy(mp);

        /* flush data-only devices */
        if (mp->m_rtdev_targp)
                XFS_bflush(mp->m_rtdev_targp);

        return error ? error : error2;
}

STATIC void
xfs_quiesce_fs(
        struct xfs_mount        *mp)
{
        int     count = 0, pincount;

        xfs_reclaim_inodes(mp, 0);
        xfs_flush_buftarg(mp->m_ddev_targp, 0);

        /*
         * This loop must run at least twice.  The first instance of the loop
         * will flush most metadata but that will generate more metadata
         * (typically directory updates), which then must be flushed and
         * logged before we can write the unmount record. We also do a sync
         * reclaim of inodes to catch any that the above delwri flush skipped.
         */
        do {
                xfs_reclaim_inodes(mp, SYNC_WAIT);
                xfs_sync_attr(mp, SYNC_WAIT);
                pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
                if (!pincount) {
                        delay(50);
                        count++;
                }
        } while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
        struct xfs_mount        *mp)
{
        int     error = 0;

        /* wait for all modifications to complete */
        while (atomic_read(&mp->m_active_trans) > 0)
                delay(100);

        /* flush inodes and push all remaining buffers out to disk */
        xfs_quiesce_fs(mp);

        /*
         * Just warn here till VFS can correctly support
         * read-only remount without racing.
         */
        WARN_ON(atomic_read(&mp->m_active_trans) != 0);

        /* Push the superblock and write an unmount record */
        error = xfs_log_sbcount(mp, 1);
        if (error)
                xfs_fs_cmn_err(CE_WARN, mp,
                                "xfs_attr_quiesce: failed to log sb changes. "
                                "Frozen image may not be consistent.");
        xfs_log_unmount_write(mp);
        xfs_unmountfs_writesb(mp);
}
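
/*
 * A minimal sketch (an assumption, not code from this file) of how the two
 * quiesce phases described above fit together on a freeze or read-only
 * remount path:
 *
 *      xfs_quiesce_data(mp);   - phase 1: sync data, write the superblock
 *      xfs_quiesce_attr(mp);   - phase 2: drain transactions, flush inodes
 *                                and write the unmount record
 */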

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
        struct xfs_mount *mp,
        void            *data,
        void            (*syncer)(struct xfs_mount *, void *),
        struct completion *completion)
{
        struct xfs_sync_work *work;

        work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
        INIT_LIST_HEAD(&work->w_list);
        work->w_syncer = syncer;
        work->w_data = data;
        work->w_mount = mp;
        work->w_completion = completion;
        spin_lock(&mp->m_sync_lock);
        list_add_tail(&work->w_list, &mp->m_sync_list);
        spin_unlock(&mp->m_sync_lock);
        wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
        struct xfs_mount *mp,
        void            *arg)
{
        struct inode    *inode = arg;
        xfs_sync_data(mp, SYNC_TRYLOCK);
        xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
        iput(inode);
}

void
xfs_flush_inodes(
        xfs_inode_t     *ip)
{
        struct inode    *inode = VFS_I(ip);
        DECLARE_COMPLETION_ONSTACK(completion);

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
        wait_for_completion(&completion);
        xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}

/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
        struct xfs_mount *mp,
        void            *unused)
{
        int             error;

        if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
                /* dgc: errors ignored here */
                if (mp->m_super->s_frozen == SB_UNFROZEN &&
                    xfs_log_need_covered(mp))
                        error = xfs_fs_log_dummy(mp);
                else
                        xfs_log_force(mp, 0);
                xfs_reclaim_inodes(mp, 0);
                error = xfs_qm_sync(mp, SYNC_TRYLOCK);
        }
        mp->m_sync_seq++;
        wake_up(&mp->m_wait_single_sync_task);
}

STATIC int
xfssyncd(
        void                    *arg)
{
        struct xfs_mount        *mp = arg;
        long                    timeleft;
        xfs_sync_work_t         *work, *n;
        LIST_HEAD               (tmp);

        set_freezable();
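        /*
         * xfs_syncd_centisecs is in hundredths of a second; multiplying by
         * msecs_to_jiffies(10) converts the configured sync period into
         * jiffies for schedule_timeout_interruptible() below.
         */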
        timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
                if (list_empty(&mp->m_sync_list))
                        timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop() && list_empty(&mp->m_sync_list))
                        break;

                spin_lock(&mp->m_sync_lock);
                /*
                 * We can get woken by laptop mode, to do a sync -
                 * that's the (only!) case where the list would be
                 * empty with time remaining.
                 */
                if (!timeleft || list_empty(&mp->m_sync_list)) {
                        if (!timeleft)
                                timeleft = xfs_syncd_centisecs *
                                                        msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&mp->m_sync_work.w_list);
                        list_add_tail(&mp->m_sync_work.w_list,
                                        &mp->m_sync_list);
                }
                list_splice_init(&mp->m_sync_list, &tmp);
                spin_unlock(&mp->m_sync_lock);

                list_for_each_entry_safe(work, n, &tmp, w_list) {
                        (*work->w_syncer)(mp, work->w_data);
                        list_del(&work->w_list);
                        if (work == &mp->m_sync_work)
                                continue;
                        if (work->w_completion)
                                complete(work->w_completion);
                        kmem_free(work);
                }
        }

        return 0;
}

int
xfs_syncd_init(
        struct xfs_mount        *mp)
{
        mp->m_sync_work.w_syncer = xfs_sync_worker;
        mp->m_sync_work.w_mount = mp;
        mp->m_sync_work.w_completion = NULL;
        mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
        if (IS_ERR(mp->m_sync_task))
                return -PTR_ERR(mp->m_sync_task);
        return 0;
}

void
xfs_syncd_stop(
        struct xfs_mount        *mp)
{
        kthread_stop(mp->m_sync_task);
}

void
__xfs_inode_set_reclaim_tag(
        struct xfs_perag        *pag,
        struct xfs_inode        *ip)
{
        radix_tree_tag_set(&pag->pag_ici_root,
                           XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                           XFS_ICI_RECLAIM_TAG);

        if (!pag->pag_ici_reclaimable) {
                /* propagate the reclaim tag up into the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_set(&ip->i_mount->m_perag_tree,
                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                XFS_ICI_RECLAIM_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);
                trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
                                                        -1, _RET_IP_);
        }
        pag->pag_ici_reclaimable++;
}
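
/*
 * Setting the tag in the per-mount perag tree above is what lets
 * xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG), used by the reclaim
 * walk and the shrinker below, visit only the AGs that actually contain
 * reclaimable inodes instead of scanning every AG.
 */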

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
        xfs_inode_t     *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);
        __xfs_inode_set_reclaim_tag(pag, ip);
        __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
        spin_unlock(&ip->i_flags_lock);
        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}

STATIC void
__xfs_inode_clear_reclaim(
        xfs_perag_t     *pag,
        xfs_inode_t     *ip)
{
        pag->pag_ici_reclaimable--;
        if (!pag->pag_ici_reclaimable) {
                /* clear the reclaim tag from the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                XFS_ICI_RECLAIM_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);
                trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
                                                        -1, _RET_IP_);
        }
}

void
__xfs_inode_clear_reclaim_tag(
        xfs_mount_t     *mp,
        xfs_perag_t     *pag,
        xfs_inode_t     *ip)
{
        radix_tree_tag_clear(&pag->pag_ici_root,
                        XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
        __xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
        struct xfs_inode        *ip,
        int                     flags)
{
        ASSERT(rcu_read_lock_held());

        /* quick check for stale RCU freed inode */
        if (!ip->i_ino)
                return 1;

        /*
         * do some unlocked checks first to avoid unnecessary lock traffic.
         * The first is a flush lock check, the second is an already-in-reclaim
         * check. Only do these checks if we are not going to block on locks.
         */
        if ((flags & SYNC_TRYLOCK) &&
            (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
                return 1;
        }

        /*
         * The radix tree lock here protects a thread in xfs_iget from racing
         * with us starting reclaim on the inode.  Once we have the
         * XFS_IRECLAIM flag set it will not touch us.
         *
         * Due to RCU lookup, we may find inodes that have been freed and only
         * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
         * aren't candidates for reclaim at all, so we must check that
         * XFS_IRECLAIMABLE is set before proceeding to reclaim.
         */
        spin_lock(&ip->i_flags_lock);
        if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
            __xfs_iflags_test(ip, XFS_IRECLAIM)) {
                /* not a reclaim candidate. */
                spin_unlock(&ip->i_flags_lock);
                return 1;
        }
        __xfs_iflags_set(ip, XFS_IRECLAIM);
        spin_unlock(&ip->i_flags_lock);
        return 0;
}

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *      inode state          iflush ret         required action
 *      ---------------      ----------         ---------------
 *      bad                     -               reclaim
 *      shutdown                EIO             unpin and reclaim
 *      clean, unpinned         0               reclaim
 *      stale, unpinned         0               reclaim
 *      clean, pinned(*)        0               requeue
 *      stale, pinned           EAGAIN          requeue
 *      dirty, delwri ok        0               requeue
 *      dirty, delwri blocked   EAGAIN          requeue
 *      dirty, sync flush       0               reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *      bad             => reclaim
 *      shutdown        => unpin and reclaim
 *      pinned, delwri  => requeue
 *      pinned, sync    => unpin
 *      stale           => reclaim
 *      clean           => reclaim
 *      dirty, delwri   => flush and requeue
 *      dirty, sync     => flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     sync_mode)
{
        int     error = 0;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iflock_nowait(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out;
                xfs_iflock(ip);
        }

        if (is_bad_inode(VFS_I(ip)))
                goto reclaim;
        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                xfs_iunpin_wait(ip);
                goto reclaim;
        }
        if (xfs_ipincount(ip)) {
                if (!(sync_mode & SYNC_WAIT)) {
                        xfs_ifunlock(ip);
                        goto out;
                }
                xfs_iunpin_wait(ip);
        }
        if (xfs_iflags_test(ip, XFS_ISTALE))
                goto reclaim;
        if (xfs_inode_clean(ip))
                goto reclaim;

        /* Now we have an inode that needs flushing */
        error = xfs_iflush(ip, sync_mode);
        if (sync_mode & SYNC_WAIT) {
                xfs_iflock(ip);
                goto reclaim;
        }

        /*
         * When we have to flush an inode but don't have SYNC_WAIT set, we
         * flush the inode out using a delwri buffer and wait for the next
         * call into reclaim to find it in a clean state instead of waiting for
         * it now. We also don't return errors here - if the error is transient
         * then the next reclaim pass will flush the inode, and if the error
         * is permanent then the next sync reclaim will reclaim the inode and
         * pass on the error.
         */
        if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                xfs_fs_cmn_err(CE_WARN, ip->i_mount,
                        "inode 0x%llx background reclaim flush failed with %d",
                        (long long)ip->i_ino, error);
        }
out:
        xfs_iflags_clear(ip, XFS_IRECLAIM);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        /*
         * We could return EAGAIN here to make reclaim rescan the inode tree in
         * a short while. However, this just burns CPU time scanning the tree
         * waiting for IO to complete and xfssyncd never goes back to the idle
         * state. Instead, return 0 to let the next scheduled background reclaim
         * attempt to reclaim the inode again.
         */
        return 0;

reclaim:
        xfs_ifunlock(ip);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        XFS_STATS_INC(xs_ig_reclaims);
        /*
         * Remove the inode from the per-AG radix tree.
         *
         * Because radix_tree_delete won't complain even if the item was never
         * added to the tree, assert that it was there before to catch
         * problems with the inode life time early on.
         */
        spin_lock(&pag->pag_ici_lock);
        if (!radix_tree_delete(&pag->pag_ici_root,
                                XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
                ASSERT(0);
        __xfs_inode_clear_reclaim(pag, ip);
        spin_unlock(&pag->pag_ici_lock);

        /*
         * Here we do an (almost) spurious inode lock in order to coordinate
         * with inode cache radix tree lookups.  This is because the lookup
         * can reference the inodes in the cache without taking references.
         *
         * We make that OK here by ensuring that we wait until the inode is
         * unlocked after the lookup before we go ahead and free it.  We get
         * both the ilock and the iolock because the code may need to drop
         * the ilock but will still hold the iolock.
         */
        xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
        xfs_qm_dqdetach(ip);
        xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

        xfs_inode_free(ip);
        return error;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during the filesystem unmount reclaim walk will leak all
 * the unreclaimed inodes.
 */
int
xfs_reclaim_inodes_ag(
        struct xfs_mount        *mp,
        int                     flags,
        int                     *nr_to_scan)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;
        int                     trylock = flags & SYNC_TRYLOCK;
        int                     skipped;

restart:
        ag = 0;
        skipped = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                unsigned long   first_index = 0;
                int             done = 0;
                int             nr_found = 0;

                ag = pag->pag_agno + 1;

                if (trylock) {
                        if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
                                skipped++;
                                xfs_perag_put(pag);
                                continue;
                        }
                        first_index = pag->pag_ici_reclaim_cursor;
                } else
                        mutex_lock(&pag->pag_ici_reclaim_lock);

                do {
                        struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                        int     i;

                        rcu_read_lock();
                        nr_found = radix_tree_gang_lookup_tag(
                                        &pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH,
                                        XFS_ICI_RECLAIM_TAG);
                        if (!nr_found) {
                                rcu_read_unlock();
                                break;
                        }

                        /*
                         * Grab the inodes before we drop the lock. If we found
                         * nothing, nr_found == 0 and the loop will be skipped.
                         */
                        for (i = 0; i < nr_found; i++) {
                                struct xfs_inode *ip = batch[i];

                                if (done || xfs_reclaim_inode_grab(ip, flags))
                                        batch[i] = NULL;

                                /*
                                 * Update the index for the next lookup. Catch
                                 * overflows into the next AG range which can
                                 * occur if we have inodes in the last block of
                                 * the AG and we are currently pointing to the
                                 * last inode.
                                 *
                                 * Because we may see inodes that are from the
                                 * wrong AG due to RCU freeing and
                                 * reallocation, only update the index if it
                                 * lies in this AG. It was a race that led us
                                 * to see this inode, so another lookup from
                                 * the same index will not find it again.
                                 */
                                if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
                                                                pag->pag_agno)
                                        continue;
                                first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
                                if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
                                        done = 1;
                        }

                        /* unlock now that we've grabbed the inodes. */
                        rcu_read_unlock();

                        for (i = 0; i < nr_found; i++) {
                                if (!batch[i])
                                        continue;
                                error = xfs_reclaim_inode(batch[i], pag, flags);
                                if (error && last_error != EFSCORRUPTED)
                                        last_error = error;
                        }

                        *nr_to_scan -= XFS_LOOKUP_BATCH;

                } while (nr_found && !done && *nr_to_scan > 0);

                if (trylock && !done)
                        pag->pag_ici_reclaim_cursor = first_index;
                else
                        pag->pag_ici_reclaim_cursor = 0;
                mutex_unlock(&pag->pag_ici_reclaim_lock);
                xfs_perag_put(pag);
        }

        /*
         * If we skipped any AG, and we still have scan count remaining, do
         * another pass this time using blocking reclaim semantics (i.e.
         * waiting on the reclaim locks and ignoring the reclaim cursors). This
         * ensures that when we get more reclaimers than AGs we block rather
         * than spin trying to execute reclaim.
         */
        if (trylock && skipped && *nr_to_scan > 0) {
                trylock = 0;
                goto restart;
        }
        return XFS_ERROR(last_error);
}

int
xfs_reclaim_inodes(
        xfs_mount_t     *mp,
        int             mode)
{
        int             nr_to_scan = INT_MAX;

        return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}
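
/*
 * Illustrative use of the scan budget (an assumption mirroring the shrinker
 * below, not code from elsewhere in the tree):
 *
 *      int nr_to_scan = 128;
 *
 *      xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
 *
 * On return, nr_to_scan > 0 means the reclaim-tagged inodes were exhausted
 * before the budget was used up.
 */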

/*
 * Shrinker infrastructure.
 */
static int
xfs_reclaim_inode_shrink(
        struct shrinker *shrink,
        int             nr_to_scan,
        gfp_t           gfp_mask)
{
        struct xfs_mount *mp;
        struct xfs_perag *pag;
        xfs_agnumber_t  ag;
        int             reclaimable;

        mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
        if (nr_to_scan) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;

                xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
                /* terminate if we don't exhaust the scan */
                if (nr_to_scan > 0)
                        return -1;
        }

        reclaimable = 0;
        ag = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                ag = pag->pag_agno + 1;
                reclaimable += pag->pag_ici_reclaimable;
                xfs_perag_put(pag);
        }
        return reclaimable;
}
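
/*
 * Contract note (an assumption based on the shrinker API of this kernel
 * generation): the VM calls ->shrink() with nr_to_scan == 0 to ask how many
 * objects are reclaimable, and with nr_to_scan > 0 to request a scan.
 * Returning -1 asks the VM to back off, which we do for !__GFP_FS
 * allocations to avoid recursing into the filesystem under memory pressure.
 */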

void
xfs_inode_shrinker_register(
        struct xfs_mount        *mp)
{
        mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
        mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
        register_shrinker(&mp->m_inode_shrink);
}

void
xfs_inode_shrinker_unregister(
        struct xfs_mount        *mp)
{
        unregister_shrinker(&mp->m_inode_shrink);
}