   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/fs-writeback.c
   4 *
   5 * Copyright (C) 2002, Linus Torvalds.
   6 *
   7 * Contains all the functions related to writing back and waiting
   8 * upon dirty inodes against superblocks, and writing back dirty
   9 * pages against inodes.  ie: data writeback.  Writeout of the
  10 * inode itself is not handled here.
  11 *
  12 * 10Apr2002    Andrew Morton
  13 *              Split out of fs/inode.c
  14 *              Additions for address_space-based writeback
  15 */
  16
  17#include <linux/kernel.h>
  18#include <linux/export.h>
  19#include <linux/spinlock.h>
  20#include <linux/slab.h>
  21#include <linux/sched.h>
  22#include <linux/fs.h>
  23#include <linux/mm.h>
  24#include <linux/pagemap.h>
  25#include <linux/kthread.h>
  26#include <linux/writeback.h>
  27#include <linux/blkdev.h>
  28#include <linux/backing-dev.h>
  29#include <linux/tracepoint.h>
  30#include <linux/device.h>
  31#include <linux/memcontrol.h>
  32#include "internal.h"
  33
  34/*
  35 * 4MB minimal write chunk size
  36 */
  37#define MIN_WRITEBACK_PAGES     (4096UL >> (PAGE_SHIFT - 10))
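/*
 * For example, with 4 KiB pages (PAGE_SHIFT == 12) this evaluates to
 * 4096 KiB >> 2 == 1024 pages, i.e. a 4 MiB minimum chunk.
 */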
  38
  39/*
  40 * Passed into wb_writeback(), essentially a subset of writeback_control
  41 */
  42struct wb_writeback_work {
  43        long nr_pages;
  44        struct super_block *sb;
  45        enum writeback_sync_modes sync_mode;
  46        unsigned int tagged_writepages:1;
  47        unsigned int for_kupdate:1;
  48        unsigned int range_cyclic:1;
  49        unsigned int for_background:1;
  50        unsigned int for_sync:1;        /* sync(2) WB_SYNC_ALL writeback */
  51        unsigned int auto_free:1;       /* free on completion */
  52        enum wb_reason reason;          /* why was writeback initiated? */
  53
  54        struct list_head list;          /* pending work list */
  55        struct wb_completion *done;     /* set if the caller waits */
  56};
  57
  58/*
  59 * If an inode is constantly having its pages dirtied, but then the
  60 * updates stop dirtytime_expire_interval seconds in the past, it's
  61 * possible for the worst case time between when an inode has its
  62 * timestamps updated and when they finally get written out to be two
  63 * dirtytime_expire_intervals.  We set the default to 12 hours (in
  64 * seconds), which means most of the time inodes will have their
  65 * timestamps written to disk after 12 hours, but in the worst case a
   66 * few inodes might not have their timestamps updated for 24 hours.
  67 */
  68unsigned int dirtytime_expire_interval = 12 * 60 * 60;
  69
  70static inline struct inode *wb_inode(struct list_head *head)
  71{
  72        return list_entry(head, struct inode, i_io_list);
  73}
  74
  75/*
  76 * Include the creation of the trace points after defining the
  77 * wb_writeback_work structure and inline functions so that the definition
  78 * remains local to this file.
  79 */
  80#define CREATE_TRACE_POINTS
  81#include <trace/events/writeback.h>
  82
  83EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);
  84
  85static bool wb_io_lists_populated(struct bdi_writeback *wb)
  86{
  87        if (wb_has_dirty_io(wb)) {
  88                return false;
  89        } else {
  90                set_bit(WB_has_dirty_io, &wb->state);
  91                WARN_ON_ONCE(!wb->avg_write_bandwidth);
  92                atomic_long_add(wb->avg_write_bandwidth,
  93                                &wb->bdi->tot_write_bandwidth);
  94                return true;
  95        }
  96}
  97
  98static void wb_io_lists_depopulated(struct bdi_writeback *wb)
  99{
 100        if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
 101            list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
 102                clear_bit(WB_has_dirty_io, &wb->state);
 103                WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
 104                                        &wb->bdi->tot_write_bandwidth) < 0);
 105        }
 106}
 107
 108/**
 109 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
 110 * @inode: inode to be moved
 111 * @wb: target bdi_writeback
 112 * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
 113 *
  114 * Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
 115 * Returns %true if @inode is the first occupant of the !dirty_time IO
 116 * lists; otherwise, %false.
 117 */
 118static bool inode_io_list_move_locked(struct inode *inode,
 119                                      struct bdi_writeback *wb,
 120                                      struct list_head *head)
 121{
 122        assert_spin_locked(&wb->list_lock);
 123
 124        list_move(&inode->i_io_list, head);
 125
 126        /* dirty_time doesn't count as dirty_io until expiration */
 127        if (head != &wb->b_dirty_time)
 128                return wb_io_lists_populated(wb);
 129
 130        wb_io_lists_depopulated(wb);
 131        return false;
 132}
 133
 134static void wb_wakeup(struct bdi_writeback *wb)
 135{
 136        spin_lock_bh(&wb->work_lock);
 137        if (test_bit(WB_registered, &wb->state))
 138                mod_delayed_work(bdi_wq, &wb->dwork, 0);
 139        spin_unlock_bh(&wb->work_lock);
 140}
 141
 142static void finish_writeback_work(struct bdi_writeback *wb,
 143                                  struct wb_writeback_work *work)
 144{
 145        struct wb_completion *done = work->done;
 146
 147        if (work->auto_free)
 148                kfree(work);
 149        if (done) {
 150                wait_queue_head_t *waitq = done->waitq;
 151
 152                /* @done can't be accessed after the following dec */
 153                if (atomic_dec_and_test(&done->cnt))
 154                        wake_up_all(waitq);
 155        }
 156}
 157
 158static void wb_queue_work(struct bdi_writeback *wb,
 159                          struct wb_writeback_work *work)
 160{
 161        trace_writeback_queue(wb, work);
 162
 163        if (work->done)
 164                atomic_inc(&work->done->cnt);
 165
 166        spin_lock_bh(&wb->work_lock);
 167
 168        if (test_bit(WB_registered, &wb->state)) {
 169                list_add_tail(&work->list, &wb->work_list);
 170                mod_delayed_work(bdi_wq, &wb->dwork, 0);
 171        } else
 172                finish_writeback_work(wb, work);
 173
 174        spin_unlock_bh(&wb->work_lock);
 175}
 176
 177/**
 178 * wb_wait_for_completion - wait for completion of bdi_writeback_works
 179 * @done: target wb_completion
 180 *
 181 * Wait for one or more work items issued to @bdi with their ->done field
 182 * set to @done, which should have been initialized with
 183 * DEFINE_WB_COMPLETION().  This function returns after all such work items
 184 * are completed.  Work items which are waited upon aren't freed
 185 * automatically on completion.
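 *
 * A minimal usage sketch (single queued work item, names illustrative):
 *
 *	DEFINE_WB_COMPLETION(done, bdi);
 *
 *	work->done = &done;
 *	wb_queue_work(wb, work);
 *	wb_wait_for_completion(&done);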
 186 */
 187void wb_wait_for_completion(struct wb_completion *done)
 188{
 189        atomic_dec(&done->cnt);         /* put down the initial count */
 190        wait_event(*done->waitq, !atomic_read(&done->cnt));
 191}
 192
 193#ifdef CONFIG_CGROUP_WRITEBACK
 194
 195/*
 196 * Parameters for foreign inode detection, see wbc_detach_inode() to see
 197 * how they're used.
 198 *
  199 * These parameters are inherently heuristic as the detection target
  200 * itself is fuzzy.  All we want to do is detach an inode from its
  201 * current owner if it's being written to too much by some other cgroup.
 202 *
 203 * The current cgroup writeback is built on the assumption that multiple
 204 * cgroups writing to the same inode concurrently is very rare and a mode
 205 * of operation which isn't well supported.  As such, the goal is not
 206 * taking too long when a different cgroup takes over an inode while
 207 * avoiding too aggressive flip-flops from occasional foreign writes.
 208 *
 209 * We record, very roughly, 2s worth of IO time history and if more than
 210 * half of that is foreign, trigger the switch.  The recording is quantized
 211 * to 16 slots.  To avoid tiny writes from swinging the decision too much,
 212 * writes smaller than 1/8 of avg size are ignored.
 213 */
  214#define WB_FRN_TIME_SHIFT       13      /* 1s = 2^13, up to 8 secs w/ 16bit */
 215#define WB_FRN_TIME_AVG_SHIFT   3       /* avg = avg * 7/8 + new * 1/8 */
 216#define WB_FRN_TIME_CUT_DIV     8       /* ignore rounds < avg / 8 */
 217#define WB_FRN_TIME_PERIOD      (2 * (1 << WB_FRN_TIME_SHIFT))  /* 2s */
 218
 219#define WB_FRN_HIST_SLOTS       16      /* inode->i_wb_frn_history is 16bit */
 220#define WB_FRN_HIST_UNIT        (WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
 221                                        /* each slot's duration is 2s / 16 */
 222#define WB_FRN_HIST_THR_SLOTS   (WB_FRN_HIST_SLOTS / 2)
 223                                        /* if foreign slots >= 8, switch */
 224#define WB_FRN_HIST_MAX_SLOTS   (WB_FRN_HIST_THR_SLOTS / 2 + 1)
  225                                        /* one round can affect up to 5 slots */
 226#define WB_FRN_MAX_IN_FLIGHT    1024    /* don't queue too many concurrently */
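/*
 * Worked example of the above: WB_FRN_TIME_PERIOD is 2 * (1 << 13) == 16384
 * time units (~2s), so each of the 16 history slots covers
 * WB_FRN_HIST_UNIT == 1024 units (~125ms) and a single round can shift in
 * at most WB_FRN_HIST_MAX_SLOTS == 5 slots.
 */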
 227
 228/*
  229 * Maximum inodes per isw.  A specific value has been chosen to make
  230 * struct inode_switch_wbs_context fit into a 1024-byte kmalloc allocation.
 231 */
 232#define WB_MAX_INODES_PER_ISW  ((1024UL - sizeof(struct inode_switch_wbs_context)) \
 233                                / sizeof(struct inode *))
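/*
 * With 8-byte inode pointers (typical on 64-bit kernels) that is roughly
 * (1024 - sizeof(struct inode_switch_wbs_context)) / 8, i.e. somewhat over
 * a hundred inodes per switch context.
 */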
 234
 235static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
 236static struct workqueue_struct *isw_wq;
 237
 238void __inode_attach_wb(struct inode *inode, struct page *page)
 239{
 240        struct backing_dev_info *bdi = inode_to_bdi(inode);
 241        struct bdi_writeback *wb = NULL;
 242
 243        if (inode_cgwb_enabled(inode)) {
 244                struct cgroup_subsys_state *memcg_css;
 245
 246                if (page) {
 247                        memcg_css = mem_cgroup_css_from_page(page);
 248                        wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 249                } else {
 250                        /* must pin memcg_css, see wb_get_create() */
 251                        memcg_css = task_get_css(current, memory_cgrp_id);
 252                        wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 253                        css_put(memcg_css);
 254                }
 255        }
 256
 257        if (!wb)
 258                wb = &bdi->wb;
 259
 260        /*
 261         * There may be multiple instances of this function racing to
 262         * update the same inode.  Use cmpxchg() to tell the winner.
 263         */
 264        if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
 265                wb_put(wb);
 266}
 267EXPORT_SYMBOL_GPL(__inode_attach_wb);
 268
 269/**
 270 * inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
 271 * @inode: inode of interest with i_lock held
 272 * @wb: target bdi_writeback
 273 *
  274 * Remove the inode from wb's io lists and, if necessary, put it onto the b_attached
 275 * list.  Only inodes attached to cgwb's are kept on this list.
 276 */
 277static void inode_cgwb_move_to_attached(struct inode *inode,
 278                                        struct bdi_writeback *wb)
 279{
 280        assert_spin_locked(&wb->list_lock);
 281        assert_spin_locked(&inode->i_lock);
 282
 283        inode->i_state &= ~I_SYNC_QUEUED;
 284        if (wb != &wb->bdi->wb)
 285                list_move(&inode->i_io_list, &wb->b_attached);
 286        else
 287                list_del_init(&inode->i_io_list);
 288        wb_io_lists_depopulated(wb);
 289}
 290
 291/**
 292 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
 293 * @inode: inode of interest with i_lock held
 294 *
 295 * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
 296 * held on entry and is released on return.  The returned wb is guaranteed
 297 * to stay @inode's associated wb until its list_lock is released.
 298 */
 299static struct bdi_writeback *
 300locked_inode_to_wb_and_lock_list(struct inode *inode)
 301        __releases(&inode->i_lock)
 302        __acquires(&wb->list_lock)
 303{
 304        while (true) {
 305                struct bdi_writeback *wb = inode_to_wb(inode);
 306
 307                /*
 308                 * inode_to_wb() association is protected by both
 309                 * @inode->i_lock and @wb->list_lock but list_lock nests
 310                 * outside i_lock.  Drop i_lock and verify that the
 311                 * association hasn't changed after acquiring list_lock.
 312                 */
 313                wb_get(wb);
 314                spin_unlock(&inode->i_lock);
 315                spin_lock(&wb->list_lock);
 316
  317                /* i_wb may have changed in between, can't use inode_to_wb() */
 318                if (likely(wb == inode->i_wb)) {
 319                        wb_put(wb);     /* @inode already has ref */
 320                        return wb;
 321                }
 322
 323                spin_unlock(&wb->list_lock);
 324                wb_put(wb);
 325                cpu_relax();
 326                spin_lock(&inode->i_lock);
 327        }
 328}
 329
 330/**
 331 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
 332 * @inode: inode of interest
 333 *
 334 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
 335 * on entry.
 336 */
 337static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
 338        __acquires(&wb->list_lock)
 339{
 340        spin_lock(&inode->i_lock);
 341        return locked_inode_to_wb_and_lock_list(inode);
 342}
 343
 344struct inode_switch_wbs_context {
 345        struct rcu_work         work;
 346
 347        /*
 348         * Multiple inodes can be switched at once.  The switching procedure
  349         * consists of two parts, separated by an RCU grace period.  To make
  350         * sure that the second part is executed for each inode that went through
  351         * the first part, all inode pointers are placed into a NULL-terminated
  352         * array embedded into struct inode_switch_wbs_context.  Otherwise
  353         * an inode could be left in an inconsistent state.
 354         */
 355        struct bdi_writeback    *new_wb;
 356        struct inode            *inodes[];
 357};
 358
 359static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 360{
 361        down_write(&bdi->wb_switch_rwsem);
 362}
 363
 364static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 365{
 366        up_write(&bdi->wb_switch_rwsem);
 367}
 368
 369static bool inode_do_switch_wbs(struct inode *inode,
 370                                struct bdi_writeback *old_wb,
 371                                struct bdi_writeback *new_wb)
 372{
 373        struct address_space *mapping = inode->i_mapping;
 374        XA_STATE(xas, &mapping->i_pages, 0);
 375        struct page *page;
 376        bool switched = false;
 377
 378        spin_lock(&inode->i_lock);
 379        xa_lock_irq(&mapping->i_pages);
 380
 381        /*
 382         * Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction
 383         * path owns the inode and we shouldn't modify ->i_io_list.
 384         */
 385        if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE)))
 386                goto skip_switch;
 387
 388        trace_inode_switch_wbs(inode, old_wb, new_wb);
 389
 390        /*
 391         * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
 392         * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
 393         * pages actually under writeback.
 394         */
 395        xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
 396                if (PageDirty(page)) {
 397                        dec_wb_stat(old_wb, WB_RECLAIMABLE);
 398                        inc_wb_stat(new_wb, WB_RECLAIMABLE);
 399                }
 400        }
 401
 402        xas_set(&xas, 0);
 403        xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
 404                WARN_ON_ONCE(!PageWriteback(page));
 405                dec_wb_stat(old_wb, WB_WRITEBACK);
 406                inc_wb_stat(new_wb, WB_WRITEBACK);
 407        }
 408
 409        if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
 410                atomic_dec(&old_wb->writeback_inodes);
 411                atomic_inc(&new_wb->writeback_inodes);
 412        }
 413
 414        wb_get(new_wb);
 415
 416        /*
 417         * Transfer to @new_wb's IO list if necessary.  If the @inode is dirty,
 418         * the specific list @inode was on is ignored and the @inode is put on
 419         * ->b_dirty which is always correct including from ->b_dirty_time.
 420         * The transfer preserves @inode->dirtied_when ordering.  If the @inode
 421         * was clean, it means it was on the b_attached list, so move it onto
 422         * the b_attached list of @new_wb.
 423         */
 424        if (!list_empty(&inode->i_io_list)) {
 425                inode->i_wb = new_wb;
 426
 427                if (inode->i_state & I_DIRTY_ALL) {
 428                        struct inode *pos;
 429
 430                        list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
 431                                if (time_after_eq(inode->dirtied_when,
 432                                                  pos->dirtied_when))
 433                                        break;
 434                        inode_io_list_move_locked(inode, new_wb,
 435                                                  pos->i_io_list.prev);
 436                } else {
 437                        inode_cgwb_move_to_attached(inode, new_wb);
 438                }
 439        } else {
 440                inode->i_wb = new_wb;
 441        }
 442
  443        /* ->i_wb_frn updates may race with wbc_detach_inode() but that doesn't matter */
 444        inode->i_wb_frn_winner = 0;
 445        inode->i_wb_frn_avg_time = 0;
 446        inode->i_wb_frn_history = 0;
 447        switched = true;
 448skip_switch:
 449        /*
 450         * Paired with load_acquire in unlocked_inode_to_wb_begin() and
 451         * ensures that the new wb is visible if they see !I_WB_SWITCH.
 452         */
 453        smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
 454
 455        xa_unlock_irq(&mapping->i_pages);
 456        spin_unlock(&inode->i_lock);
 457
 458        return switched;
 459}
 460
 461static void inode_switch_wbs_work_fn(struct work_struct *work)
 462{
 463        struct inode_switch_wbs_context *isw =
 464                container_of(to_rcu_work(work), struct inode_switch_wbs_context, work);
 465        struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]);
 466        struct bdi_writeback *old_wb = isw->inodes[0]->i_wb;
 467        struct bdi_writeback *new_wb = isw->new_wb;
 468        unsigned long nr_switched = 0;
 469        struct inode **inodep;
 470
 471        /*
 472         * If @inode switches cgwb membership while sync_inodes_sb() is
 473         * being issued, sync_inodes_sb() might miss it.  Synchronize.
 474         */
 475        down_read(&bdi->wb_switch_rwsem);
 476
 477        /*
 478         * By the time control reaches here, RCU grace period has passed
 479         * since I_WB_SWITCH assertion and all wb stat update transactions
 480         * between unlocked_inode_to_wb_begin/end() are guaranteed to be
 481         * synchronizing against the i_pages lock.
 482         *
 483         * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
 484         * gives us exclusion against all wb related operations on @inode
 485         * including IO list manipulations and stat updates.
 486         */
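        /*
         * Take the two list_locks in a fixed (pointer) order so that two
         * concurrent switches involving the same wbs cannot deadlock.
         */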
 487        if (old_wb < new_wb) {
 488                spin_lock(&old_wb->list_lock);
 489                spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
 490        } else {
 491                spin_lock(&new_wb->list_lock);
 492                spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 493        }
 494
 495        for (inodep = isw->inodes; *inodep; inodep++) {
 496                WARN_ON_ONCE((*inodep)->i_wb != old_wb);
 497                if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
 498                        nr_switched++;
 499        }
 500
 501        spin_unlock(&new_wb->list_lock);
 502        spin_unlock(&old_wb->list_lock);
 503
 504        up_read(&bdi->wb_switch_rwsem);
 505
 506        if (nr_switched) {
 507                wb_wakeup(new_wb);
 508                wb_put_many(old_wb, nr_switched);
 509        }
 510
 511        for (inodep = isw->inodes; *inodep; inodep++)
 512                iput(*inodep);
 513        wb_put(new_wb);
 514        kfree(isw);
 515        atomic_dec(&isw_nr_in_flight);
 516}
 517
 518static bool inode_prepare_wbs_switch(struct inode *inode,
 519                                     struct bdi_writeback *new_wb)
 520{
 521        /*
 522         * Paired with smp_mb() in cgroup_writeback_umount().
 523         * isw_nr_in_flight must be increased before checking SB_ACTIVE and
 524         * grabbing an inode, otherwise isw_nr_in_flight can be observed as 0
  525         * in cgroup_writeback_umount() and the isw_wq will not be flushed.
 526         */
 527        smp_mb();
 528
 529        if (IS_DAX(inode))
 530                return false;
 531
 532        /* while holding I_WB_SWITCH, no one else can update the association */
 533        spin_lock(&inode->i_lock);
 534        if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
 535            inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
 536            inode_to_wb(inode) == new_wb) {
 537                spin_unlock(&inode->i_lock);
 538                return false;
 539        }
 540        inode->i_state |= I_WB_SWITCH;
 541        __iget(inode);
 542        spin_unlock(&inode->i_lock);
 543
 544        return true;
 545}
 546
 547/**
 548 * inode_switch_wbs - change the wb association of an inode
 549 * @inode: target inode
 550 * @new_wb_id: ID of the new wb
 551 *
 552 * Switch @inode's wb association to the wb identified by @new_wb_id.  The
 553 * switching is performed asynchronously and may fail silently.
 554 */
 555static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 556{
 557        struct backing_dev_info *bdi = inode_to_bdi(inode);
 558        struct cgroup_subsys_state *memcg_css;
 559        struct inode_switch_wbs_context *isw;
 560
  561        /* noop if a switch already seems to be in progress */
 562        if (inode->i_state & I_WB_SWITCH)
 563                return;
 564
 565        /* avoid queueing a new switch if too many are already in flight */
 566        if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
 567                return;
 568
 569        isw = kzalloc(sizeof(*isw) + 2 * sizeof(struct inode *), GFP_ATOMIC);
 570        if (!isw)
 571                return;
 572
 573        atomic_inc(&isw_nr_in_flight);
 574
 575        /* find and pin the new wb */
 576        rcu_read_lock();
 577        memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
 578        if (memcg_css && !css_tryget(memcg_css))
 579                memcg_css = NULL;
 580        rcu_read_unlock();
 581        if (!memcg_css)
 582                goto out_free;
 583
 584        isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 585        css_put(memcg_css);
 586        if (!isw->new_wb)
 587                goto out_free;
 588
 589        if (!inode_prepare_wbs_switch(inode, isw->new_wb))
 590                goto out_free;
 591
 592        isw->inodes[0] = inode;
 593
 594        /*
 595         * In addition to synchronizing among switchers, I_WB_SWITCH tells
  596         * the RCU protected stat update paths to grab the i_pages
  597         * lock so that stat transfer can synchronize against them.
 598         * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 599         */
 600        INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
 601        queue_rcu_work(isw_wq, &isw->work);
 602        return;
 603
 604out_free:
 605        atomic_dec(&isw_nr_in_flight);
 606        if (isw->new_wb)
 607                wb_put(isw->new_wb);
 608        kfree(isw);
 609}
 610
 611/**
 612 * cleanup_offline_cgwb - detach associated inodes
 613 * @wb: target wb
 614 *
  615 * Switch all inodes attached to @wb to the nearest living ancestor's wb in order
 616 * to eventually release the dying @wb.  Returns %true if not all inodes were
 617 * switched and the function has to be restarted.
 618 */
 619bool cleanup_offline_cgwb(struct bdi_writeback *wb)
 620{
 621        struct cgroup_subsys_state *memcg_css;
 622        struct inode_switch_wbs_context *isw;
 623        struct inode *inode;
 624        int nr;
 625        bool restart = false;
 626
 627        isw = kzalloc(sizeof(*isw) + WB_MAX_INODES_PER_ISW *
 628                      sizeof(struct inode *), GFP_KERNEL);
 629        if (!isw)
 630                return restart;
 631
 632        atomic_inc(&isw_nr_in_flight);
 633
 634        for (memcg_css = wb->memcg_css->parent; memcg_css;
 635             memcg_css = memcg_css->parent) {
 636                isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
 637                if (isw->new_wb)
 638                        break;
 639        }
 640        if (unlikely(!isw->new_wb))
 641                isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
 642
 643        nr = 0;
 644        spin_lock(&wb->list_lock);
 645        list_for_each_entry(inode, &wb->b_attached, i_io_list) {
 646                if (!inode_prepare_wbs_switch(inode, isw->new_wb))
 647                        continue;
 648
 649                isw->inodes[nr++] = inode;
 650
 651                if (nr >= WB_MAX_INODES_PER_ISW - 1) {
 652                        restart = true;
 653                        break;
 654                }
 655        }
 656        spin_unlock(&wb->list_lock);
 657
 658        /* no attached inodes? bail out */
 659        if (nr == 0) {
 660                atomic_dec(&isw_nr_in_flight);
 661                wb_put(isw->new_wb);
 662                kfree(isw);
 663                return restart;
 664        }
 665
 666        /*
 667         * In addition to synchronizing among switchers, I_WB_SWITCH tells
  668         * the RCU protected stat update paths to grab the i_pages
  669         * lock so that stat transfer can synchronize against them.
 670         * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 671         */
 672        INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
 673        queue_rcu_work(isw_wq, &isw->work);
 674
 675        return restart;
 676}
 677
 678/**
 679 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
 680 * @wbc: writeback_control of interest
 681 * @inode: target inode
 682 *
 683 * @inode is locked and about to be written back under the control of @wbc.
 684 * Record @inode's writeback context into @wbc and unlock the i_lock.  On
 685 * writeback completion, wbc_detach_inode() should be called.  This is used
 686 * to track the cgroup writeback context.
 687 */
 688void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
 689                                 struct inode *inode)
 690{
 691        if (!inode_cgwb_enabled(inode)) {
 692                spin_unlock(&inode->i_lock);
 693                return;
 694        }
 695
 696        wbc->wb = inode_to_wb(inode);
 697        wbc->inode = inode;
 698
 699        wbc->wb_id = wbc->wb->memcg_css->id;
 700        wbc->wb_lcand_id = inode->i_wb_frn_winner;
 701        wbc->wb_tcand_id = 0;
 702        wbc->wb_bytes = 0;
 703        wbc->wb_lcand_bytes = 0;
 704        wbc->wb_tcand_bytes = 0;
 705
 706        wb_get(wbc->wb);
 707        spin_unlock(&inode->i_lock);
 708
 709        /*
 710         * A dying wb indicates that either the blkcg associated with the
 711         * memcg changed or the associated memcg is dying.  In the first
 712         * case, a replacement wb should already be available and we should
 713         * refresh the wb immediately.  In the second case, trying to
 714         * refresh will keep failing.
 715         */
 716        if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
 717                inode_switch_wbs(inode, wbc->wb_id);
 718}
 719EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
 720
 721/**
 722 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
 723 * @wbc: writeback_control of the just finished writeback
 724 *
 725 * To be called after a writeback attempt of an inode finishes and undoes
 726 * wbc_attach_and_unlock_inode().  Can be called under any context.
 727 *
 728 * As concurrent write sharing of an inode is expected to be very rare and
  729 * memcg only tracks page ownership on a first-use basis, severely confining
 730 * the usefulness of such sharing, cgroup writeback tracks ownership
 731 * per-inode.  While the support for concurrent write sharing of an inode
 732 * is deemed unnecessary, an inode being written to by different cgroups at
 733 * different points in time is a lot more common, and, more importantly,
 734 * charging only by first-use can too readily lead to grossly incorrect
  735 * behaviors (a single foreign page can lead to gigabytes of writeback being
 736 * incorrectly attributed).
 737 *
 738 * To resolve this issue, cgroup writeback detects the majority dirtier of
  739 * an inode and transfers the ownership to it.  To avoid unnecessary
 740 * oscillation, the detection mechanism keeps track of history and gives
 741 * out the switch verdict only if the foreign usage pattern is stable over
 742 * a certain amount of time and/or writeback attempts.
 743 *
 744 * On each writeback attempt, @wbc tries to detect the majority writer
  745 * using the Boyer-Moore majority vote algorithm.  In addition to the byte
 746 * count from the majority voting, it also counts the bytes written for the
 747 * current wb and the last round's winner wb (max of last round's current
 748 * wb, the winner from two rounds ago, and the last round's majority
 749 * candidate).  Keeping track of the historical winner helps the algorithm
 750 * to semi-reliably detect the most active writer even when it's not the
 751 * absolute majority.
 752 *
 753 * Once the winner of the round is determined, whether the winner is
 754 * foreign or not and how much IO time the round consumed is recorded in
 755 * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
 756 * over a certain threshold, the switch verdict is given.
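 *
 * As an illustration (numbers picked for the example): if a round's winner
 * is a foreign wb and its IO time maps to 3 history slots, the history is
 * shifted left by 3 with the new bits set; once more than half of the 16
 * recorded slots are foreign, the inode is switched to the winning wb.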
 757 */
 758void wbc_detach_inode(struct writeback_control *wbc)
 759{
 760        struct bdi_writeback *wb = wbc->wb;
 761        struct inode *inode = wbc->inode;
 762        unsigned long avg_time, max_bytes, max_time;
 763        u16 history;
 764        int max_id;
 765
 766        if (!wb)
 767                return;
 768
 769        history = inode->i_wb_frn_history;
 770        avg_time = inode->i_wb_frn_avg_time;
 771
 772        /* pick the winner of this round */
 773        if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
 774            wbc->wb_bytes >= wbc->wb_tcand_bytes) {
 775                max_id = wbc->wb_id;
 776                max_bytes = wbc->wb_bytes;
 777        } else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
 778                max_id = wbc->wb_lcand_id;
 779                max_bytes = wbc->wb_lcand_bytes;
 780        } else {
 781                max_id = wbc->wb_tcand_id;
 782                max_bytes = wbc->wb_tcand_bytes;
 783        }
 784
 785        /*
 786         * Calculate the amount of IO time the winner consumed and fold it
 787         * into the running average kept per inode.  If the consumed IO
  788         * time is lower than avg_time / WB_FRN_TIME_CUT_DIV, ignore it for
 789         * deciding whether to switch or not.  This is to prevent one-off
 790         * small dirtiers from skewing the verdict.
 791         */
 792        max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
 793                                wb->avg_write_bandwidth);
 794        if (avg_time)
 795                avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
 796                            (avg_time >> WB_FRN_TIME_AVG_SHIFT);
 797        else
 798                avg_time = max_time;    /* immediate catch up on first run */
 799
 800        if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
 801                int slots;
 802
 803                /*
 804                 * The switch verdict is reached if foreign wb's consume
 805                 * more than a certain proportion of IO time in a
 806                 * WB_FRN_TIME_PERIOD.  This is loosely tracked by 16 slot
 807                 * history mask where each bit represents one sixteenth of
 808                 * the period.  Determine the number of slots to shift into
 809                 * history from @max_time.
 810                 */
 811                slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
 812                            (unsigned long)WB_FRN_HIST_MAX_SLOTS);
 813                history <<= slots;
 814                if (wbc->wb_id != max_id)
 815                        history |= (1U << slots) - 1;
 816
 817                if (history)
 818                        trace_inode_foreign_history(inode, wbc, history);
 819
 820                /*
 821                 * Switch if the current wb isn't the consistent winner.
 822                 * If there are multiple closely competing dirtiers, the
 823                 * inode may switch across them repeatedly over time, which
 824                 * is okay.  The main goal is avoiding keeping an inode on
 825                 * the wrong wb for an extended period of time.
 826                 */
 827                if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
 828                        inode_switch_wbs(inode, max_id);
 829        }
 830
 831        /*
 832         * Multiple instances of this function may race to update the
  833         * following fields but we don't mind occasional inaccuracies.
 834         */
 835        inode->i_wb_frn_winner = max_id;
 836        inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
 837        inode->i_wb_frn_history = history;
 838
 839        wb_put(wbc->wb);
 840        wbc->wb = NULL;
 841}
 842EXPORT_SYMBOL_GPL(wbc_detach_inode);
 843
 844/**
 845 * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
 846 * @wbc: writeback_control of the writeback in progress
 847 * @page: page being written out
 848 * @bytes: number of bytes being written out
 849 *
  850 * @bytes from @page are about to be written out during the writeback
 851 * controlled by @wbc.  Keep the book for foreign inode detection.  See
 852 * wbc_detach_inode().
 853 */
 854void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
 855                              size_t bytes)
 856{
 857        struct cgroup_subsys_state *css;
 858        int id;
 859
 860        /*
 861         * pageout() path doesn't attach @wbc to the inode being written
 862         * out.  This is intentional as we don't want the function to block
 863         * behind a slow cgroup.  Ultimately, we want pageout() to kick off
 864         * regular writeback instead of writing things out itself.
 865         */
 866        if (!wbc->wb || wbc->no_cgroup_owner)
 867                return;
 868
 869        css = mem_cgroup_css_from_page(page);
 870        /* dead cgroups shouldn't contribute to inode ownership arbitration */
 871        if (!(css->flags & CSS_ONLINE))
 872                return;
 873
 874        id = css->id;
 875
 876        if (id == wbc->wb_id) {
 877                wbc->wb_bytes += bytes;
 878                return;
 879        }
 880
 881        if (id == wbc->wb_lcand_id)
 882                wbc->wb_lcand_bytes += bytes;
 883
 884        /* Boyer-Moore majority vote algorithm */
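        /*
         * A fresh id becomes the candidate when the running count is zero;
         * bytes from the candidate add to the count, bytes from any other
         * id drain it, so an id that wrote a majority of the bytes always
         * ends up as wb_tcand_id.
         */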
 885        if (!wbc->wb_tcand_bytes)
 886                wbc->wb_tcand_id = id;
 887        if (id == wbc->wb_tcand_id)
 888                wbc->wb_tcand_bytes += bytes;
 889        else
 890                wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
 891}
 892EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
 893
 894/**
 895 * inode_congested - test whether an inode is congested
 896 * @inode: inode to test for congestion (may be NULL)
 897 * @cong_bits: mask of WB_[a]sync_congested bits to test
 898 *
 899 * Tests whether @inode is congested.  @cong_bits is the mask of congestion
 900 * bits to test and the return value is the mask of set bits.
 901 *
 902 * If cgroup writeback is enabled for @inode, the congestion state is
 903 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
 904 * associated with @inode is congested; otherwise, the root wb's congestion
 905 * state is used.
 906 *
 907 * @inode is allowed to be NULL as this function is often called on
 908 * mapping->host which is NULL for the swapper space.
 909 */
 910int inode_congested(struct inode *inode, int cong_bits)
 911{
 912        /*
 913         * Once set, ->i_wb never becomes NULL while the inode is alive.
 914         * Start transaction iff ->i_wb is visible.
 915         */
 916        if (inode && inode_to_wb_is_valid(inode)) {
 917                struct bdi_writeback *wb;
 918                struct wb_lock_cookie lock_cookie = {};
 919                bool congested;
 920
 921                wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
 922                congested = wb_congested(wb, cong_bits);
 923                unlocked_inode_to_wb_end(inode, &lock_cookie);
 924                return congested;
 925        }
 926
 927        return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
 928}
 929EXPORT_SYMBOL_GPL(inode_congested);
 930
 931/**
 932 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 933 * @wb: target bdi_writeback to split @nr_pages to
 934 * @nr_pages: number of pages to write for the whole bdi
 935 *
 936 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 937 * relation to the total write bandwidth of all wb's w/ dirty inodes on
 938 * @wb->bdi.
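 * For example, a wb contributing a quarter of the total write bandwidth is
 * asked to write roughly a quarter of @nr_pages (rounded up).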
 939 */
 940static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
 941{
 942        unsigned long this_bw = wb->avg_write_bandwidth;
 943        unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
 944
 945        if (nr_pages == LONG_MAX)
 946                return LONG_MAX;
 947
 948        /*
  949         * This may be called on clean wb's, where proportional distribution
  950         * may not make sense; just use the original @nr_pages in those
  951         * cases.  In general, we want to err on the side of writing more.
 952         */
 953        if (!tot_bw || this_bw >= tot_bw)
 954                return nr_pages;
 955        else
 956                return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
 957}
 958
 959/**
 960 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
 961 * @bdi: target backing_dev_info
 962 * @base_work: wb_writeback_work to issue
 963 * @skip_if_busy: skip wb's which already have writeback in progress
 964 *
 965 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
  966 * have dirty inodes.  If @base_work->nr_pages isn't %LONG_MAX, it's
 967 * distributed to the busy wbs according to each wb's proportion in the
 968 * total active write bandwidth of @bdi.
 969 */
 970static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 971                                  struct wb_writeback_work *base_work,
 972                                  bool skip_if_busy)
 973{
 974        struct bdi_writeback *last_wb = NULL;
 975        struct bdi_writeback *wb = list_entry(&bdi->wb_list,
 976                                              struct bdi_writeback, bdi_node);
 977
 978        might_sleep();
 979restart:
 980        rcu_read_lock();
 981        list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
 982                DEFINE_WB_COMPLETION(fallback_work_done, bdi);
 983                struct wb_writeback_work fallback_work;
 984                struct wb_writeback_work *work;
 985                long nr_pages;
 986
 987                if (last_wb) {
 988                        wb_put(last_wb);
 989                        last_wb = NULL;
 990                }
 991
 992                /* SYNC_ALL writes out I_DIRTY_TIME too */
 993                if (!wb_has_dirty_io(wb) &&
 994                    (base_work->sync_mode == WB_SYNC_NONE ||
 995                     list_empty(&wb->b_dirty_time)))
 996                        continue;
 997                if (skip_if_busy && writeback_in_progress(wb))
 998                        continue;
 999
1000                nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
1001
1002                work = kmalloc(sizeof(*work), GFP_ATOMIC);
1003                if (work) {
1004                        *work = *base_work;
1005                        work->nr_pages = nr_pages;
1006                        work->auto_free = 1;
1007                        wb_queue_work(wb, work);
1008                        continue;
1009                }
1010
1011                /* alloc failed, execute synchronously using on-stack fallback */
1012                work = &fallback_work;
1013                *work = *base_work;
1014                work->nr_pages = nr_pages;
1015                work->auto_free = 0;
1016                work->done = &fallback_work_done;
1017
1018                wb_queue_work(wb, work);
1019
1020                /*
1021                 * Pin @wb so that it stays on @bdi->wb_list.  This allows
1022                 * continuing iteration from @wb after dropping and
1023                 * regrabbing rcu read lock.
1024                 */
1025                wb_get(wb);
1026                last_wb = wb;
1027
1028                rcu_read_unlock();
1029                wb_wait_for_completion(&fallback_work_done);
1030                goto restart;
1031        }
1032        rcu_read_unlock();
1033
1034        if (last_wb)
1035                wb_put(last_wb);
1036}
1037
1038/**
1039 * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
1040 * @bdi_id: target bdi id
1041 * @memcg_id: target memcg css id
 1042 * @reason: reason why some writeback work was initiated
1043 * @done: target wb_completion
1044 *
1045 * Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id
1046 * with the specified parameters.
1047 */
1048int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
1049                           enum wb_reason reason, struct wb_completion *done)
1050{
1051        struct backing_dev_info *bdi;
1052        struct cgroup_subsys_state *memcg_css;
1053        struct bdi_writeback *wb;
1054        struct wb_writeback_work *work;
1055        unsigned long dirty;
1056        int ret;
1057
1058        /* lookup bdi and memcg */
1059        bdi = bdi_get_by_id(bdi_id);
1060        if (!bdi)
1061                return -ENOENT;
1062
1063        rcu_read_lock();
1064        memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
1065        if (memcg_css && !css_tryget(memcg_css))
1066                memcg_css = NULL;
1067        rcu_read_unlock();
1068        if (!memcg_css) {
1069                ret = -ENOENT;
1070                goto out_bdi_put;
1071        }
1072
1073        /*
 1074         * And find the associated wb.  If the wb isn't there already,
 1075         * there's nothing to flush; don't create one.
1076         */
1077        wb = wb_get_lookup(bdi, memcg_css);
1078        if (!wb) {
1079                ret = -ENOENT;
1080                goto out_css_put;
1081        }
1082
1083        /*
1084         * The caller is attempting to write out most of
1085         * the currently dirty pages.  Let's take the current dirty page
1086         * count and inflate it by 25% which should be large enough to
1087         * flush out most dirty pages while avoiding getting livelocked by
1088         * concurrent dirtiers.
1089         *
1090         * BTW the memcg stats are flushed periodically and this is best-effort
1091         * estimation, so some potential error is ok.
1092         */
1093        dirty = memcg_page_state(mem_cgroup_from_css(memcg_css), NR_FILE_DIRTY);
1094        dirty = dirty * 10 / 8;
1095
1096        /* issue the writeback work */
1097        work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
1098        if (work) {
1099                work->nr_pages = dirty;
1100                work->sync_mode = WB_SYNC_NONE;
1101                work->range_cyclic = 1;
1102                work->reason = reason;
1103                work->done = done;
1104                work->auto_free = 1;
1105                wb_queue_work(wb, work);
1106                ret = 0;
1107        } else {
1108                ret = -ENOMEM;
1109        }
1110
1111        wb_put(wb);
1112out_css_put:
1113        css_put(memcg_css);
1114out_bdi_put:
1115        bdi_put(bdi);
1116        return ret;
1117}
1118
1119/**
1120 * cgroup_writeback_umount - flush inode wb switches for umount
1121 *
1122 * This function is called when a super_block is about to be destroyed and
1123 * flushes in-flight inode wb switches.  An inode wb switch goes through
1124 * RCU and then workqueue, so the two need to be flushed in order to ensure
1125 * that all previously scheduled switches are finished.  As wb switches are
1126 * rare occurrences and synchronize_rcu() can take a while, perform
1127 * flushing iff wb switches are in flight.
1128 */
1129void cgroup_writeback_umount(void)
1130{
1131        /*
1132         * SB_ACTIVE should be reliably cleared before checking
1133         * isw_nr_in_flight, see generic_shutdown_super().
1134         */
1135        smp_mb();
1136
1137        if (atomic_read(&isw_nr_in_flight)) {
1138                /*
1139                 * Use rcu_barrier() to wait for all pending callbacks to
1140                 * ensure that all in-flight wb switches are in the workqueue.
1141                 */
1142                rcu_barrier();
1143                flush_workqueue(isw_wq);
1144        }
1145}
1146
1147static int __init cgroup_writeback_init(void)
1148{
1149        isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
1150        if (!isw_wq)
1151                return -ENOMEM;
1152        return 0;
1153}
1154fs_initcall(cgroup_writeback_init);
1155
1156#else   /* CONFIG_CGROUP_WRITEBACK */
1157
1158static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1159static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1160
1161static void inode_cgwb_move_to_attached(struct inode *inode,
1162                                        struct bdi_writeback *wb)
1163{
1164        assert_spin_locked(&wb->list_lock);
1165        assert_spin_locked(&inode->i_lock);
1166
1167        inode->i_state &= ~I_SYNC_QUEUED;
1168        list_del_init(&inode->i_io_list);
1169        wb_io_lists_depopulated(wb);
1170}
1171
1172static struct bdi_writeback *
1173locked_inode_to_wb_and_lock_list(struct inode *inode)
1174        __releases(&inode->i_lock)
1175        __acquires(&wb->list_lock)
1176{
1177        struct bdi_writeback *wb = inode_to_wb(inode);
1178
1179        spin_unlock(&inode->i_lock);
1180        spin_lock(&wb->list_lock);
1181        return wb;
1182}
1183
1184static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
1185        __acquires(&wb->list_lock)
1186{
1187        struct bdi_writeback *wb = inode_to_wb(inode);
1188
1189        spin_lock(&wb->list_lock);
1190        return wb;
1191}
1192
1193static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
1194{
1195        return nr_pages;
1196}
1197
1198static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
1199                                  struct wb_writeback_work *base_work,
1200                                  bool skip_if_busy)
1201{
1202        might_sleep();
1203
1204        if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
1205                base_work->auto_free = 0;
1206                wb_queue_work(&bdi->wb, base_work);
1207        }
1208}
1209
1210#endif  /* CONFIG_CGROUP_WRITEBACK */
1211
1212/*
1213 * Add in the number of potentially dirty inodes, because each inode
1214 * write can dirty pagecache in the underlying blockdev.
1215 */
1216static unsigned long get_nr_dirty_pages(void)
1217{
1218        return global_node_page_state(NR_FILE_DIRTY) +
1219                get_nr_dirty_inodes();
1220}
1221
1222static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
1223{
1224        if (!wb_has_dirty_io(wb))
1225                return;
1226
1227        /*
1228         * All callers of this function want to start writeback of all
1229         * dirty pages. Places like vmscan can call this at a very
1230         * high frequency, causing pointless allocations of tons of
1231         * work items and keeping the flusher threads busy retrieving
1232         * that work. Ensure that we only allow one of them pending and
 1233         * inflight at a time.
1234         */
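        /*
         * The plain test_bit() is a cheap pre-check that avoids the atomic
         * test_and_set_bit() in the common case where WB_start_all is
         * already set.
         */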
1235        if (test_bit(WB_start_all, &wb->state) ||
1236            test_and_set_bit(WB_start_all, &wb->state))
1237                return;
1238
1239        wb->start_all_reason = reason;
1240        wb_wakeup(wb);
1241}
1242
1243/**
1244 * wb_start_background_writeback - start background writeback
 1245 * @wb: bdi_writeback to write from
1246 *
1247 * Description:
1248 *   This makes sure WB_SYNC_NONE background writeback happens. When
 1249 *   this function returns, it is only guaranteed that for the given wb
 1250 *   some IO is happening if we are over the background dirty threshold.
 1251 *   The caller need not hold the sb's s_umount semaphore.
1252 */
1253void wb_start_background_writeback(struct bdi_writeback *wb)
1254{
1255        /*
1256         * We just wake up the flusher thread. It will perform background
1257         * writeback as soon as there is no other work to do.
1258         */
1259        trace_writeback_wake_background(wb);
1260        wb_wakeup(wb);
1261}
1262
1263/*
1264 * Remove the inode from the writeback list it is on.
1265 */
1266void inode_io_list_del(struct inode *inode)
1267{
1268        struct bdi_writeback *wb;
1269
1270        wb = inode_to_wb_and_lock_list(inode);
1271        spin_lock(&inode->i_lock);
1272
1273        inode->i_state &= ~I_SYNC_QUEUED;
1274        list_del_init(&inode->i_io_list);
1275        wb_io_lists_depopulated(wb);
1276
1277        spin_unlock(&inode->i_lock);
1278        spin_unlock(&wb->list_lock);
1279}
1280EXPORT_SYMBOL(inode_io_list_del);
1281
1282/*
1283 * mark an inode as under writeback on the sb
1284 */
1285void sb_mark_inode_writeback(struct inode *inode)
1286{
1287        struct super_block *sb = inode->i_sb;
1288        unsigned long flags;
1289
1290        if (list_empty(&inode->i_wb_list)) {
1291                spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1292                if (list_empty(&inode->i_wb_list)) {
1293                        list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
1294                        trace_sb_mark_inode_writeback(inode);
1295                }
1296                spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1297        }
1298}
1299
1300/*
1301 * clear an inode as under writeback on the sb
1302 */
1303void sb_clear_inode_writeback(struct inode *inode)
1304{
1305        struct super_block *sb = inode->i_sb;
1306        unsigned long flags;
1307
1308        if (!list_empty(&inode->i_wb_list)) {
1309                spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1310                if (!list_empty(&inode->i_wb_list)) {
1311                        list_del_init(&inode->i_wb_list);
1312                        trace_sb_clear_inode_writeback(inode);
1313                }
1314                spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1315        }
1316}
1317
1318/*
 1319 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
1320 * furthest end of its superblock's dirty-inode list.
1321 *
1322 * Before stamping the inode's ->dirtied_when, we check to see whether it is
1323 * already the most-recently-dirtied inode on the b_dirty list.  If that is
1324 * the case then the inode must have been redirtied while it was being written
1325 * out and we don't reset its dirtied_when.
1326 */
1327static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
1328{
1329        assert_spin_locked(&inode->i_lock);
1330
1331        if (!list_empty(&wb->b_dirty)) {
1332                struct inode *tail;
1333
1334                tail = wb_inode(wb->b_dirty.next);
1335                if (time_before(inode->dirtied_when, tail->dirtied_when))
1336                        inode->dirtied_when = jiffies;
1337        }
1338        inode_io_list_move_locked(inode, wb, &wb->b_dirty);
1339        inode->i_state &= ~I_SYNC_QUEUED;
1340}
1341
1342static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
1343{
1344        spin_lock(&inode->i_lock);
1345        redirty_tail_locked(inode, wb);
1346        spin_unlock(&inode->i_lock);
1347}
1348
1349/*
1350 * requeue inode for re-scanning after bdi->b_io list is exhausted.
1351 */
1352static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1353{
1354        inode_io_list_move_locked(inode, wb, &wb->b_more_io);
1355}
1356
1357static void inode_sync_complete(struct inode *inode)
1358{
1359        inode->i_state &= ~I_SYNC;
 1360        /* If inode is clean and unused, put it into LRU now... */
1361        inode_add_lru(inode);
1362        /* Waiters must see I_SYNC cleared before being woken up */
1363        smp_mb();
1364        wake_up_bit(&inode->i_state, __I_SYNC);
1365}
1366
1367static bool inode_dirtied_after(struct inode *inode, unsigned long t)
1368{
1369        bool ret = time_after(inode->dirtied_when, t);
1370#ifndef CONFIG_64BIT
1371        /*
1372         * For inodes being constantly redirtied, dirtied_when can get stuck.
1373         * It _appears_ to be in the future, but is actually in distant past.
1374         * This test is necessary to prevent such wrapped-around relative times
1375         * from permanently stopping the whole bdi writeback.
1376         */
1377        ret = ret && time_before_eq(inode->dirtied_when, jiffies);
1378#endif
1379        return ret;
1380}
1381
1382#define EXPIRE_DIRTY_ATIME 0x0001
1383
1384/*
1385 * Move expired (dirtied before dirtied_before) dirty inodes from
1386 * @delaying_queue to @dispatch_queue.
1387 */
1388static int move_expired_inodes(struct list_head *delaying_queue,
1389                               struct list_head *dispatch_queue,
1390                               unsigned long dirtied_before)
1391{
1392        LIST_HEAD(tmp);
1393        struct list_head *pos, *node;
1394        struct super_block *sb = NULL;
1395        struct inode *inode;
1396        int do_sb_sort = 0;
1397        int moved = 0;
1398
1399        while (!list_empty(delaying_queue)) {
1400                inode = wb_inode(delaying_queue->prev);
1401                if (inode_dirtied_after(inode, dirtied_before))
1402                        break;
1403                list_move(&inode->i_io_list, &tmp);
1404                moved++;
1405                spin_lock(&inode->i_lock);
1406                inode->i_state |= I_SYNC_QUEUED;
1407                spin_unlock(&inode->i_lock);
1408                if (sb_is_blkdev_sb(inode->i_sb))
1409                        continue;
1410                if (sb && sb != inode->i_sb)
1411                        do_sb_sort = 1;
1412                sb = inode->i_sb;
1413        }
1414
1415        /* just one sb in list, splice to dispatch_queue and we're done */
1416        if (!do_sb_sort) {
1417                list_splice(&tmp, dispatch_queue);
1418                goto out;
1419        }
1420
1421        /* Move inodes from one superblock together */
1422        while (!list_empty(&tmp)) {
1423                sb = wb_inode(tmp.prev)->i_sb;
1424                list_for_each_prev_safe(pos, node, &tmp) {
1425                        inode = wb_inode(pos);
1426                        if (inode->i_sb == sb)
1427                                list_move(&inode->i_io_list, dispatch_queue);
1428                }
1429        }
1430out:
1431        return moved;
1432}
1433
1434/*
1435 * Queue all expired dirty inodes for io, eldest first.
1436 * Before
1437 *         newly dirtied     b_dirty    b_io    b_more_io
1438 *         =============>    gf         edc     BA
1439 * After
1440 *         newly dirtied     b_dirty    b_io    b_more_io
1441 *         =============>    g          fBAedc
1442 *                                           |
1443 *                                           +--> dequeue for IO
1444 */
1445static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
1446                     unsigned long dirtied_before)
1447{
1448        int moved;
1449        unsigned long time_expire_jif = dirtied_before;
1450
1451        assert_spin_locked(&wb->list_lock);
1452        list_splice_init(&wb->b_more_io, &wb->b_io);
1453        moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
1454        if (!work->for_sync)
1455                time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
1456        moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
1457                                     time_expire_jif);
1458        if (moved)
1459                wb_io_lists_populated(wb);
1460        trace_writeback_queue_io(wb, work, dirtied_before, moved);
1461}
1462
1463static int write_inode(struct inode *inode, struct writeback_control *wbc)
1464{
1465        int ret;
1466
1467        if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
1468                trace_writeback_write_inode_start(inode, wbc);
1469                ret = inode->i_sb->s_op->write_inode(inode, wbc);
1470                trace_writeback_write_inode(inode, wbc);
1471                return ret;
1472        }
1473        return 0;
1474}
1475
1476/*
1477 * Wait for writeback on an inode to complete. Called with i_lock held.
1478 * Caller must make sure inode cannot go away when we drop i_lock.
1479 */
1480static void __inode_wait_for_writeback(struct inode *inode)
1481        __releases(inode->i_lock)
1482        __acquires(inode->i_lock)
1483{
1484        DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
1485        wait_queue_head_t *wqh;
1486
1487        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1488        while (inode->i_state & I_SYNC) {
1489                spin_unlock(&inode->i_lock);
1490                __wait_on_bit(wqh, &wq, bit_wait,
1491                              TASK_UNINTERRUPTIBLE);
1492                spin_lock(&inode->i_lock);
1493        }
1494}
1495
1496/*
1497 * Wait for writeback on an inode to complete. Caller must have inode pinned.
1498 */
1499void inode_wait_for_writeback(struct inode *inode)
1500{
1501        spin_lock(&inode->i_lock);
1502        __inode_wait_for_writeback(inode);
1503        spin_unlock(&inode->i_lock);
1504}
1505
1506/*
1507 * Sleep until I_SYNC is cleared. This function must be called with i_lock
1508 * held and drops it. It is intended for callers not holding any inode reference,
1509 * so once i_lock is dropped, the inode can go away.
1510 */
1511static void inode_sleep_on_writeback(struct inode *inode)
1512        __releases(inode->i_lock)
1513{
1514        DEFINE_WAIT(wait);
1515        wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1516        int sleep;
1517
1518        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1519        sleep = inode->i_state & I_SYNC;
1520        spin_unlock(&inode->i_lock);
1521        if (sleep)
1522                schedule();
1523        finish_wait(wqh, &wait);
1524}
1525
1526/*
1527 * Find proper writeback list for the inode depending on its current state and
1528 * possibly also change of its state while we were doing writeback.  Here we
1529 * handle things such as livelock prevention or fairness of writeback among
1530 * inodes. This function can be called only by the flusher thread - no one else
1531 * processes all inodes in the writeback lists, and requeueing inodes behind the
1532 * flusher thread's back can have unexpected consequences.
1533 */
1534static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
1535                          struct writeback_control *wbc)
1536{
1537        if (inode->i_state & I_FREEING)
1538                return;
1539
1540        /*
1541         * Sync livelock prevention. Each inode is tagged and synced in one
1542         * shot. If still dirty, it will be redirty_tail()'ed below.  Update
1543         * the dirty time to prevent it from being enqueued and synced again.
1544         */
1545        if ((inode->i_state & I_DIRTY) &&
1546            (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
1547                inode->dirtied_when = jiffies;
1548
1549        if (wbc->pages_skipped) {
1550                /*
1551                 * writeback is not making progress due to locked
1552                 * buffers. Skip this inode for now.
1553                 */
1554                redirty_tail_locked(inode, wb);
1555                return;
1556        }
1557
1558        if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
1559                /*
1560                 * We didn't write back all the pages.  nfs_writepages()
1561                 * sometimes bails out without doing anything.
1562                 */
1563                if (wbc->nr_to_write <= 0) {
1564                        /* Slice used up. Queue for next turn. */
1565                        requeue_io(inode, wb);
1566                } else {
1567                        /*
1568                         * Writeback blocked by something other than
1569                         * congestion. Delay the inode for some time to
1570                         * avoid spinning on the CPU (100% iowait)
1571                         * retrying writeback of the dirty page/inode
1572                         * that cannot be performed immediately.
1573                         */
1574                        redirty_tail_locked(inode, wb);
1575                }
1576        } else if (inode->i_state & I_DIRTY) {
1577                /*
1578                 * Filesystems can dirty the inode during writeback operations,
1579                 * such as delayed allocation during submission or metadata
1580                 * updates after data IO completion.
1581                 */
1582                redirty_tail_locked(inode, wb);
1583        } else if (inode->i_state & I_DIRTY_TIME) {
1584                inode->dirtied_when = jiffies;
1585                inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
1586                inode->i_state &= ~I_SYNC_QUEUED;
1587        } else {
1588                /* The inode is clean. Remove from writeback lists. */
1589                inode_cgwb_move_to_attached(inode, wb);
1590        }
1591}
1592
1593/*
1594 * Write out an inode and its dirty pages (or some of its dirty pages, depending
1595 * on @wbc->nr_to_write), and clear the relevant dirty flags from i_state.
1596 *
1597 * This doesn't remove the inode from the writeback list it is on, except
1598 * potentially to move it from b_dirty_time to b_dirty due to timestamp
1599 * expiration.  The caller is otherwise responsible for writeback list handling.
1600 *
1601 * The caller is also responsible for setting the I_SYNC flag beforehand and
1602 * calling inode_sync_complete() to clear it afterwards.
1603 */
1604static int
1605__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
1606{
1607        struct address_space *mapping = inode->i_mapping;
1608        long nr_to_write = wbc->nr_to_write;
1609        unsigned dirty;
1610        int ret;
1611
1612        WARN_ON(!(inode->i_state & I_SYNC));
1613
1614        trace_writeback_single_inode_start(inode, wbc, nr_to_write);
1615
1616        ret = do_writepages(mapping, wbc);
1617
1618        /*
1619         * Make sure to wait on the data before writing out the metadata.
1620         * This is important for filesystems that modify metadata on data
1621         * I/O completion. We don't do it for sync(2) writeback because it has a
1622         * separate, external IO completion path and ->sync_fs for guaranteeing
1623         * inode metadata is written back correctly.
1624         */
1625        if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
1626                int err = filemap_fdatawait(mapping);
1627                if (ret == 0)
1628                        ret = err;
1629        }
1630
1631        /*
1632         * If the inode has dirty timestamps and we need to write them, call
1633         * mark_inode_dirty_sync() to notify the filesystem about it and to
1634         * change I_DIRTY_TIME into I_DIRTY_SYNC.
1635         */
1636        if ((inode->i_state & I_DIRTY_TIME) &&
1637            (wbc->sync_mode == WB_SYNC_ALL ||
1638             time_after(jiffies, inode->dirtied_time_when +
1639                        dirtytime_expire_interval * HZ))) {
1640                trace_writeback_lazytime(inode);
1641                mark_inode_dirty_sync(inode);
1642        }
1643
1644        /*
1645         * Get and clear the dirty flags from i_state.  This needs to be done
1646         * after calling writepages because some filesystems may redirty the
1647         * inode during writepages due to delalloc.  It also needs to be done
1648         * after handling timestamp expiration, as that may dirty the inode too.
1649         */
1650        spin_lock(&inode->i_lock);
1651        dirty = inode->i_state & I_DIRTY;
1652        inode->i_state &= ~dirty;
1653
1654        /*
1655         * Paired with smp_mb() in __mark_inode_dirty().  This allows
1656         * __mark_inode_dirty() to test i_state without grabbing i_lock -
1657         * either they see the I_DIRTY bits cleared or we see the dirtied
1658         * inode.
1659         *
1660         * I_DIRTY_PAGES is always cleared together above even if @mapping
1661         * still has dirty pages.  The flag is reinstated after smp_mb() if
1662         * necessary.  This guarantees that either __mark_inode_dirty()
1663         * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
1664         */
1665        smp_mb();
1666
1667        if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
1668                inode->i_state |= I_DIRTY_PAGES;
1669
1670        spin_unlock(&inode->i_lock);
1671
1672        /* Don't write the inode if only I_DIRTY_PAGES was set */
1673        if (dirty & ~I_DIRTY_PAGES) {
1674                int err = write_inode(inode, wbc);
1675                if (ret == 0)
1676                        ret = err;
1677        }
1678        trace_writeback_single_inode(inode, wbc, nr_to_write);
1679        return ret;
1680}
1681
1682/*
1683 * Write out an inode's dirty data and metadata on-demand, i.e. separately from
1684 * the regular batched writeback done by the flusher threads in
1685 * writeback_sb_inodes().  @wbc controls various aspects of the write, such as
1686 * whether it is a data-integrity sync (%WB_SYNC_ALL) or not (%WB_SYNC_NONE).
1687 *
1688 * To prevent the inode from going away, either the caller must have a reference
1689 * to the inode, or the inode must have I_WILL_FREE or I_FREEING set.
1690 */
1691static int writeback_single_inode(struct inode *inode,
1692                                  struct writeback_control *wbc)
1693{
1694        struct bdi_writeback *wb;
1695        int ret = 0;
1696
1697        spin_lock(&inode->i_lock);
1698        if (!atomic_read(&inode->i_count))
1699                WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
1700        else
1701                WARN_ON(inode->i_state & I_WILL_FREE);
1702
1703        if (inode->i_state & I_SYNC) {
1704                /*
1705                 * Writeback is already running on the inode.  For WB_SYNC_NONE,
1706                 * that's enough and we can just return.  For WB_SYNC_ALL, we
1707                 * must wait for the existing writeback to complete, then do
1708                 * writeback again if there's anything left.
1709                 */
1710                if (wbc->sync_mode != WB_SYNC_ALL)
1711                        goto out;
1712                __inode_wait_for_writeback(inode);
1713        }
1714        WARN_ON(inode->i_state & I_SYNC);
1715        /*
1716         * If the inode is already fully clean, then there's nothing to do.
1717         *
1718         * For data-integrity syncs we also need to check whether any pages are
1719         * still under writeback, e.g. due to prior WB_SYNC_NONE writeback.  If
1720         * there are any such pages, we'll need to wait for them.
1721         */
1722        if (!(inode->i_state & I_DIRTY_ALL) &&
1723            (wbc->sync_mode != WB_SYNC_ALL ||
1724             !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
1725                goto out;
1726        inode->i_state |= I_SYNC;
1727        wbc_attach_and_unlock_inode(wbc, inode);
1728
1729        ret = __writeback_single_inode(inode, wbc);
1730
1731        wbc_detach_inode(wbc);
1732
1733        wb = inode_to_wb_and_lock_list(inode);
1734        spin_lock(&inode->i_lock);
1735        /*
1736         * If the inode is now fully clean, then it can be safely removed from
1737         * its writeback list (if any).  Otherwise the flusher threads are
1738         * responsible for the writeback lists.
1739         */
1740        if (!(inode->i_state & I_DIRTY_ALL))
1741                inode_cgwb_move_to_attached(inode, wb);
1742        spin_unlock(&wb->list_lock);
1743        inode_sync_complete(inode);
1744out:
1745        spin_unlock(&inode->i_lock);
1746        return ret;
1747}
1748
1749static long writeback_chunk_size(struct bdi_writeback *wb,
1750                                 struct wb_writeback_work *work)
1751{
1752        long pages;
1753
1754        /*
1755         * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
1756         * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
1757         * here avoids calling into writeback_sb_inodes() more than once.
1758         *
1759         * The intended call sequence for WB_SYNC_ALL writeback is:
1760         *
1761         *      wb_writeback()
1762         *          writeback_sb_inodes()       <== called only once
1763         *              write_cache_pages()     <== called once for each inode
1764         *                   (quickly) tag currently dirty pages
1765         *                   (maybe slowly) sync all tagged pages
1766         */
1767        if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
1768                pages = LONG_MAX;
1769        else {
1770                pages = min(wb->avg_write_bandwidth / 2,
1771                            global_wb_domain.dirty_limit / DIRTY_SCOPE);
1772                pages = min(pages, work->nr_pages);
1773                pages = round_down(pages + MIN_WRITEBACK_PAGES,
1774                                   MIN_WRITEBACK_PAGES);
1775        }
1776
1777        return pages;
1778}
1779
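    /*
     * Worked example for the non-integrity branch above, assuming 4KiB pages so
     * that MIN_WRITEBACK_PAGES works out to 1024 pages (4MiB), and assuming the
     * global dirty limit term and work->nr_pages are both larger than
     * wb->avg_write_bandwidth / 2 == 1500 pages: round_down(1500 + 1024, 1024)
     * == 2048 pages, so the chunk handed to the wbc ends up a whole multiple of
     * the 4MiB minimum.
     */
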
1780/*
1781 * Write a portion of b_io inodes which belong to @sb.
1782 *
1783 * Return the number of pages and/or inodes written.
1784 *
1785 * NOTE! This is called with wb->list_lock held, and will
1786 * unlock and relock that for each inode it ends up doing
1787 * IO for.
1788 */
1789static long writeback_sb_inodes(struct super_block *sb,
1790                                struct bdi_writeback *wb,
1791                                struct wb_writeback_work *work)
1792{
1793        struct writeback_control wbc = {
1794                .sync_mode              = work->sync_mode,
1795                .tagged_writepages      = work->tagged_writepages,
1796                .for_kupdate            = work->for_kupdate,
1797                .for_background         = work->for_background,
1798                .for_sync               = work->for_sync,
1799                .range_cyclic           = work->range_cyclic,
1800                .range_start            = 0,
1801                .range_end              = LLONG_MAX,
1802        };
1803        unsigned long start_time = jiffies;
1804        long write_chunk;
1805        long wrote = 0;  /* count both pages and inodes */
1806
1807        while (!list_empty(&wb->b_io)) {
1808                struct inode *inode = wb_inode(wb->b_io.prev);
1809                struct bdi_writeback *tmp_wb;
1810
1811                if (inode->i_sb != sb) {
1812                        if (work->sb) {
1813                                /*
1814                                 * We only want to write back data for this
1815                                 * superblock; move all inodes not belonging
1816                                 * to it back onto the dirty list.
1817                                 */
1818                                redirty_tail(inode, wb);
1819                                continue;
1820                        }
1821
1822                        /*
1823                         * The inode belongs to a different superblock.
1824                         * Bounce back to the caller to unpin this and
1825                         * pin the next superblock.
1826                         */
1827                        break;
1828                }
1829
1830                /*
1831                 * Don't bother with new inodes or inodes being freed: the first
1832                 * kind does not need periodic writeout yet, and for the latter
1833                 * kind writeout is handled by the freer.
1834                 */
1835                spin_lock(&inode->i_lock);
1836                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
1837                        redirty_tail_locked(inode, wb);
1838                        spin_unlock(&inode->i_lock);
1839                        continue;
1840                }
1841                if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
1842                        /*
1843                         * If this inode is locked for writeback and we are not
1844                         * doing writeback-for-data-integrity, move it to
1845                         * b_more_io so that writeback can proceed with the
1846                         * other inodes on s_io.
1847                         *
1848                         * We'll have another go at writing back this inode
1849                         * when we completed a full scan of b_io.
1850                         */
1851                        spin_unlock(&inode->i_lock);
1852                        requeue_io(inode, wb);
1853                        trace_writeback_sb_inodes_requeue(inode);
1854                        continue;
1855                }
1856                spin_unlock(&wb->list_lock);
1857
1858                /*
1859                 * We already requeued the inode if it had I_SYNC set and we
1860                 * are doing WB_SYNC_NONE writeback. So this catches only the
1861                 * WB_SYNC_ALL case.
1862                 */
1863                if (inode->i_state & I_SYNC) {
1864                        /* Wait for I_SYNC. This function drops i_lock... */
1865                        inode_sleep_on_writeback(inode);
1866                        /* Inode may be gone, start again */
1867                        spin_lock(&wb->list_lock);
1868                        continue;
1869                }
1870                inode->i_state |= I_SYNC;
1871                wbc_attach_and_unlock_inode(&wbc, inode);
1872
1873                write_chunk = writeback_chunk_size(wb, work);
1874                wbc.nr_to_write = write_chunk;
1875                wbc.pages_skipped = 0;
1876
1877                /*
1878                 * We use I_SYNC to pin the inode in memory. While it is set
1879                 * evict_inode() will wait so the inode cannot be freed.
1880                 */
1881                __writeback_single_inode(inode, &wbc);
1882
1883                wbc_detach_inode(&wbc);
1884                work->nr_pages -= write_chunk - wbc.nr_to_write;
1885                wrote += write_chunk - wbc.nr_to_write;
1886
1887                if (need_resched()) {
1888                        /*
1889                         * We're trying to balance between building up a nice
1890                         * long list of IOs to improve our merge rate, and
1891                         * getting those IOs out quickly for anyone throttling
1892                         * in balance_dirty_pages().  cond_resched() doesn't
1893                         * unplug, so get our IOs out the door before we
1894                         * give up the CPU.
1895                         */
1896                        blk_flush_plug(current);
1897                        cond_resched();
1898                }
1899
1900                /*
1901                 * Requeue @inode if still dirty.  Be careful as @inode may
1902                 * have been switched to another wb in the meantime.
1903                 */
1904                tmp_wb = inode_to_wb_and_lock_list(inode);
1905                spin_lock(&inode->i_lock);
1906                if (!(inode->i_state & I_DIRTY_ALL))
1907                        wrote++;
1908                requeue_inode(inode, tmp_wb, &wbc);
1909                inode_sync_complete(inode);
1910                spin_unlock(&inode->i_lock);
1911
1912                if (unlikely(tmp_wb != wb)) {
1913                        spin_unlock(&tmp_wb->list_lock);
1914                        spin_lock(&wb->list_lock);
1915                }
1916
1917                /*
1918                 * bail out to wb_writeback() often enough to check
1919                 * background threshold and other termination conditions.
1920                 */
1921                if (wrote) {
1922                        if (time_is_before_jiffies(start_time + HZ / 10UL))
1923                                break;
1924                        if (work->nr_pages <= 0)
1925                                break;
1926                }
1927        }
1928        return wrote;
1929}
1930
1931static long __writeback_inodes_wb(struct bdi_writeback *wb,
1932                                  struct wb_writeback_work *work)
1933{
1934        unsigned long start_time = jiffies;
1935        long wrote = 0;
1936
1937        while (!list_empty(&wb->b_io)) {
1938                struct inode *inode = wb_inode(wb->b_io.prev);
1939                struct super_block *sb = inode->i_sb;
1940
1941                if (!trylock_super(sb)) {
1942                        /*
1943                         * trylock_super() may fail consistently due to
1944                         * s_umount being grabbed by someone else. Don't use
1945                         * requeue_io() to avoid busy retrying the inode/sb.
1946                         */
1947                        redirty_tail(inode, wb);
1948                        continue;
1949                }
1950                wrote += writeback_sb_inodes(sb, wb, work);
1951                up_read(&sb->s_umount);
1952
1953                /* refer to the same tests at the end of writeback_sb_inodes */
1954                if (wrote) {
1955                        if (time_is_before_jiffies(start_time + HZ / 10UL))
1956                                break;
1957                        if (work->nr_pages <= 0)
1958                                break;
1959                }
1960        }
1961        /* Leave any unwritten inodes on b_io */
1962        return wrote;
1963}
1964
1965static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
1966                                enum wb_reason reason)
1967{
1968        struct wb_writeback_work work = {
1969                .nr_pages       = nr_pages,
1970                .sync_mode      = WB_SYNC_NONE,
1971                .range_cyclic   = 1,
1972                .reason         = reason,
1973        };
1974        struct blk_plug plug;
1975
1976        blk_start_plug(&plug);
1977        spin_lock(&wb->list_lock);
1978        if (list_empty(&wb->b_io))
1979                queue_io(wb, &work, jiffies);
1980        __writeback_inodes_wb(wb, &work);
1981        spin_unlock(&wb->list_lock);
1982        blk_finish_plug(&plug);
1983
1984        return nr_pages - work.nr_pages;
1985}
1986
1987/*
1988 * Explicit flushing or periodic writeback of "old" data.
1989 *
1990 * Define "old": the first time one of an inode's pages is dirtied, we mark the
1991 * dirtying-time in the inode's address_space.  So this periodic writeback code
1992 * just walks the superblock inode list, writing back any inodes which are
1993 * older than a specific point in time.
1994 *
1995 * Try to run once per dirty_writeback_interval.  But if a writeback event
1996 * takes longer than one dirty_writeback_interval, then leave a
1997 * one-second gap.
1998 *
1999 * dirtied_before takes precedence over nr_to_write.  So we'll only write back
2000 * all dirty pages if they are all attached to "old" mappings.
2001 */
2002static long wb_writeback(struct bdi_writeback *wb,
2003                         struct wb_writeback_work *work)
2004{
2005        long nr_pages = work->nr_pages;
2006        unsigned long dirtied_before = jiffies;
2007        struct inode *inode;
2008        long progress;
2009        struct blk_plug plug;
2010
2011        blk_start_plug(&plug);
2012        spin_lock(&wb->list_lock);
2013        for (;;) {
2014                /*
2015                 * Stop writeback when nr_pages has been consumed
2016                 */
2017                if (work->nr_pages <= 0)
2018                        break;
2019
2020                /*
2021                 * Background writeout and kupdate-style writeback may
2022                 * run forever. Stop them if there is other work to do
2023                 * so that e.g. sync can proceed. They'll be restarted
2024                 * after the other works are all done.
2025                 */
2026                if ((work->for_background || work->for_kupdate) &&
2027                    !list_empty(&wb->work_list))
2028                        break;
2029
2030                /*
2031                 * For background writeout, stop when we are below the
2032                 * background dirty threshold
2033                 */
2034                if (work->for_background && !wb_over_bg_thresh(wb))
2035                        break;
2036
2037                /*
2038                 * Kupdate and background works are special and we want to
2039                 * include all inodes that need writing. Livelock avoidance is
2040                 * handled by these works yielding to any other work so we are
2041                 * safe.
2042                 */
2043                if (work->for_kupdate) {
2044                        dirtied_before = jiffies -
2045                                msecs_to_jiffies(dirty_expire_interval * 10);
2046                } else if (work->for_background)
2047                        dirtied_before = jiffies;
2048
2049                trace_writeback_start(wb, work);
2050                if (list_empty(&wb->b_io))
2051                        queue_io(wb, work, dirtied_before);
2052                if (work->sb)
2053                        progress = writeback_sb_inodes(work->sb, wb, work);
2054                else
2055                        progress = __writeback_inodes_wb(wb, work);
2056                trace_writeback_written(wb, work);
2057
2058                /*
2059                 * Did we write something? Try for more
2060                 *
2061                 * Dirty inodes are moved to b_io for writeback in batches.
2062                 * The completion of the current batch does not necessarily
2063                 * mean the overall work is done. So we keep looping as long
2064                 * as made some progress on cleaning pages or inodes.
2065                 */
2066                if (progress)
2067                        continue;
2068                /*
2069                 * No more inodes for IO, bail
2070                 */
2071                if (list_empty(&wb->b_more_io))
2072                        break;
2073                /*
2074                 * Nothing written. Wait for some inode to
2075                 * become available for writeback. Otherwise
2076                 * we'll just busyloop.
2077                 */
2078                trace_writeback_wait(wb, work);
2079                inode = wb_inode(wb->b_more_io.prev);
2080                spin_lock(&inode->i_lock);
2081                spin_unlock(&wb->list_lock);
2082                /* This function drops i_lock... */
2083                inode_sleep_on_writeback(inode);
2084                spin_lock(&wb->list_lock);
2085        }
2086        spin_unlock(&wb->list_lock);
2087        blk_finish_plug(&plug);
2088
2089        return nr_pages - work->nr_pages;
2090}
2091
2092/*
2093 * Return the next wb_writeback_work struct that hasn't been processed yet.
2094 */
2095static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
2096{
2097        struct wb_writeback_work *work = NULL;
2098
2099        spin_lock_bh(&wb->work_lock);
2100        if (!list_empty(&wb->work_list)) {
2101                work = list_entry(wb->work_list.next,
2102                                  struct wb_writeback_work, list);
2103                list_del_init(&work->list);
2104        }
2105        spin_unlock_bh(&wb->work_lock);
2106        return work;
2107}
2108
2109static long wb_check_background_flush(struct bdi_writeback *wb)
2110{
2111        if (wb_over_bg_thresh(wb)) {
2112
2113                struct wb_writeback_work work = {
2114                        .nr_pages       = LONG_MAX,
2115                        .sync_mode      = WB_SYNC_NONE,
2116                        .for_background = 1,
2117                        .range_cyclic   = 1,
2118                        .reason         = WB_REASON_BACKGROUND,
2119                };
2120
2121                return wb_writeback(wb, &work);
2122        }
2123
2124        return 0;
2125}
2126
2127static long wb_check_old_data_flush(struct bdi_writeback *wb)
2128{
2129        unsigned long expired;
2130        long nr_pages;
2131
2132        /*
2133         * When set to zero, disable periodic writeback
2134         */
2135        if (!dirty_writeback_interval)
2136                return 0;
2137
2138        expired = wb->last_old_flush +
2139                        msecs_to_jiffies(dirty_writeback_interval * 10);
2140        if (time_before(jiffies, expired))
2141                return 0;
2142
2143        wb->last_old_flush = jiffies;
2144        nr_pages = get_nr_dirty_pages();
2145
2146        if (nr_pages) {
2147                struct wb_writeback_work work = {
2148                        .nr_pages       = nr_pages,
2149                        .sync_mode      = WB_SYNC_NONE,
2150                        .for_kupdate    = 1,
2151                        .range_cyclic   = 1,
2152                        .reason         = WB_REASON_PERIODIC,
2153                };
2154
2155                return wb_writeback(wb, &work);
2156        }
2157
2158        return 0;
2159}
2160
2161static long wb_check_start_all(struct bdi_writeback *wb)
2162{
2163        long nr_pages;
2164
2165        if (!test_bit(WB_start_all, &wb->state))
2166                return 0;
2167
2168        nr_pages = get_nr_dirty_pages();
2169        if (nr_pages) {
2170                struct wb_writeback_work work = {
2171                        .nr_pages       = wb_split_bdi_pages(wb, nr_pages),
2172                        .sync_mode      = WB_SYNC_NONE,
2173                        .range_cyclic   = 1,
2174                        .reason         = wb->start_all_reason,
2175                };
2176
2177                nr_pages = wb_writeback(wb, &work);
2178        }
2179
2180        clear_bit(WB_start_all, &wb->state);
2181        return nr_pages;
2182}
2183
2184
2185/*
2186 * Retrieve work items and do the writeback they describe
2187 */
2188static long wb_do_writeback(struct bdi_writeback *wb)
2189{
2190        struct wb_writeback_work *work;
2191        long wrote = 0;
2192
2193        set_bit(WB_writeback_running, &wb->state);
2194        while ((work = get_next_work_item(wb)) != NULL) {
2195                trace_writeback_exec(wb, work);
2196                wrote += wb_writeback(wb, work);
2197                finish_writeback_work(wb, work);
2198        }
2199
2200        /*
2201         * Check for a flush-everything request
2202         */
2203        wrote += wb_check_start_all(wb);
2204
2205        /*
2206         * Check for periodic writeback, kupdated() style
2207         */
2208        wrote += wb_check_old_data_flush(wb);
2209        wrote += wb_check_background_flush(wb);
2210        clear_bit(WB_writeback_running, &wb->state);
2211
2212        return wrote;
2213}
2214
2215/*
2216 * Handle writeback of dirty data for the device backed by this bdi. Also
2217 * reschedules periodically and does kupdated style flushing.
2218 */
2219void wb_workfn(struct work_struct *work)
2220{
2221        struct bdi_writeback *wb = container_of(to_delayed_work(work),
2222                                                struct bdi_writeback, dwork);
2223        long pages_written;
2224
2225        set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
2226        current->flags |= PF_SWAPWRITE;
2227
2228        if (likely(!current_is_workqueue_rescuer() ||
2229                   !test_bit(WB_registered, &wb->state))) {
2230                /*
2231                 * The normal path.  Keep writing back @wb until its
2232                 * work_list is empty.  Note that this path is also taken
2233                 * if @wb is shutting down even when we're running off the
2234                 * rescuer as work_list needs to be drained.
2235                 */
2236                do {
2237                        pages_written = wb_do_writeback(wb);
2238                        trace_writeback_pages_written(pages_written);
2239                } while (!list_empty(&wb->work_list));
2240        } else {
2241                /*
2242                 * bdi_wq can't get enough workers and we're running off
2243                 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
2244                 * enough for efficient IO.
2245                 */
2246                pages_written = writeback_inodes_wb(wb, 1024,
2247                                                    WB_REASON_FORKER_THREAD);
2248                trace_writeback_pages_written(pages_written);
2249        }
2250
2251        if (!list_empty(&wb->work_list))
2252                wb_wakeup(wb);
2253        else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2254                wb_wakeup_delayed(wb);
2255
2256        current->flags &= ~PF_SWAPWRITE;
2257}
2258
2259/*
2260 * Start writeback on this bdi for the given reason, asking each of its wbs
2261 * to write back all of their currently dirty pages.
2262 */
2263static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2264                                         enum wb_reason reason)
2265{
2266        struct bdi_writeback *wb;
2267
2268        if (!bdi_has_dirty_io(bdi))
2269                return;
2270
2271        list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2272                wb_start_writeback(wb, reason);
2273}
2274
2275void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2276                                enum wb_reason reason)
2277{
2278        rcu_read_lock();
2279        __wakeup_flusher_threads_bdi(bdi, reason);
2280        rcu_read_unlock();
2281}
2282
2283/*
2284 * Wakeup the flusher threads to start writeback of all currently dirty pages
2285 */
2286void wakeup_flusher_threads(enum wb_reason reason)
2287{
2288        struct backing_dev_info *bdi;
2289
2290        /*
2291         * If we are expecting writeback progress we must submit plugged IO.
2292         */
2293        if (blk_needs_flush_plug(current))
2294                blk_schedule_flush_plug(current);
2295
2296        rcu_read_lock();
2297        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2298                __wakeup_flusher_threads_bdi(bdi, reason);
2299        rcu_read_unlock();
2300}
2301
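    /*
     * Typical callers, listed here as an illustration of the reasons rather
     * than as an exhaustive reference: sys_sync() kicks every flusher with
     * wakeup_flusher_threads(WB_REASON_SYNC) before it starts waiting, and
     * memory reclaim may call wakeup_flusher_threads(WB_REASON_VMSCAN) when it
     * keeps running into dirty pages it cannot clean itself.
     */
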
2302/*
2303 * Wake up bdis periodically to make sure dirtytime inodes get
2304 * written back periodically.  We deliberately do *not* check the
2305 * b_dirty_time list in wb_has_dirty_io(), since this would cause the
2306 * kernel to be constantly waking up once there are any dirtytime
2307 * inodes on the system.  So instead we define a separate delayed work
2308 * function which gets called much more rarely.  (By default, only
2309 * once every 12 hours.)
2310 *
2311 * If there is any other write activity going on in the file system,
2312 * this function won't be necessary.  But if the only thing that has
2313 * happened on the file system is a dirtytime inode caused by an atime
2314 * update, we need this infrastructure below to make sure that inode
2315 * eventually gets pushed out to disk.
2316 */
2317static void wakeup_dirtytime_writeback(struct work_struct *w);
2318static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
2319
2320static void wakeup_dirtytime_writeback(struct work_struct *w)
2321{
2322        struct backing_dev_info *bdi;
2323
2324        rcu_read_lock();
2325        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
2326                struct bdi_writeback *wb;
2327
2328                list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2329                        if (!list_empty(&wb->b_dirty_time))
2330                                wb_wakeup(wb);
2331        }
2332        rcu_read_unlock();
2333        schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2334}
2335
2336static int __init start_dirtytime_writeback(void)
2337{
2338        schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2339        return 0;
2340}
2341__initcall(start_dirtytime_writeback);
2342
2343int dirtytime_interval_handler(struct ctl_table *table, int write,
2344                               void *buffer, size_t *lenp, loff_t *ppos)
2345{
2346        int ret;
2347
2348        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2349        if (ret == 0 && write)
2350                mod_delayed_work(system_wq, &dirtytime_work, 0);
2351        return ret;
2352}
2353
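    /*
     * Illustrative usage, assuming this handler is wired up as the
     * "dirtytime_expire_seconds" sysctl under /proc/sys/vm (as in mainline
     * kernels):
     *
     *         # expire lazytime timestamps after one hour instead of twelve
     *         echo 3600 > /proc/sys/vm/dirtytime_expire_seconds
     *
     * A successful write immediately runs dirtytime_work via the
     * mod_delayed_work() call above, and the work then rearms itself with the
     * new interval.
     */
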
2354/**
2355 * __mark_inode_dirty - internal function to mark an inode dirty
2356 *
2357 * @inode: inode to mark
2358 * @flags: what kind of dirty, e.g. I_DIRTY_SYNC.  This can be a combination of
2359 *         multiple I_DIRTY_* flags, except that I_DIRTY_TIME can't be combined
2360 *         with I_DIRTY_PAGES.
2361 *
2362 * Mark an inode as dirty.  We notify the filesystem, then update the inode's
2363 * dirty flags.  Then, if needed we add the inode to the appropriate dirty list.
2364 *
2365 * Most callers should use mark_inode_dirty() or mark_inode_dirty_sync()
2366 * instead of calling this directly.
2367 *
2368 * CAREFUL!  We only add the inode to the dirty list if it is hashed or if it
2369 * refers to a blockdev.  Unhashed inodes will never be added to the dirty list
2370 * even if they are later hashed, as they will have been marked dirty already.
2371 *
2372 * In short, ensure you hash any inodes _before_ you start marking them dirty.
2373 *
2374 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
2375 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
2376 * the kernel-internal blockdev inode represents the dirtying time of the
2377 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
2378 * page->mapping->host, so the page-dirtying time is recorded in the internal
2379 * blockdev inode.
2380 */
2381void __mark_inode_dirty(struct inode *inode, int flags)
2382{
2383        struct super_block *sb = inode->i_sb;
2384        int dirtytime = 0;
2385
2386        trace_writeback_mark_inode_dirty(inode, flags);
2387
2388        if (flags & I_DIRTY_INODE) {
2389                /*
2390                 * Notify the filesystem about the inode being dirtied, so that
2391                 * (if needed) it can update on-disk fields and journal the
2392                 * inode.  This is only needed when the inode itself is being
2393                 * dirtied now.  I.e. it's only needed for I_DIRTY_INODE, not
2394                 * for just I_DIRTY_PAGES or I_DIRTY_TIME.
2395                 */
2396                trace_writeback_dirty_inode_start(inode, flags);
2397                if (sb->s_op->dirty_inode)
2398                        sb->s_op->dirty_inode(inode, flags & I_DIRTY_INODE);
2399                trace_writeback_dirty_inode(inode, flags);
2400
2401                /* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
2402                flags &= ~I_DIRTY_TIME;
2403        } else {
2404                /*
2405                 * Else it's either I_DIRTY_PAGES, I_DIRTY_TIME, or nothing.
2406                 * (We don't support setting both I_DIRTY_PAGES and I_DIRTY_TIME
2407                 * in one call to __mark_inode_dirty().)
2408                 */
2409                dirtytime = flags & I_DIRTY_TIME;
2410                WARN_ON_ONCE(dirtytime && flags != I_DIRTY_TIME);
2411        }
2412
2413        /*
2414         * Paired with smp_mb() in __writeback_single_inode() for the
2415         * following lockless i_state test.  See there for details.
2416         */
2417        smp_mb();
2418
2419        if (((inode->i_state & flags) == flags) ||
2420            (dirtytime && (inode->i_state & I_DIRTY_INODE)))
2421                return;
2422
2423        spin_lock(&inode->i_lock);
2424        if (dirtytime && (inode->i_state & I_DIRTY_INODE))
2425                goto out_unlock_inode;
2426        if ((inode->i_state & flags) != flags) {
2427                const int was_dirty = inode->i_state & I_DIRTY;
2428
2429                inode_attach_wb(inode, NULL);
2430
2431                /* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
2432                if (flags & I_DIRTY_INODE)
2433                        inode->i_state &= ~I_DIRTY_TIME;
2434                inode->i_state |= flags;
2435
2436                /*
2437                 * If the inode is queued for writeback by the flush worker, just
2438                 * update its dirty state. Once the flush worker is done with
2439                 * the inode it will place it on the appropriate superblock
2440                 * list, based upon its state.
2441                 */
2442                if (inode->i_state & I_SYNC_QUEUED)
2443                        goto out_unlock_inode;
2444
2445                /*
2446                 * Only add valid (hashed) inodes to the superblock's
2447                 * dirty list.  Add blockdev inodes as well.
2448                 */
2449                if (!S_ISBLK(inode->i_mode)) {
2450                        if (inode_unhashed(inode))
2451                                goto out_unlock_inode;
2452                }
2453                if (inode->i_state & I_FREEING)
2454                        goto out_unlock_inode;
2455
2456                /*
2457                 * If the inode was already on b_dirty/b_io/b_more_io, don't
2458                 * reposition it (that would break b_dirty time-ordering).
2459                 */
2460                if (!was_dirty) {
2461                        struct bdi_writeback *wb;
2462                        struct list_head *dirty_list;
2463                        bool wakeup_bdi = false;
2464
2465                        wb = locked_inode_to_wb_and_lock_list(inode);
2466
2467                        inode->dirtied_when = jiffies;
2468                        if (dirtytime)
2469                                inode->dirtied_time_when = jiffies;
2470
2471                        if (inode->i_state & I_DIRTY)
2472                                dirty_list = &wb->b_dirty;
2473                        else
2474                                dirty_list = &wb->b_dirty_time;
2475
2476                        wakeup_bdi = inode_io_list_move_locked(inode, wb,
2477                                                               dirty_list);
2478
2479                        spin_unlock(&wb->list_lock);
2480                        trace_writeback_dirty_inode_enqueue(inode);
2481
2482                        /*
2483                         * If this is the first dirty inode for this bdi,
2484                         * we have to wake up the corresponding bdi thread
2485                         * to make sure background write-back happens
2486                         * later.
2487                         */
2488                        if (wakeup_bdi &&
2489                            (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
2490                                wb_wakeup_delayed(wb);
2491                        return;
2492                }
2493        }
2494out_unlock_inode:
2495        spin_unlock(&inode->i_lock);
2496}
2497EXPORT_SYMBOL(__mark_inode_dirty);
2498
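    /*
     * Most filesystem code reaches __mark_inode_dirty() through the helpers in
     * <linux/fs.h> rather than calling it directly.  An illustrative sketch of
     * a typical caller (not taken from any particular filesystem):
     *
     *         i_size_write(inode, new_size);
     *         inode->i_mtime = inode->i_ctime = current_time(inode);
     *         mark_inode_dirty(inode);        // __mark_inode_dirty(inode, I_DIRTY)
     *
     * mark_inode_dirty_sync() similarly passes just I_DIRTY_SYNC.
     */
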
2499/*
2500 * The @s_sync_lock is used to serialise concurrent sync operations
2501 * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
2502 * Concurrent callers will block on the s_sync_lock rather than doing contending
2503 * walks. The queueing maintains the behaviour required by sync(2): all the IO
2504 * that has been issued up to the time this function is entered is guaranteed to
2505 * be completed by the time we have gained the lock and waited for all IO that is
2506 * in progress, regardless of the order in which callers are granted the lock.
2507 */
2508static void wait_sb_inodes(struct super_block *sb)
2509{
2510        LIST_HEAD(sync_list);
2511
2512        /*
2513         * We need to be protected against the filesystem going from
2514         * r/o to r/w or vice versa.
2515         */
2516        WARN_ON(!rwsem_is_locked(&sb->s_umount));
2517
2518        mutex_lock(&sb->s_sync_lock);
2519
2520        /*
2521         * Splice the writeback list onto a temporary list to avoid waiting on
2522         * inodes that have started writeback after this point.
2523         *
2524         * Use rcu_read_lock() to keep the inodes around until we have a
2525         * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
2526         * the local list because inodes can be dropped from either by writeback
2527         * completion.
2528         */
2529        rcu_read_lock();
2530        spin_lock_irq(&sb->s_inode_wblist_lock);
2531        list_splice_init(&sb->s_inodes_wb, &sync_list);
2532
2533        /*
2534         * Data integrity sync. Must wait for all pages under writeback, because
2535         * there may have been pages dirtied before our sync call for which
2536         * writeout had already started before we got here.  In that case, the inode
2537         * may not be on the dirty list, but we still have to wait for that
2538         * writeout.
2539         */
2540        while (!list_empty(&sync_list)) {
2541                struct inode *inode = list_first_entry(&sync_list, struct inode,
2542                                                       i_wb_list);
2543                struct address_space *mapping = inode->i_mapping;
2544
2545                /*
2546                 * Move each inode back to the wb list before we drop the lock
2547                 * to preserve consistency between i_wb_list and the mapping
2548                 * writeback tag. Writeback completion is responsible for removing
2549                 * the inode from either list once the writeback tag is cleared.
2550                 */
2551                list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
2552
2553                /*
2554                 * The mapping can appear untagged while still on-list since we
2555                 * do not have the mapping lock. Skip it here, wb completion
2556                 * will remove it.
2557                 */
2558                if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
2559                        continue;
2560
2561                spin_unlock_irq(&sb->s_inode_wblist_lock);
2562
2563                spin_lock(&inode->i_lock);
2564                if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
2565                        spin_unlock(&inode->i_lock);
2566
2567                        spin_lock_irq(&sb->s_inode_wblist_lock);
2568                        continue;
2569                }
2570                __iget(inode);
2571                spin_unlock(&inode->i_lock);
2572                rcu_read_unlock();
2573
2574                /*
2575                 * We keep the error status of individual mapping so that
2576                 * applications can catch the writeback error using fsync(2).
2577                 * See filemap_fdatawait_keep_errors() for details.
2578                 */
2579                filemap_fdatawait_keep_errors(mapping);
2580
2581                cond_resched();
2582
2583                iput(inode);
2584
2585                rcu_read_lock();
2586                spin_lock_irq(&sb->s_inode_wblist_lock);
2587        }
2588        spin_unlock_irq(&sb->s_inode_wblist_lock);
2589        rcu_read_unlock();
2590        mutex_unlock(&sb->s_sync_lock);
2591}
2592
2593static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
2594                                     enum wb_reason reason, bool skip_if_busy)
2595{
2596        struct backing_dev_info *bdi = sb->s_bdi;
2597        DEFINE_WB_COMPLETION(done, bdi);
2598        struct wb_writeback_work work = {
2599                .sb                     = sb,
2600                .sync_mode              = WB_SYNC_NONE,
2601                .tagged_writepages      = 1,
2602                .done                   = &done,
2603                .nr_pages               = nr,
2604                .reason                 = reason,
2605        };
2606
2607        if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
2608                return;
2609        WARN_ON(!rwsem_is_locked(&sb->s_umount));
2610
2611        bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
2612        wb_wait_for_completion(&done);
2613}
2614
2615/**
2616 * writeback_inodes_sb_nr -     writeback dirty inodes from given super_block
2617 * @sb: the superblock
2618 * @nr: the number of pages to write
2619 * @reason: reason why some writeback work was initiated
2620 *
2621 * Start writeback on some inodes on this super_block. No guarantees are made
2622 * on how many (if any) will be written, and this function does not wait
2623 * for IO completion of submitted IO.
2624 */
2625void writeback_inodes_sb_nr(struct super_block *sb,
2626                            unsigned long nr,
2627                            enum wb_reason reason)
2628{
2629        __writeback_inodes_sb_nr(sb, nr, reason, false);
2630}
2631EXPORT_SYMBOL(writeback_inodes_sb_nr);
2632
2633/**
2634 * writeback_inodes_sb  -       writeback dirty inodes from given super_block
2635 * @sb: the superblock
2636 * @reason: reason why some writeback work was initiated
2637 *
2638 * Start writeback on some inodes on this super_block. No guarantees are made
2639 * on how many (if any) will be written, and this function does not wait
2640 * for IO completion of submitted IO.
2641 */
2642void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2643{
2644        return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
2645}
2646EXPORT_SYMBOL(writeback_inodes_sb);
2647
2648/**
2649 * try_to_writeback_inodes_sb - try to start writeback if none underway
2650 * @sb: the superblock
2651 * @reason: reason why some writeback work was initiated
2652 *
2653 * Invoke __writeback_inodes_sb_nr if no writeback is currently underway.
2654 */
2655void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2656{
2657        if (!down_read_trylock(&sb->s_umount))
2658                return;
2659
2660        __writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
2661        up_read(&sb->s_umount);
2662}
2663EXPORT_SYMBOL(try_to_writeback_inodes_sb);
2664
2665/**
2666 * sync_inodes_sb       -       sync sb inode pages
2667 * @sb: the superblock
2668 *
2669 * This function writes and waits on any dirty inode belonging to this
2670 * super_block.
2671 */
2672void sync_inodes_sb(struct super_block *sb)
2673{
2674        struct backing_dev_info *bdi = sb->s_bdi;
2675        DEFINE_WB_COMPLETION(done, bdi);
2676        struct wb_writeback_work work = {
2677                .sb             = sb,
2678                .sync_mode      = WB_SYNC_ALL,
2679                .nr_pages       = LONG_MAX,
2680                .range_cyclic   = 0,
2681                .done           = &done,
2682                .reason         = WB_REASON_SYNC,
2683                .for_sync       = 1,
2684        };
2685
2686        /*
2687         * Can't skip on !bdi_has_dirty_io() because we should wait for !dirty
2688         * inodes under writeback and I_DIRTY_TIME inodes ignored by
2689         * bdi_has_dirty_io() need to be written out too.
2690         */
2691        if (bdi == &noop_backing_dev_info)
2692                return;
2693        WARN_ON(!rwsem_is_locked(&sb->s_umount));
2694
2695        /* protect against inode wb switch, see inode_switch_wbs_work_fn() */
2696        bdi_down_write_wb_switch_rwsem(bdi);
2697        bdi_split_work_to_wbs(bdi, &work, false);
2698        wb_wait_for_completion(&done);
2699        bdi_up_write_wb_switch_rwsem(bdi);
2700
2701        wait_sb_inodes(sb);
2702}
2703EXPORT_SYMBOL(sync_inodes_sb);
2704
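    /*
     * Illustrative sketch of how writeback_inodes_sb() and sync_inodes_sb() are
     * commonly paired by callers (modelled on sync_filesystem(); the exact call
     * sequence there is stated as an assumption, not quoted from it):
     *
     *         writeback_inodes_sb(sb, WB_REASON_SYNC);        // start writeback, don't wait
     *         if (sb->s_op->sync_fs)
     *                 sb->s_op->sync_fs(sb, 0);
     *         ...
     *         sync_inodes_sb(sb);                             // write everything and wait
     *         if (sb->s_op->sync_fs)
     *                 sb->s_op->sync_fs(sb, 1);
     */
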
2705/**
2706 * write_inode_now      -       write an inode to disk
2707 * @inode: inode to write to disk
2708 * @sync: whether the write should be synchronous or not
2709 *
2710 * This function commits an inode to disk immediately if it is dirty. This is
2711 * primarily needed by knfsd.
2712 *
2713 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
2714 */
2715int write_inode_now(struct inode *inode, int sync)
2716{
2717        struct writeback_control wbc = {
2718                .nr_to_write = LONG_MAX,
2719                .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
2720                .range_start = 0,
2721                .range_end = LLONG_MAX,
2722        };
2723
2724        if (!mapping_can_writeback(inode->i_mapping))
2725                wbc.nr_to_write = 0;
2726
2727        might_sleep();
2728        return writeback_single_inode(inode, &wbc);
2729}
2730EXPORT_SYMBOL(write_inode_now);
2731
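    /*
     * Illustrative sketch of a hypothetical caller; example_flush_inode() is
     * not a real kernel function, it only shows the calling convention (the
     * caller must hold a reference on the inode or have set I_WILL_FREE).
     */
    static inline int example_flush_inode(struct inode *inode)
    {
            /* Synchronously push dirty pages and the inode itself to disk. */
            return write_inode_now(inode, 1);
    }
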
2732/**
2733 * sync_inode_metadata - write an inode to disk
2734 * @inode: the inode to sync
2735 * @wait: wait for I/O to complete.
2736 *
2737 * Write an inode to disk and adjust its dirty state after completion.
2738 *
2739 * Note: only writes the actual inode, no associated data or other metadata.
2740 */
2741int sync_inode_metadata(struct inode *inode, int wait)
2742{
2743        struct writeback_control wbc = {
2744                .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
2745                .nr_to_write = 0, /* metadata-only */
2746        };
2747
2748        return writeback_single_inode(inode, &wbc);
2749}
2750EXPORT_SYMBOL(sync_inode_metadata);
2751
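    /*
     * Illustrative sketch of a hypothetical ->fsync tail; example_fsync_meta()
     * is not a real kernel function.  The datasync shortcut mirrors what simple
     * filesystems do after writing the data (compare __generic_file_fsync() in
     * fs/libfs.c, referenced here as an assumption about its behaviour).
     */
    static inline int example_fsync_meta(struct inode *inode, int datasync)
    {
            /* For fdatasync(), skip the inode write unless it matters for data. */
            if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                    return 0;
            return sync_inode_metadata(inode, 1);
    }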