linux/mm/backing-dev.c
#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

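/*
 * Back the per-bdi "stats" debugfs file (created in bdi_debug_register()
 * below, typically /sys/kernel/debug/bdi/<dev>/stats): dump the writeback
 * counters, dirty thresholds and per-list inode counts for this bdi in a
 * "Key: value" format.
 */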
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long wb_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_io_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_io_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_io_list)
                nr_more_io++;
        list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
                if (inode->i_state & I_DIRTY_TIME)
                        nr_dirty_time++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiDirtied:         %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "b_dirty_time:       %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
                   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
                   K(wb_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
                   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
                   (unsigned long) K(wb->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   nr_dirty_time,
                   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

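/*
 * sysfs store handler for the per-bdi read_ahead_kb attribute: parse a
 * kilobyte count from userspace and convert it to a page count for
 * bdi->ra_pages.
 */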
static ssize_t read_ahead_kb_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned long read_ahead_kb;
        ssize_t ret;

        ret = kstrtoul(buf, 10, &read_ahead_kb);
        if (ret < 0)
                return ret;

        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

        return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

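/*
 * BDI_SHOW(name, expr) emits a name##_show() helper that formats @expr for
 * the bdi behind @dev and then declares the read-write device attribute
 * with DEVICE_ATTR_RW(), which also wires up the matching name##_store()
 * defined separately (read_ahead_kb_store() above, min_ratio_store() and
 * max_ratio_store() below).
 */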
#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}                                                                       \
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_min_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_max_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *page)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);

        return snprintf(page, PAGE_SIZE-1, "%d\n",
                        bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
        &dev_attr_read_ahead_kb.attr,
        &dev_attr_min_ratio.attr,
        &dev_attr_max_ratio.attr,
        &dev_attr_stable_pages_required.attr,
        NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_groups = bdi_dev_groups;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
        int err;

        bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
                                              WQ_UNBOUND | WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;

        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        spin_lock_bh(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state))
                queue_delayed_work(bdi_wq, &wb->dwork, timeout);
        spin_unlock_bh(&wb->work_lock);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))

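/*
 * Initialize a bdi_writeback: set up the dirty inode lists, the bandwidth
 * estimation fields (seeded with INIT_BW), the work list and delayed work,
 * the associated wb_congested and the per-cpu statistics counters.  Undoes
 * everything it allocated on failure.
 */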
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
                   int blkcg_id, gfp_t gfp)
{
        int i, err;

        memset(wb, 0, sizeof(*wb));

        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        INIT_LIST_HEAD(&wb->b_dirty_time);
        spin_lock_init(&wb->list_lock);

        wb->bw_time_stamp = jiffies;
        wb->balanced_dirty_ratelimit = INIT_BW;
        wb->dirty_ratelimit = INIT_BW;
        wb->write_bandwidth = INIT_BW;
        wb->avg_write_bandwidth = INIT_BW;

        spin_lock_init(&wb->work_lock);
        INIT_LIST_HEAD(&wb->work_list);
        INIT_DELAYED_WORK(&wb->dwork, wb_workfn);

        wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
        if (!wb->congested)
                return -ENOMEM;

        err = fprop_local_init_percpu(&wb->completions, gfp);
        if (err)
                goto out_put_cong;

        for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
                err = percpu_counter_init(&wb->stat[i], 0, gfp);
                if (err)
                        goto out_destroy_stat;
        }

        return 0;

out_destroy_stat:
        while (i--)
                percpu_counter_destroy(&wb->stat[i]);
        fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
        wb_congested_put(wb->congested);
        return err;
}

/*
 * Shut down a bdi_writeback: prevent further work from being queued and
 * drain the work that is already pending.
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
        /* Make sure nobody queues further work */
        spin_lock_bh(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
                spin_unlock_bh(&wb->work_lock);
                return;
        }
        spin_unlock_bh(&wb->work_lock);

        /*
         * Drain work list and shutdown the delayed_work.  !WB_registered
         * tells wb_workfn() that @wb is dying and its work_list needs to
         * be drained no matter what.
         */
        mod_delayed_work(bdi_wq, &wb->dwork, 0);
        flush_delayed_work(&wb->dwork);
        WARN_ON(!list_empty(&wb->work_list));
}

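/*
 * Release the resources allocated by wb_init().  The wb must already be
 * shut down (no delayed work pending).
 */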
static void wb_exit(struct bdi_writeback *wb)
{
        int i;

        WARN_ON(delayed_work_pending(&wb->dwork));

        for (i = 0; i < NR_WB_STAT_ITEMS; i++)
                percpu_counter_destroy(&wb->stat[i]);

        fprop_local_destroy_percpu(&wb->completions);
        wb_congested_put(wb->congested);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.  cgwb_release_wait is used to wait for the completion of cgwb
 * releases from bdi destruction path.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
        struct bdi_writeback_congested *new_congested = NULL, *congested;
        struct rb_node **node, *parent;
        unsigned long flags;
retry:
        spin_lock_irqsave(&cgwb_lock, flags);

        node = &bdi->cgwb_congested_tree.rb_node;
        parent = NULL;

        while (*node != NULL) {
                parent = *node;
                congested = container_of(parent, struct bdi_writeback_congested,
                                         rb_node);
                if (congested->blkcg_id < blkcg_id)
                        node = &parent->rb_left;
                else if (congested->blkcg_id > blkcg_id)
                        node = &parent->rb_right;
                else
                        goto found;
        }

        if (new_congested) {
                /* !found and storage for new one already allocated, insert */
                congested = new_congested;
                new_congested = NULL;
                rb_link_node(&congested->rb_node, parent, node);
                rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
                goto found;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);

        /* allocate storage for new one and retry */
        new_congested = kzalloc(sizeof(*new_congested), gfp);
        if (!new_congested)
                return NULL;

        atomic_set(&new_congested->refcnt, 0);
        new_congested->bdi = bdi;
        new_congested->blkcg_id = blkcg_id;
        goto retry;

found:
        atomic_inc(&congested->refcnt);
        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(new_congested);
        return congested;
}

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
        unsigned long flags;

        local_irq_save(flags);
        if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
                local_irq_restore(flags);
                return;
        }

        /* bdi might already have been destroyed leaving @congested unlinked */
        if (congested->bdi) {
                rb_erase(&congested->rb_node,
                         &congested->bdi->cgwb_congested_tree);
                congested->bdi = NULL;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(congested);
}

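/*
 * Final release of a cgroup wb.  cgwb_release() runs from the percpu_ref
 * release path (atomic context), so the real teardown is punted to a work
 * item which is allowed to block in wb_shutdown() and friends.
 */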
static void cgwb_release_workfn(struct work_struct *work)
{
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);
        struct backing_dev_info *bdi = wb->bdi;

        spin_lock_irq(&cgwb_lock);
        list_del_rcu(&wb->bdi_node);
        spin_unlock_irq(&cgwb_lock);

        wb_shutdown(wb);

        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);

        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
        kfree_rcu(wb, rcu);

        if (atomic_dec_and_test(&bdi->usage_cnt))
                wake_up_all(&cgwb_release_wait);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
        struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
                                                refcnt);
        schedule_work(&wb->release_work);
}

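/*
 * Unlink @wb from its bdi's cgwb_tree and from the memcg and blkcg lists,
 * then kill its percpu ref so it gets released once the last reference is
 * dropped.  Caller must hold cgwb_lock.
 */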
static void cgwb_kill(struct bdi_writeback *wb)
{
        lockdep_assert_held(&cgwb_lock);

        WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
        list_del(&wb->memcg_node);
        list_del(&wb->blkcg_node);
        percpu_ref_kill(&wb->refcnt);
}

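/*
 * Create and install the cgroup wb for @memcg_css on @bdi.  If an existing
 * wb is found whose blkcg association is stale, it is killed first.  Racing
 * creations are resolved through the radix tree insert: -EEXIST means some
 * other task won the race and is treated as success.
 */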
static int cgwb_create(struct backing_dev_info *bdi,
                       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
        struct mem_cgroup *memcg;
        struct cgroup_subsys_state *blkcg_css;
        struct blkcg *blkcg;
        struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
        struct bdi_writeback *wb;
        unsigned long flags;
        int ret = 0;

        memcg = mem_cgroup_from_css(memcg_css);
        blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
        blkcg = css_to_blkcg(blkcg_css);
        memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
        blkcg_cgwb_list = &blkcg->cgwb_list;

        /* look up again under lock and discard on blkcg mismatch */
        spin_lock_irqsave(&cgwb_lock, flags);
        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
        if (wb && wb->blkcg_css != blkcg_css) {
                cgwb_kill(wb);
                wb = NULL;
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (wb)
                goto out_put;

        /* need to create a new one */
        wb = kmalloc(sizeof(*wb), gfp);
        if (!wb)
                return -ENOMEM;

        ret = wb_init(wb, bdi, blkcg_css->id, gfp);
        if (ret)
                goto err_free;

        ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
        if (ret)
                goto err_wb_exit;

        ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
        if (ret)
                goto err_ref_exit;

        wb->memcg_css = memcg_css;
        wb->blkcg_css = blkcg_css;
        INIT_WORK(&wb->release_work, cgwb_release_workfn);
        set_bit(WB_registered, &wb->state);

        /*
         * The root wb determines the registered state of the whole bdi and
         * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
         * whether they're still online.  Don't link @wb if any is dead.
         * See wb_memcg_offline() and wb_blkcg_offline().
         */
        ret = -ENODEV;
        spin_lock_irqsave(&cgwb_lock, flags);
        if (test_bit(WB_registered, &bdi->wb.state) &&
            blkcg_cgwb_list->next && memcg_cgwb_list->next) {
                /* we might have raced another instance of this function */
                ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
                if (!ret) {
                        atomic_inc(&bdi->usage_cnt);
                        list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
                        list_add(&wb->memcg_node, memcg_cgwb_list);
                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
                        css_get(memcg_css);
                        css_get(blkcg_css);
                }
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (ret) {
                if (ret == -EEXIST)
                        ret = 0;
                goto err_fprop_exit;
        }
        goto out_put;

err_fprop_exit:
        fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
        percpu_ref_exit(&wb->refcnt);
err_wb_exit:
        wb_exit(wb);
err_free:
        kfree(wb);
out_put:
        css_put(blkcg_css);
        return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, the memcg association is guaranteed to
 * be more specific (equal to or a descendant of the associated blkcg) and
 * thus can identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css,
                                    gfp_t gfp)
{
        struct bdi_writeback *wb;

        might_sleep_if(gfpflags_allow_blocking(gfp));

        if (!memcg_css->parent)
                return &bdi->wb;

        do {
                rcu_read_lock();
                wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
                if (wb) {
                        struct cgroup_subsys_state *blkcg_css;

                        /* see whether the blkcg association has changed */
                        blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
                                                     &io_cgrp_subsys);
                        if (unlikely(wb->blkcg_css != blkcg_css ||
                                     !wb_tryget(wb)))
                                wb = NULL;
                        css_put(blkcg_css);
                }
                rcu_read_unlock();
        } while (!wb && !cgwb_create(bdi, memcg_css, gfp));

        return wb;
}

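/*
 * Set up the cgroup-writeback state of a new bdi: the cgwb radix tree, the
 * congested rb-tree and the usage count, then initialize the root wb and
 * associate it with the root memcg and blkcg.
 */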
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;
        atomic_set(&bdi->usage_cnt, 1);

        ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (!ret) {
                bdi->wb.memcg_css = &root_mem_cgroup->css;
                bdi->wb.blkcg_css = blkcg_root_css;
        }
        return ret;
}

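/*
 * Tear down all cgroup wbs and congested states hanging off @bdi: kill
 * every cgwb in the radix tree, unlink the congested entries, and then
 * wait until the usage count drains to zero so all of them have actually
 * been released.
 */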
static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
        struct radix_tree_iter iter;
        struct rb_node *rbn;
        void **slot;

        WARN_ON(test_bit(WB_registered, &bdi->wb.state));

        spin_lock_irq(&cgwb_lock);

        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);

        while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
                struct bdi_writeback_congested *congested =
                        rb_entry(rbn, struct bdi_writeback_congested, rb_node);

                rb_erase(rbn, &bdi->cgwb_congested_tree);
                congested->bdi = NULL;  /* mark @congested unlinked */
        }

        spin_unlock_irq(&cgwb_lock);

        /*
         * All cgwb's and their congested states must be shut down and
         * released before returning.  Drain the usage counter to wait for
         * all cgwb's and cgwb_congested's ever created on @bdi.
         */
        atomic_dec(&bdi->usage_cnt);
        wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
        LIST_HEAD(to_destroy);
        struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
                cgwb_kill(wb);
        memcg_cgwb_list->next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
        LIST_HEAD(to_destroy);
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
                cgwb_kill(wb);
        blkcg->cgwb_list.next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

#else   /* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int err;

        bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
        if (!bdi->wb_congested)
                return -ENOMEM;

        err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (err) {
                kfree(bdi->wb_congested);
                return err;
        }
        return 0;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }

#endif  /* CONFIG_CGROUP_WRITEBACK */

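/*
 * Initialize a backing_dev_info before it is registered: set the default
 * min/max dirty ratios, init the list heads and wait queue, set up the
 * (root) writeback state via cgwb_bdi_init() and link the root wb into
 * bdi->wb_list.
 */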
int bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->wb_list);
        init_waitqueue_head(&bdi->wb_waitq);

        ret = cgwb_bdi_init(bdi);

        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);

        return ret;
}
EXPORT_SYMBOL(bdi_init);

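/*
 * Register an already-initialized bdi: create its device in the "bdi"
 * class (named from @fmt), hook up the debugfs directory, mark the root
 * wb as registered and add the bdi to the global bdi_list.
 */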
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
{
        va_list args;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        bdi->dev = dev;

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(WB_registered, &bdi->wb.state);

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
        /* make sure nobody finds us on the bdi_list anymore */
        bdi_remove_from_list(bdi);
        wb_shutdown(&bdi->wb);
        cgwb_bdi_destroy(bdi);

        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }
}

void bdi_exit(struct backing_dev_info *bdi)
{
        WARN_ON_ONCE(bdi->dev);
        wb_exit(&bdi->wb);
}

void bdi_destroy(struct backing_dev_info *bdi)
{
        bdi_unregister(bdi);
        bdi_exit(bdi);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
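/*
 * Illustrative call site (not taken from this file), assuming a bdi
 * embedded in a hypothetical fs-private superblock structure 'sbi':
 *
 *      err = bdi_setup_and_register(&sbi->bdi, "myfs");
 *      if (err)
 *              return err;
 *      sb->s_bdi = &sbi->bdi;
 *
 * The "%.28s-%ld" format below then yields names such as "myfs-1",
 * "myfs-2", ... as bdi_seq increases.
 */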
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
        int err;

        bdi->name = name;
        bdi->capabilities = 0;
        err = bdi_init(bdi);
        if (err)
                return err;

        err = bdi_register(bdi, NULL, "%.28s-%ld", name,
                           atomic_long_inc_return(&bdi_seq));
        if (err) {
                bdi_destroy(bdi);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);

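/*
 * Tasks in congestion_wait()/wait_iff_congested() sleep on one of these two
 * queues, indexed by the @sync argument (async vs sync writeback), and are
 * woken whenever a matching wb clears its congested bit.
 */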
static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        wait_queue_head_t *wqh = &congestion_wqh[sync];
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (test_and_clear_bit(bit, &congested->state))
                atomic_dec(&nr_wb_congested[sync]);
        smp_mb__after_atomic();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (!test_and_set_bit(bit, &congested->state))
                atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If any backing_dev is congested and the given @zone has experienced
 * recent congestion, this waits for up to @timeout jiffies for either a
 * BDI to exit congestion of the given @sync queue or a write to complete.
 *
 * In the absence of zone congestion, a short sleep or a cond_resched is
 * performed to yield the processor and to allow other subsystems to make
 * forward progress.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, or heavy congestion is not being
         * encountered in the current zone, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_wb_congested[sync]) == 0 ||
            !test_bit(ZONE_CONGESTED, &zone->flags)) {

                /*
                 * Memory allocation/reclaim might be called from a WQ
                 * context and the current implementation of the WQ
                 * concurrency control doesn't recognize that a particular
                 * WQ is congested if the worker thread is looping without
                 * ever sleeping. Therefore we have to do a short sleep
                 * here rather than calling cond_resched().
                 */
                if (current->flags & PF_WQ_WORKER)
                        schedule_timeout_uninterruptible(1);
                else
                        cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

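/*
 * proc handler kept for the obsolete pdflush-related sysctl: it only ever
 * reports "0" and warns once that the entry is scheduled for removal.
 */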
int pdflush_proc_obsolete(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char kbuf[] = "0\n";

        if (*ppos || *lenp < sizeof(kbuf)) {
                *lenp = 0;
                return 0;
        }

        if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
                return -EFAULT;
        pr_warn_once("%s exported in /proc is scheduled for removal\n",
                     table->procname);

        *lenp = 2;
        *ppos += *lenp;
        return 2;
}