linux/mm/backing-dev.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long wb_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_io_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_io_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_io_list)
                nr_more_io++;
        list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
                if (inode->i_state & I_DIRTY_TIME)
                        nr_dirty_time++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiDirtied:         %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "b_dirty_time:       %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
                   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
                   K(wb_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
                   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
                   (unsigned long) K(wb->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   nr_dirty_time,
                   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

        debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
                            &bdi_debug_stats_fops);
}
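
/*
 * For reference: with CONFIG_DEBUG_FS enabled, the directory created above
 * makes the counters from bdi_debug_stats_show() readable from userspace,
 * typically at /sys/kernel/debug/bdi/<dev>/stats, where <dev> is the
 * dev_name() the bdi was registered with (e.g. "8:0" for a disk).
 */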

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned long read_ahead_kb;
        ssize_t ret;

        ret = kstrtoul(buf, 10, &read_ahead_kb);
        if (ret < 0)
                return ret;

        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

        return count;
}

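/*
 * Convert a page count to KiB: a page is 1 << PAGE_SHIFT bytes,
 * i.e. 1 << (PAGE_SHIFT - 10) KiB.
 */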
#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}                                                                       \
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
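
/*
 * For illustration, the BDI_SHOW() invocation above expands to roughly:
 *
 *      static ssize_t read_ahead_kb_show(struct device *dev,
 *                                        struct device_attribute *attr,
 *                                        char *page)
 *      {
 *              struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *              return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *                              (long long)K(bdi->ra_pages));
 *      }
 *      static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * DEVICE_ATTR_RW() pairs this _show with the read_ahead_kb_store() above
 * to form the read_ahead_kb sysfs attribute.
 */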

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_min_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_max_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *page)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);

        return snprintf(page, PAGE_SIZE-1, "%d\n",
                        bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
        &dev_attr_read_ahead_kb.attr,
        &dev_attr_min_ratio.attr,
        &dev_attr_max_ratio.attr,
        &dev_attr_stable_pages_required.attr,
        NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_groups = bdi_dev_groups;
        bdi_debug_init();

        return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
        int err;

        bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
                                 WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;

        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        spin_lock_bh(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state))
                queue_delayed_work(bdi_wq, &wb->dwork, timeout);
        spin_unlock_bh(&wb->work_lock);
}
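
/*
 * Illustrative call site (a sketch; the actual caller lives in
 * fs/fs-writeback.c): when __mark_inode_dirty() dirties the first inode
 * on an idle wb, it does, in essence:
 *
 *      if (wakeup_bdi)
 *              wb_wakeup_delayed(wb);
 *
 * so background writeback starts one dirty_writeback_interval from now
 * rather than immediately.
 */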

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))
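
/*
 * The arithmetic: 100 MB/s is 100 << 20 bytes per second; dividing by the
 * page size (1 << PAGE_SHIFT) gives 100 << (20 - PAGE_SHIFT) pages per
 * second, e.g. 25600 pages/s with 4K pages.
 */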

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
                   int blkcg_id, gfp_t gfp)
{
        int i, err;

        memset(wb, 0, sizeof(*wb));

        if (wb != &bdi->wb)
                bdi_get(bdi);
        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        INIT_LIST_HEAD(&wb->b_dirty_time);
        spin_lock_init(&wb->list_lock);

        wb->bw_time_stamp = jiffies;
        wb->balanced_dirty_ratelimit = INIT_BW;
        wb->dirty_ratelimit = INIT_BW;
        wb->write_bandwidth = INIT_BW;
        wb->avg_write_bandwidth = INIT_BW;

        spin_lock_init(&wb->work_lock);
        INIT_LIST_HEAD(&wb->work_list);
        INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
        wb->dirty_sleep = jiffies;

        wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
        if (!wb->congested) {
                err = -ENOMEM;
                goto out_put_bdi;
        }

        err = fprop_local_init_percpu(&wb->completions, gfp);
        if (err)
                goto out_put_cong;

        for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
                err = percpu_counter_init(&wb->stat[i], 0, gfp);
                if (err)
                        goto out_destroy_stat;
        }

        return 0;

out_destroy_stat:
        while (i--)
                percpu_counter_destroy(&wb->stat[i]);
        fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
        wb_congested_put(wb->congested);
out_put_bdi:
        if (wb != &bdi->wb)
                bdi_put(bdi);
        return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
        /* Make sure nobody queues further work */
        spin_lock_bh(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
                spin_unlock_bh(&wb->work_lock);
                return;
        }
        spin_unlock_bh(&wb->work_lock);

        cgwb_remove_from_bdi_list(wb);
        /*
         * Drain work list and shutdown the delayed_work.  !WB_registered
         * tells wb_workfn() that @wb is dying and its work_list needs to
         * be drained no matter what.
         */
        mod_delayed_work(bdi_wq, &wb->dwork, 0);
        flush_delayed_work(&wb->dwork);
        WARN_ON(!list_empty(&wb->work_list));
}
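
/*
 * Note on the sequence above: mod_delayed_work(bdi_wq, &wb->dwork, 0)
 * collapses any pending delay so wb_workfn() runs immediately, and
 * flush_delayed_work() then waits for that final run to finish.  Because
 * WB_registered was cleared first, wb_workfn() drains wb->work_list
 * instead of re-arming the delayed work.
 */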

static void wb_exit(struct bdi_writeback *wb)
{
        int i;

        WARN_ON(delayed_work_pending(&wb->dwork));

        for (i = 0; i < NR_WB_STAT_ITEMS; i++)
                percpu_counter_destroy(&wb->stat[i]);

        fprop_local_destroy_percpu(&wb->completions);
        wb_congested_put(wb->congested);
        if (wb != &wb->bdi->wb)
                bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
        struct bdi_writeback_congested *new_congested = NULL, *congested;
        struct rb_node **node, *parent;
        unsigned long flags;
retry:
        spin_lock_irqsave(&cgwb_lock, flags);

        node = &bdi->cgwb_congested_tree.rb_node;
        parent = NULL;

        while (*node != NULL) {
                parent = *node;
                congested = rb_entry(parent, struct bdi_writeback_congested,
                                     rb_node);
                if (congested->blkcg_id < blkcg_id)
                        node = &parent->rb_left;
                else if (congested->blkcg_id > blkcg_id)
                        node = &parent->rb_right;
                else
                        goto found;
        }

        if (new_congested) {
                /* !found and storage for new one already allocated, insert */
                congested = new_congested;
                rb_link_node(&congested->rb_node, parent, node);
                rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
                spin_unlock_irqrestore(&cgwb_lock, flags);
                return congested;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);

        /* allocate storage for new one and retry */
        new_congested = kzalloc(sizeof(*new_congested), gfp);
        if (!new_congested)
                return NULL;

        refcount_set(&new_congested->refcnt, 1);
        new_congested->__bdi = bdi;
        new_congested->blkcg_id = blkcg_id;
        goto retry;

found:
        refcount_inc(&congested->refcnt);
        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(new_congested);
        return congested;
}
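
/*
 * The retry above is the usual "allocate outside the lock" pattern: the
 * first pass searches under cgwb_lock; on a miss it drops the lock,
 * allocates with a possibly-sleeping @gfp, and retries the search, either
 * inserting the new node or freeing it when someone else won the race.
 * This keeps kzalloc() out of the IRQ-disabled critical section.
 */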

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
        unsigned long flags;

        if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags))
                return;

        /* bdi might already have been destroyed leaving @congested unlinked */
        if (congested->__bdi) {
                rb_erase(&congested->rb_node,
                         &congested->__bdi->cgwb_congested_tree);
                congested->__bdi = NULL;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(congested);
}

static void cgwb_release_workfn(struct work_struct *work)
{
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);
        struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);

        mutex_lock(&wb->bdi->cgwb_release_mutex);
        wb_shutdown(wb);

        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);
        mutex_unlock(&wb->bdi->cgwb_release_mutex);

        /* triggers blkg destruction if cgwb_refcnt becomes zero */
        blkcg_cgwb_put(blkcg);

        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
        kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
        struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
                                                refcnt);
        queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
        lockdep_assert_held(&cgwb_lock);

        WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
        list_del(&wb->memcg_node);
        list_del(&wb->blkcg_node);
        percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
        spin_lock_irq(&cgwb_lock);
        list_del_rcu(&wb->bdi_node);
        spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
                       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
        struct mem_cgroup *memcg;
        struct cgroup_subsys_state *blkcg_css;
        struct blkcg *blkcg;
        struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
        struct bdi_writeback *wb;
        unsigned long flags;
        int ret = 0;

        memcg = mem_cgroup_from_css(memcg_css);
        blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
        blkcg = css_to_blkcg(blkcg_css);
        memcg_cgwb_list = &memcg->cgwb_list;
        blkcg_cgwb_list = &blkcg->cgwb_list;

        /* look up again under lock and discard on blkcg mismatch */
        spin_lock_irqsave(&cgwb_lock, flags);
        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
        if (wb && wb->blkcg_css != blkcg_css) {
                cgwb_kill(wb);
                wb = NULL;
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (wb)
                goto out_put;

        /* need to create a new one */
        wb = kmalloc(sizeof(*wb), gfp);
        if (!wb) {
                ret = -ENOMEM;
                goto out_put;
        }

        ret = wb_init(wb, bdi, blkcg_css->id, gfp);
        if (ret)
                goto err_free;

        ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
        if (ret)
                goto err_wb_exit;

        ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
        if (ret)
                goto err_ref_exit;

        wb->memcg_css = memcg_css;
        wb->blkcg_css = blkcg_css;
        INIT_WORK(&wb->release_work, cgwb_release_workfn);
        set_bit(WB_registered, &wb->state);

        /*
         * The root wb determines the registered state of the whole bdi and
         * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
         * whether they're still online.  Don't link @wb if any is dead.
         * See wb_memcg_offline() and wb_blkcg_offline().
         */
        ret = -ENODEV;
        spin_lock_irqsave(&cgwb_lock, flags);
        if (test_bit(WB_registered, &bdi->wb.state) &&
            blkcg_cgwb_list->next && memcg_cgwb_list->next) {
                /* we might have raced another instance of this function */
                ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
                if (!ret) {
                        list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
                        list_add(&wb->memcg_node, memcg_cgwb_list);
                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
                        blkcg_cgwb_get(blkcg);
                        css_get(memcg_css);
                        css_get(blkcg_css);
                }
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (ret) {
                if (ret == -EEXIST)
                        ret = 0;
                goto err_fprop_exit;
        }
        goto out_put;

err_fprop_exit:
        fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
        percpu_ref_exit(&wb->refcnt);
err_wb_exit:
        wb_exit(wb);
err_free:
        kfree(wb);
out_put:
        css_put(blkcg_css);
        return ret;
}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, the memcg association is guaranteed to be
 * more specific (equal to or a descendant of the associated blkcg) and thus
 * can identify both the memcg and blkcg associations.
 636 *
 637 * Because the blkcg associated with a memcg may change as blkcg is enabled
 638 * and disabled closer to root in the hierarchy, each wb keeps track of
 639 * both the memcg and blkcg associated with it and verifies the blkcg on
 640 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 641 * created.
 642 */
 643struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
 644                                    struct cgroup_subsys_state *memcg_css)
 645{
 646        struct bdi_writeback *wb;
 647
 648        if (!memcg_css->parent)
 649                return &bdi->wb;
 650
 651        rcu_read_lock();
 652        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 653        if (wb) {
 654                struct cgroup_subsys_state *blkcg_css;
 655
 656                /* see whether the blkcg association has changed */
 657                blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
 658                if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
 659                        wb = NULL;
 660                css_put(blkcg_css);
 661        }
 662        rcu_read_unlock();
 663
 664        return wb;
 665}
 666
 667/**
 668 * wb_get_create - get wb for a given memcg, create if necessary
 669 * @bdi: target bdi
 670 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 671 * @gfp: allocation mask to use
 672 *
 673 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 674 * create one.  See wb_get_lookup() for more details.
 675 */
 676struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
 677                                    struct cgroup_subsys_state *memcg_css,
 678                                    gfp_t gfp)
 679{
 680        struct bdi_writeback *wb;
 681
 682        might_sleep_if(gfpflags_allow_blocking(gfp));
 683
 684        if (!memcg_css->parent)
 685                return &bdi->wb;
 686
 687        do {
 688                wb = wb_get_lookup(bdi, memcg_css);
 689        } while (!wb && !cgwb_create(bdi, memcg_css, gfp));
 690
 691        return wb;
 692}
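
/*
 * Illustrative usage (a sketch mirroring __inode_attach_wb() in
 * fs/fs-writeback.c, not a caller in this file):
 *
 *      wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *      if (!wb)
 *              wb = &bdi->wb;  (fall back to the root wb on failure)
 *
 * wb_get_create() returns NULL only when cgwb_create() cannot allocate;
 * the root memcg always maps to &bdi->wb.
 */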

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;
        mutex_init(&bdi->cgwb_release_mutex);
        init_rwsem(&bdi->wb_switch_rwsem);

        ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (!ret) {
                bdi->wb.memcg_css = &root_mem_cgroup->css;
                bdi->wb.blkcg_css = blkcg_root_css;
        }
        return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
        struct radix_tree_iter iter;
        void **slot;
        struct bdi_writeback *wb;

        WARN_ON(test_bit(WB_registered, &bdi->wb.state));

        spin_lock_irq(&cgwb_lock);
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
        spin_unlock_irq(&cgwb_lock);

        mutex_lock(&bdi->cgwb_release_mutex);
        spin_lock_irq(&cgwb_lock);
        while (!list_empty(&bdi->wb_list)) {
                wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
                                      bdi_node);
                spin_unlock_irq(&cgwb_lock);
                wb_shutdown(wb);
                spin_lock_irq(&cgwb_lock);
        }
        spin_unlock_irq(&cgwb_lock);
        mutex_unlock(&bdi->cgwb_release_mutex);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
        struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
                cgwb_kill(wb);
        memcg_cgwb_list->next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
                cgwb_kill(wb);
        blkcg->cgwb_list.next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
        struct rb_node *rbn;

        spin_lock_irq(&cgwb_lock);
        while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
                struct bdi_writeback_congested *congested =
                        rb_entry(rbn, struct bdi_writeback_congested, rb_node);

                rb_erase(rbn, &bdi->cgwb_congested_tree);
                congested->__bdi = NULL;        /* mark @congested unlinked */
        }
        spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
        spin_lock_irq(&cgwb_lock);
        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
        spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
        /*
         * There can be many concurrent release work items overwhelming
         * system_wq.  Put them in a separate wq and limit concurrency.
         * There's no point in executing many of these in parallel.
         */
        cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
        if (!cgwb_release_wq)
                return -ENOMEM;

        return 0;
}
subsys_initcall(cgwb_init);

#else   /* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int err;

        bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
        if (!bdi->wb_congested)
                return -ENOMEM;

        refcount_set(&bdi->wb_congested->refcnt, 1);

        err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (err) {
                wb_congested_put(bdi->wb_congested);
                return err;
        }
        return 0;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
        wb_congested_put(bdi->wb_congested);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
        list_del_rcu(&wb->bdi_node);
}

#endif  /* CONFIG_CGROUP_WRITEBACK */

static int bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        bdi->dev = NULL;

        kref_init(&bdi->refcnt);
        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->wb_list);
        init_waitqueue_head(&bdi->wb_waitq);

        ret = cgwb_bdi_init(bdi);

        return ret;
}

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
{
        struct backing_dev_info *bdi;

        bdi = kmalloc_node(sizeof(struct backing_dev_info),
                           gfp_mask | __GFP_ZERO, node_id);
        if (!bdi)
                return NULL;

        if (bdi_init(bdi)) {
                kfree(bdi);
                return NULL;
        }
        return bdi;
}
EXPORT_SYMBOL(bdi_alloc_node);
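
/*
 * Typical driver-side sequence (a sketch under this era's API; major and
 * minor are placeholders):
 *
 *      bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
 *      if (!bdi)
 *              return -ENOMEM;
 *      err = bdi_register(bdi, "%u:%u", major, minor);
 *      ...
 *      bdi_put(bdi);   (the final put frees it via release_bdi())
 */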

static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
        struct rb_node **p = &bdi_tree.rb_node;
        struct rb_node *parent = NULL;
        struct backing_dev_info *bdi;

        lockdep_assert_held(&bdi_lock);

        while (*p) {
                parent = *p;
                bdi = rb_entry(parent, struct backing_dev_info, rb_node);

                if (bdi->id > id)
                        p = &(*p)->rb_left;
                else if (bdi->id < id)
                        p = &(*p)->rb_right;
                else
                        break;
        }

        if (parentp)
                *parentp = parent;
        return p;
}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
        struct backing_dev_info *bdi = NULL;
        struct rb_node **p;

        spin_lock_bh(&bdi_lock);
        p = bdi_lookup_rb_node(id, NULL);
        if (*p) {
                bdi = rb_entry(*p, struct backing_dev_info, rb_node);
                bdi_get(bdi);
        }
        spin_unlock_bh(&bdi_lock);

        return bdi;
}

int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
        struct device *dev;
        struct rb_node *parent, **p;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        cgwb_bdi_register(bdi);
        bdi->dev = dev;

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(WB_registered, &bdi->wb.state);

        spin_lock_bh(&bdi_lock);

        bdi->id = ++bdi_id_cursor;

        p = bdi_lookup_rb_node(bdi->id, &parent);
        rb_link_node(&bdi->rb_node, parent, p);
        rb_insert_color(&bdi->rb_node, &bdi_tree);

        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register_va);

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = bdi_register_va(bdi, fmt, args);
        va_end(args);
        return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
        int rc;

        rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
        if (rc)
                return rc;
        /* Leaking owner reference... */
        WARN_ON(bdi->owner);
        bdi->owner = owner;
        get_device(owner);
        return 0;
}
EXPORT_SYMBOL(bdi_register_owner);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        rb_erase(&bdi->rb_node, &bdi_tree);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
        /* make sure nobody finds us on the bdi_list anymore */
        bdi_remove_from_list(bdi);
        wb_shutdown(&bdi->wb);
        cgwb_bdi_unregister(bdi);

        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }

        if (bdi->owner) {
                put_device(bdi->owner);
                bdi->owner = NULL;
        }
}

static void release_bdi(struct kref *ref)
{
        struct backing_dev_info *bdi =
                        container_of(ref, struct backing_dev_info, refcnt);

        if (test_bit(WB_registered, &bdi->wb.state))
                bdi_unregister(bdi);
        WARN_ON_ONCE(bdi->dev);
        wb_exit(&bdi->wb);
        cgwb_bdi_exit(bdi);
        kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
        kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        wait_queue_head_t *wqh = &congestion_wqh[sync];
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (test_and_clear_bit(bit, &congested->state))
                atomic_dec(&nr_wb_congested[sync]);
        smp_mb__after_atomic();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (!test_and_set_bit(bit, &congested->state))
                atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);
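
/*
 * Illustrative use (a sketch of the common reclaim back-off idiom; the
 * surrounding condition is a placeholder):
 *
 *      if (under_writeback_pressure())
 *              congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * i.e. sleep for up to 100ms, or less if a wb clears its congested bit
 * and wakes the queue first.
 */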

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) this waits
 * for up to @timeout jiffies for either a BDI to exit congestion of the
 * given @sync queue or a write to complete.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_wb_congested[sync]) == 0) {
                cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);