linux/mm/backing-dev.c

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long wb_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_io_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_io_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_io_list)
                nr_more_io++;
        list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
                if (inode->i_state & I_DIRTY_TIME)
                        nr_dirty_time++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiDirtied:         %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "b_dirty_time:       %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
                   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
                   K(wb_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
                   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
                   (unsigned long) K(wb->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   nr_dirty_time,
                   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

        return 0;
}
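
/*
 * Illustrative rendering of the resulting debugfs file, e.g.
 * /sys/kernel/debug/bdi/8:0/stats.  The device name and every value below
 * are hypothetical; only the layout follows the seq_printf() above:
 *
 *   BdiWriteback:             1024 kB
 *   BdiReclaimable:           4096 kB
 *   BdiDirtyThresh:         204800 kB
 *   DirtyThresh:            409600 kB
 *   BackgroundThresh:       102400 kB
 *   BdiDirtied:             819200 kB
 *   BdiWritten:             815104 kB
 *   BdiWriteBandwidth:      102400 kBps
 *   b_dirty:                     3
 *   b_io:                        0
 *   b_more_io:                   1
 *   b_dirty_time:                0
 *   bdi_list:                    1
 *   state:                       2
 */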

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
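
/*
 * Note: the open/read/llseek/release combination above is the standard
 * single_open() boilerplate for a read-only seq_file; on kernels that
 * provide it, DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats) generates an
 * equivalent open function and file_operations in one line.
 */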
 115
 116static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 117{
 118        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
 119        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
 120                                               bdi, &bdi_debug_stats_fops);
 121}
 122
 123static void bdi_debug_unregister(struct backing_dev_info *bdi)
 124{
 125        debugfs_remove(bdi->debug_stats);
 126        debugfs_remove(bdi->debug_dir);
 127}
 128#else
 129static inline void bdi_debug_init(void)
 130{
 131}
 132static inline void bdi_debug_register(struct backing_dev_info *bdi,
 133                                      const char *name)
 134{
 135}
 136static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 137{
 138}
 139#endif
 140
 141static ssize_t read_ahead_kb_store(struct device *dev,
 142                                  struct device_attribute *attr,
 143                                  const char *buf, size_t count)
 144{
 145        struct backing_dev_info *bdi = dev_get_drvdata(dev);
 146        unsigned long read_ahead_kb;
 147        ssize_t ret;
 148
 149        ret = kstrtoul(buf, 10, &read_ahead_kb);
 150        if (ret < 0)
 151                return ret;
 152
 153        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
 154
 155        return count;
 156}
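
/*
 * These device attributes appear under /sys/class/bdi/<name>/ (the class
 * is created in bdi_class_init() below).  A usage sketch, where the "8:0"
 * device name is hypothetical and depends on how the bdi was registered:
 *
 *   # echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 *
 * stores a 512 kB readahead window, i.e. ra_pages = 512 >> (PAGE_SHIFT - 10),
 * which is 128 pages with 4 kB pages.
 */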

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}                                                                       \
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
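
/*
 * For reference, the line above expands to roughly:
 *
 *   static ssize_t read_ahead_kb_show(struct device *dev,
 *                                     struct device_attribute *attr,
 *                                     char *page)
 *   {
 *           struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *           return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *                           (long long)K(bdi->ra_pages));
 *   }
 *   static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * DEVICE_ATTR_RW(name) wires up name##_show and name##_store, which is why
 * each BDI_SHOW() user also defines a matching *_store() function.
 */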

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_min_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_max_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *page)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);

        return snprintf(page, PAGE_SIZE-1, "%d\n",
                        bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
        &dev_attr_read_ahead_kb.attr,
        &dev_attr_min_ratio.attr,
        &dev_attr_max_ratio.attr,
        &dev_attr_stable_pages_required.attr,
        NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_groups = bdi_dev_groups;
        bdi_debug_init();

        return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
        int err;

        bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
                                              WQ_UNBOUND | WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;

        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);
/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        spin_lock_bh(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state))
                queue_delayed_work(bdi_wq, &wb->dwork, timeout);
        spin_unlock_bh(&wb->work_lock);
}
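
/*
 * Illustrative caller pattern (a simplified sketch, not a verbatim copy of
 * fs/fs-writeback.c): when __mark_inode_dirty() moves the first dirty inode
 * onto a previously clean wb, it ends up doing roughly
 *
 *   if (wakeup_bdi)
 *           wb_wakeup_delayed(wb);
 *
 * arming the flusher lazily instead of waking it immediately.
 */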

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))
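
/*
 * INIT_BW is in pages per second: 100 MB/s is 100 << 20 bytes/s, and
 * dividing by the page size (1 << PAGE_SHIFT) gives 100 << (20 - PAGE_SHIFT).
 * With 4 kB pages (PAGE_SHIFT == 12) that is 100 << 8 = 25600 pages/s.
 */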

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
                   int blkcg_id, gfp_t gfp)
{
        int i, err;

        memset(wb, 0, sizeof(*wb));

        if (wb != &bdi->wb)
                bdi_get(bdi);
        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        INIT_LIST_HEAD(&wb->b_dirty_time);
        spin_lock_init(&wb->list_lock);

        wb->bw_time_stamp = jiffies;
        wb->balanced_dirty_ratelimit = INIT_BW;
        wb->dirty_ratelimit = INIT_BW;
        wb->write_bandwidth = INIT_BW;
        wb->avg_write_bandwidth = INIT_BW;

        spin_lock_init(&wb->work_lock);
        INIT_LIST_HEAD(&wb->work_list);
        INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
        wb->dirty_sleep = jiffies;

        wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
        if (!wb->congested) {
                err = -ENOMEM;
                goto out_put_bdi;
        }

        err = fprop_local_init_percpu(&wb->completions, gfp);
        if (err)
                goto out_put_cong;

        for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
                err = percpu_counter_init(&wb->stat[i], 0, gfp);
                if (err)
                        goto out_destroy_stat;
        }

        return 0;

out_destroy_stat:
        while (i--)
                percpu_counter_destroy(&wb->stat[i]);
        fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
        wb_congested_put(wb->congested);
out_put_bdi:
        if (wb != &bdi->wb)
                bdi_put(bdi);
        return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
        /* Make sure nobody queues further work */
        spin_lock_bh(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
                spin_unlock_bh(&wb->work_lock);
                /*
                 * Wait for wb shutdown to finish if someone else is just
                 * running wb_shutdown(). Otherwise we could proceed to wb /
                 * bdi destruction before wb_shutdown() is finished.
                 */
                wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
                return;
        }
        set_bit(WB_shutting_down, &wb->state);
        spin_unlock_bh(&wb->work_lock);

        cgwb_remove_from_bdi_list(wb);
        /*
         * Drain work list and shutdown the delayed_work.  !WB_registered
         * tells wb_workfn() that @wb is dying and its work_list needs to
         * be drained no matter what.
         */
        mod_delayed_work(bdi_wq, &wb->dwork, 0);
        flush_delayed_work(&wb->dwork);
        WARN_ON(!list_empty(&wb->work_list));
        /*
         * Make sure bit gets cleared after shutdown is finished. Matches with
         * the barrier provided by test_and_clear_bit() above.
         */
        smp_wmb();
        clear_bit(WB_shutting_down, &wb->state);
}

static void wb_exit(struct bdi_writeback *wb)
{
        int i;

        WARN_ON(delayed_work_pending(&wb->dwork));

        for (i = 0; i < NR_WB_STAT_ITEMS; i++)
                percpu_counter_destroy(&wb->stat[i]);

        fprop_local_destroy_percpu(&wb->completions);
        wb_congested_put(wb->congested);
        if (wb != &wb->bdi->wb)
                bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
        struct bdi_writeback_congested *new_congested = NULL, *congested;
        struct rb_node **node, *parent;
        unsigned long flags;
retry:
        spin_lock_irqsave(&cgwb_lock, flags);

        node = &bdi->cgwb_congested_tree.rb_node;
        parent = NULL;

        while (*node != NULL) {
                parent = *node;
                congested = rb_entry(parent, struct bdi_writeback_congested,
                                     rb_node);
                if (congested->blkcg_id < blkcg_id)
                        node = &parent->rb_left;
                else if (congested->blkcg_id > blkcg_id)
                        node = &parent->rb_right;
                else
                        goto found;
        }

        if (new_congested) {
                /* !found and storage for new one already allocated, insert */
                congested = new_congested;
                new_congested = NULL;
                rb_link_node(&congested->rb_node, parent, node);
                rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
                goto found;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);

        /* allocate storage for new one and retry */
        new_congested = kzalloc(sizeof(*new_congested), gfp);
        if (!new_congested)
                return NULL;

        atomic_set(&new_congested->refcnt, 0);
        new_congested->__bdi = bdi;
        new_congested->blkcg_id = blkcg_id;
        goto retry;

found:
        atomic_inc(&congested->refcnt);
        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(new_congested);
        return congested;
}
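
/*
 * The retry loop above is the usual "allocate outside the lock" pattern:
 * search the tree under cgwb_lock; if the node is missing and nothing has
 * been preallocated yet, drop the lock, kzalloc() with the caller's @gfp,
 * and retry, since another CPU may have inserted the node in the meantime.
 * Whichever allocation loses that race is released by the final
 * kfree(new_congested), which is a no-op when new_congested is NULL.
 */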

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
        unsigned long flags;

        local_irq_save(flags);
        if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
                local_irq_restore(flags);
                return;
        }

        /* bdi might already have been destroyed leaving @congested unlinked */
        if (congested->__bdi) {
                rb_erase(&congested->rb_node,
                         &congested->__bdi->cgwb_congested_tree);
                congested->__bdi = NULL;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(congested);
}
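
/*
 * Note on the put side: atomic_dec_and_lock() takes cgwb_lock only when the
 * refcount actually drops to zero, so the common non-final put stays
 * lock-free; the surrounding local_irq_save() is what makes the eventual
 * lock acquisition IRQ-safe (cgwb_lock is taken with spin_lock_irq*
 * elsewhere).
 */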

static void cgwb_release_workfn(struct work_struct *work)
{
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);

        wb_shutdown(wb);

        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);

        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
        kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
        struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
                                                refcnt);
        schedule_work(&wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
        lockdep_assert_held(&cgwb_lock);

        WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
        list_del(&wb->memcg_node);
        list_del(&wb->blkcg_node);
        percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
        spin_lock_irq(&cgwb_lock);
        list_del_rcu(&wb->bdi_node);
        spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
                       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
        struct mem_cgroup *memcg;
        struct cgroup_subsys_state *blkcg_css;
        struct blkcg *blkcg;
        struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
        struct bdi_writeback *wb;
        unsigned long flags;
        int ret = 0;

        memcg = mem_cgroup_from_css(memcg_css);
        blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
        blkcg = css_to_blkcg(blkcg_css);
        memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
        blkcg_cgwb_list = &blkcg->cgwb_list;

        /* look up again under lock and discard on blkcg mismatch */
        spin_lock_irqsave(&cgwb_lock, flags);
        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
        if (wb && wb->blkcg_css != blkcg_css) {
                cgwb_kill(wb);
                wb = NULL;
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (wb)
                goto out_put;

        /* need to create a new one */
        wb = kmalloc(sizeof(*wb), gfp);
        if (!wb)
                return -ENOMEM;

        ret = wb_init(wb, bdi, blkcg_css->id, gfp);
        if (ret)
                goto err_free;

        ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
        if (ret)
                goto err_wb_exit;

        ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
        if (ret)
                goto err_ref_exit;

        wb->memcg_css = memcg_css;
        wb->blkcg_css = blkcg_css;
        INIT_WORK(&wb->release_work, cgwb_release_workfn);
        set_bit(WB_registered, &wb->state);

        /*
         * The root wb determines the registered state of the whole bdi and
         * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
         * whether they're still online.  Don't link @wb if any is dead.
         * See wb_memcg_offline() and wb_blkcg_offline().
         */
        ret = -ENODEV;
        spin_lock_irqsave(&cgwb_lock, flags);
        if (test_bit(WB_registered, &bdi->wb.state) &&
            blkcg_cgwb_list->next && memcg_cgwb_list->next) {
                /* we might have raced another instance of this function */
                ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
                if (!ret) {
                        list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
                        list_add(&wb->memcg_node, memcg_cgwb_list);
                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
                        css_get(memcg_css);
                        css_get(blkcg_css);
                }
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (ret) {
                if (ret == -EEXIST)
                        ret = 0;
                goto err_fprop_exit;
        }
        goto out_put;

err_fprop_exit:
        fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
        percpu_ref_exit(&wb->refcnt);
err_wb_exit:
        wb_exit(wb);
err_free:
        kfree(wb);
out_put:
        css_put(blkcg_css);
        return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough; take a real reference (e.g. via css_tryget())
 * before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, the memcg association is guaranteed to
 * be more specific (equal to or a descendant of the associated blkcg) and
 * thus can identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css,
                                    gfp_t gfp)
{
        struct bdi_writeback *wb;

        might_sleep_if(gfpflags_allow_blocking(gfp));

        if (!memcg_css->parent)
                return &bdi->wb;

        do {
                rcu_read_lock();
                wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
                if (wb) {
                        struct cgroup_subsys_state *blkcg_css;

                        /* see whether the blkcg association has changed */
                        blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
                                                     &io_cgrp_subsys);
                        if (unlikely(wb->blkcg_css != blkcg_css ||
                                     !wb_tryget(wb)))
                                wb = NULL;
                        css_put(blkcg_css);
                }
                rcu_read_unlock();
        } while (!wb && !cgwb_create(bdi, memcg_css, gfp));

        return wb;
}
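
/*
 * Illustrative usage (hypothetical caller, error handling elided).  The
 * caller must already hold a reference on @memcg_css, e.g. from
 * css_tryget():
 *
 *   wb = wb_get_create(bdi, memcg_css, GFP_KERNEL);
 *   if (wb) {
 *           ... issue/account writeback against this wb ...
 *           wb_put(wb);
 *   }
 */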

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;

        ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (!ret) {
                bdi->wb.memcg_css = &root_mem_cgroup->css;
                bdi->wb.blkcg_css = blkcg_root_css;
        }
        return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
        struct radix_tree_iter iter;
        void **slot;
        struct bdi_writeback *wb;

        WARN_ON(test_bit(WB_registered, &bdi->wb.state));

        spin_lock_irq(&cgwb_lock);
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);

        while (!list_empty(&bdi->wb_list)) {
                wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
                                      bdi_node);
                spin_unlock_irq(&cgwb_lock);
                wb_shutdown(wb);
                spin_lock_irq(&cgwb_lock);
        }
        spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
        LIST_HEAD(to_destroy);
        struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
                cgwb_kill(wb);
        memcg_cgwb_list->next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
        LIST_HEAD(to_destroy);
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
                cgwb_kill(wb);
        blkcg->cgwb_list.next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
        struct rb_node *rbn;

        spin_lock_irq(&cgwb_lock);
        while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
                struct bdi_writeback_congested *congested =
                        rb_entry(rbn, struct bdi_writeback_congested, rb_node);

                rb_erase(rbn, &bdi->cgwb_congested_tree);
                congested->__bdi = NULL;        /* mark @congested unlinked */
        }
        spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
        spin_lock_irq(&cgwb_lock);
        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
        spin_unlock_irq(&cgwb_lock);
}

#else   /* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int err;

        bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
        if (!bdi->wb_congested)
                return -ENOMEM;

        atomic_set(&bdi->wb_congested->refcnt, 1);

        err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (err) {
                wb_congested_put(bdi->wb_congested);
                return err;
        }
        return 0;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
        wb_congested_put(bdi->wb_congested);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
        list_del_rcu(&wb->bdi_node);
}

#endif  /* CONFIG_CGROUP_WRITEBACK */

static int bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        bdi->dev = NULL;

        kref_init(&bdi->refcnt);
        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->wb_list);
        init_waitqueue_head(&bdi->wb_waitq);

        ret = cgwb_bdi_init(bdi);

        return ret;
}

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
{
        struct backing_dev_info *bdi;

        bdi = kmalloc_node(sizeof(struct backing_dev_info),
                           gfp_mask | __GFP_ZERO, node_id);
        if (!bdi)
                return NULL;

        if (bdi_init(bdi)) {
                kfree(bdi);
                return NULL;
        }
        return bdi;
}
EXPORT_SYMBOL(bdi_alloc_node);
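
/*
 * Sketch of the typical driver-side lifecycle (illustrative; error handling
 * elided, and the "%u:%u" name mirrors bdi_register_owner() below):
 *
 *   struct backing_dev_info *bdi;
 *   int err;
 *
 *   bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
 *   if (!bdi)
 *           return -ENOMEM;
 *   err = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
 *   ...
 *   bdi_unregister(bdi);
 *   bdi_put(bdi);          ... final ref; release_bdi() frees the bdi ...
 */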

int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        cgwb_bdi_register(bdi);
        bdi->dev = dev;

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(WB_registered, &bdi->wb.state);

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register_va);

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = bdi_register_va(bdi, fmt, args);
        va_end(args);
        return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
        int rc;

        rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
        if (rc)
                return rc;
        /* Leaking owner reference... */
        WARN_ON(bdi->owner);
        bdi->owner = owner;
        get_device(owner);
        return 0;
}
EXPORT_SYMBOL(bdi_register_owner);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
        /* make sure nobody finds us on the bdi_list anymore */
        bdi_remove_from_list(bdi);
        wb_shutdown(&bdi->wb);
        cgwb_bdi_unregister(bdi);

        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }

        if (bdi->owner) {
                put_device(bdi->owner);
                bdi->owner = NULL;
        }
}

static void release_bdi(struct kref *ref)
{
        struct backing_dev_info *bdi =
                        container_of(ref, struct backing_dev_info, refcnt);

        if (test_bit(WB_registered, &bdi->wb.state))
                bdi_unregister(bdi);
        WARN_ON_ONCE(bdi->dev);
        wb_exit(&bdi->wb);
        cgwb_bdi_exit(bdi);
        kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
        kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        wait_queue_head_t *wqh = &congestion_wqh[sync];
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (test_and_clear_bit(bit, &congested->state))
                atomic_dec(&nr_wb_congested[sync]);
        smp_mb__after_atomic();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (!test_and_set_bit(bit, &congested->state))
                atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);
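
/*
 * Illustrative use, mirroring callers in the reclaim path (not a verbatim
 * copy): back off for up to 100ms while writeback catches up:
 *
 *   congestion_wait(BLK_RW_ASYNC, HZ / 10);
 */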

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @pgdat has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of pgdat congestion, cond_resched() is called to yield
 * the processor if necessary, but the function otherwise does not sleep.
 *
 * The return value is 0 if the sleep lasted for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, or heavy congestion is not being
         * encountered in the current pgdat, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_wb_congested[sync]) == 0 ||
            !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
                cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

int pdflush_proc_obsolete(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char kbuf[] = "0\n";

        if (*ppos || *lenp < sizeof(kbuf)) {
                *lenp = 0;
                return 0;
        }

        if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
                return -EFAULT;
        pr_warn_once("%s exported in /proc is scheduled for removal\n",
                     table->procname);

        *lenp = 2;
        *ppos += *lenp;
        return 2;
}