linux/fs/btrfs/ordered-data.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/slab.h>
   7#include <linux/blkdev.h>
   8#include <linux/writeback.h>
   9#include <linux/sched/mm.h>
  10#include "misc.h"
  11#include "ctree.h"
  12#include "transaction.h"
  13#include "btrfs_inode.h"
  14#include "extent_io.h"
  15#include "disk-io.h"
  16#include "compression.h"
  17#include "delalloc-space.h"
  18#include "qgroup.h"
  19
  20static struct kmem_cache *btrfs_ordered_extent_cache;
  21
  22static u64 entry_end(struct btrfs_ordered_extent *entry)
  23{
  24        if (entry->file_offset + entry->num_bytes < entry->file_offset)
  25                return (u64)-1;
  26        return entry->file_offset + entry->num_bytes;
  27}
  28
  29/* Returns NULL if the insertion worked, or the existing node that
  30 * already covers the given offset in the tree.
  31 */
  32static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
  33                                   struct rb_node *node)
  34{
  35        struct rb_node **p = &root->rb_node;
  36        struct rb_node *parent = NULL;
  37        struct btrfs_ordered_extent *entry;
  38
  39        while (*p) {
  40                parent = *p;
  41                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
  42
  43                if (file_offset < entry->file_offset)
  44                        p = &(*p)->rb_left;
  45                else if (file_offset >= entry_end(entry))
  46                        p = &(*p)->rb_right;
  47                else
  48                        return parent;
  49        }
  50
  51        rb_link_node(node, parent, p);
  52        rb_insert_color(node, root);
  53        return NULL;
  54}
  55
  56/*
  57 * look for a given offset in the tree, and if it can't be found return the
  58 * first lesser offset
  59 */
  60static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
  61                                     struct rb_node **prev_ret)
  62{
  63        struct rb_node *n = root->rb_node;
  64        struct rb_node *prev = NULL;
  65        struct rb_node *test;
  66        struct btrfs_ordered_extent *entry;
  67        struct btrfs_ordered_extent *prev_entry = NULL;
  68
  69        while (n) {
  70                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
  71                prev = n;
  72                prev_entry = entry;
  73
  74                if (file_offset < entry->file_offset)
  75                        n = n->rb_left;
  76                else if (file_offset >= entry_end(entry))
  77                        n = n->rb_right;
  78                else
  79                        return n;
  80        }
  81        if (!prev_ret)
  82                return NULL;
  83
  84        while (prev && file_offset >= entry_end(prev_entry)) {
  85                test = rb_next(prev);
  86                if (!test)
  87                        break;
  88                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
  89                                      rb_node);
  90                if (file_offset < entry_end(prev_entry))
  91                        break;
  92
  93                prev = test;
  94        }
  95        if (prev)
  96                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
  97                                      rb_node);
  98        while (prev && file_offset < entry_end(prev_entry)) {
  99                test = rb_prev(prev);
 100                if (!test)
 101                        break;
 102                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
 103                                      rb_node);
 104                prev = test;
 105        }
 106        *prev_ret = prev;
 107        return NULL;
 108}
 109
 110static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
 111                          u64 len)
 112{
 113        if (file_offset + len <= entry->file_offset ||
 114            entry->file_offset + entry->num_bytes <= file_offset)
 115                return 0;
 116        return 1;
 117}
 118
 119/*
 120 * Find the first ordered struct that covers this offset, otherwise
 121 * the first one less than this offset.
 122 */
 123static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
 124                                          u64 file_offset)
 125{
 126        struct rb_root *root = &tree->tree;
 127        struct rb_node *prev = NULL;
 128        struct rb_node *ret;
 129        struct btrfs_ordered_extent *entry;
 130
 131        if (tree->last) {
 132                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
 133                                 rb_node);
 134                if (in_range(file_offset, entry->file_offset, entry->num_bytes))
 135                        return tree->last;
 136        }
 137        ret = __tree_search(root, file_offset, &prev);
 138        if (!ret)
 139                ret = prev;
 140        if (ret)
 141                tree->last = ret;
 142        return ret;
 143}
 144
 145/*
 146 * Allocate and add a new ordered_extent into the per-inode tree.
 147 *
 148 * The tree is given a single reference on the ordered extent that was
 149 * inserted.
 150 */
 151static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
 152                                      u64 disk_bytenr, u64 num_bytes,
 153                                      u64 disk_num_bytes, int type, int dio,
 154                                      int compress_type)
 155{
 156        struct btrfs_root *root = inode->root;
 157        struct btrfs_fs_info *fs_info = root->fs_info;
 158        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 159        struct rb_node *node;
 160        struct btrfs_ordered_extent *entry;
 161        int ret;
 162
 163        if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
 164                /* For nocow write, we can release the qgroup rsv right now */
 165                ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
 166                if (ret < 0)
 167                        return ret;
 168                ret = 0;
 169        } else {
 170                /*
 171                 * The ordered extent has reserved qgroup space, release now
 172                 * and pass the reserved number for qgroup_record to free.
 173                 */
 174                ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
 175                if (ret < 0)
 176                        return ret;
 177        }
 178        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
 179        if (!entry)
 180                return -ENOMEM;
 181
 182        entry->file_offset = file_offset;
 183        entry->disk_bytenr = disk_bytenr;
 184        entry->num_bytes = num_bytes;
 185        entry->disk_num_bytes = disk_num_bytes;
 186        entry->bytes_left = num_bytes;
 187        entry->inode = igrab(&inode->vfs_inode);
 188        entry->compress_type = compress_type;
 189        entry->truncated_len = (u64)-1;
 190        entry->qgroup_rsv = ret;
 191        entry->physical = (u64)-1;
 192        entry->disk = NULL;
 193        entry->partno = (u8)-1;
 194
 195        ASSERT(type == BTRFS_ORDERED_REGULAR ||
 196               type == BTRFS_ORDERED_NOCOW ||
 197               type == BTRFS_ORDERED_PREALLOC ||
 198               type == BTRFS_ORDERED_COMPRESSED);
 199        set_bit(type, &entry->flags);
 200
 201        percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
 202                                 fs_info->delalloc_batch);
 203
 204        if (dio)
 205                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
 206
 207        /* one ref for the tree */
 208        refcount_set(&entry->refs, 1);
 209        init_waitqueue_head(&entry->wait);
 210        INIT_LIST_HEAD(&entry->list);
 211        INIT_LIST_HEAD(&entry->log_list);
 212        INIT_LIST_HEAD(&entry->root_extent_list);
 213        INIT_LIST_HEAD(&entry->work_list);
 214        init_completion(&entry->completion);
 215
 216        trace_btrfs_ordered_extent_add(inode, entry);
 217
 218        spin_lock_irq(&tree->lock);
 219        node = tree_insert(&tree->tree, file_offset,
 220                           &entry->rb_node);
 221        if (node)
 222                btrfs_panic(fs_info, -EEXIST,
 223                                "inconsistency in ordered tree at offset %llu",
 224                                file_offset);
 225        spin_unlock_irq(&tree->lock);
 226
 227        spin_lock(&root->ordered_extent_lock);
 228        list_add_tail(&entry->root_extent_list,
 229                      &root->ordered_extents);
 230        root->nr_ordered_extents++;
 231        if (root->nr_ordered_extents == 1) {
 232                spin_lock(&fs_info->ordered_root_lock);
 233                BUG_ON(!list_empty(&root->ordered_root));
 234                list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
 235                spin_unlock(&fs_info->ordered_root_lock);
 236        }
 237        spin_unlock(&root->ordered_extent_lock);
 238
 239        /*
 240         * We don't need the count_max_extents here, we can assume that all of
 241         * that work has been done at higher layers, so this is truly the
 242         * smallest the extent is going to get.
 243         */
 244        spin_lock(&inode->lock);
 245        btrfs_mod_outstanding_extents(inode, 1);
 246        spin_unlock(&inode->lock);
 247
 248        return 0;
 249}
 250
 251int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
 252                             u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
 253                             int type)
 254{
 255        ASSERT(type == BTRFS_ORDERED_REGULAR ||
 256               type == BTRFS_ORDERED_NOCOW ||
 257               type == BTRFS_ORDERED_PREALLOC);
 258        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
 259                                          num_bytes, disk_num_bytes, type, 0,
 260                                          BTRFS_COMPRESS_NONE);
 261}
 262
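/*
 * Illustrative sketch, not part of the original source: a simplified caller
 * (the helper name example_record_cow_write is hypothetical) that records the
 * pending IO of an uncompressed COW write as a regular ordered extent.  Real
 * write paths reserve the disk extent first; that step is assumed here.
 */
static int __maybe_unused example_record_cow_write(struct btrfs_inode *inode,
                                                   u64 file_offset,
                                                   u64 disk_bytenr,
                                                   u64 num_bytes)
{
        /* Uncompressed data: the on-disk size equals the file range size. */
        return btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
                                        num_bytes, num_bytes,
                                        BTRFS_ORDERED_REGULAR);
}
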
 263int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
 264                                 u64 disk_bytenr, u64 num_bytes,
 265                                 u64 disk_num_bytes, int type)
 266{
 267        ASSERT(type == BTRFS_ORDERED_REGULAR ||
 268               type == BTRFS_ORDERED_NOCOW ||
 269               type == BTRFS_ORDERED_PREALLOC);
 270        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
 271                                          num_bytes, disk_num_bytes, type, 1,
 272                                          BTRFS_COMPRESS_NONE);
 273}
 274
 275int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
 276                                      u64 disk_bytenr, u64 num_bytes,
 277                                      u64 disk_num_bytes, int compress_type)
 278{
 279        ASSERT(compress_type != BTRFS_COMPRESS_NONE);
 280        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
 281                                          num_bytes, disk_num_bytes,
 282                                          BTRFS_ORDERED_COMPRESSED, 0,
 283                                          compress_type);
 284}
 285
 286/*
 287 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 288 * when an ordered extent is finished.  If the list covers more than one
 289 * ordered extent, it is split across them.
 290 */
 291void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
 292                           struct btrfs_ordered_sum *sum)
 293{
 294        struct btrfs_ordered_inode_tree *tree;
 295
 296        tree = &BTRFS_I(entry->inode)->ordered_tree;
 297        spin_lock_irq(&tree->lock);
 298        list_add_tail(&sum->list, &entry->list);
 299        spin_unlock_irq(&tree->lock);
 300}
 301
 302/*
 303 * Finish IO for one ordered extent across a given range.  The range can
 304 * contain several ordered extents.
 305 *
 306 * @finished_ret: Return the finished ordered extent
 307 * @file_offset:  File offset for the finished IO
 308 *                Will also be updated to one byte past the range that is
 309 *                recorded as finished. This allows the caller to walk forward.
 310 * @io_size:      Length of the finished IO range
 311 * @uptodate:     Whether the IO finished without problems
 312 *
 313 * Return true if any ordered extent is finished in the range, and update
 314 * @finished_ret and @file_offset.
 315 * Return false otherwise.
 316 *
 317 * NOTE: Although the range can cross multiple ordered extents, only one
 318 * ordered extent will be updated during one call. The caller is responsible
 319 * for iterating over all ordered extents in the range.
 320 */
 321bool btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
 322                                   struct btrfs_ordered_extent **finished_ret,
 323                                   u64 *file_offset, u64 io_size, int uptodate)
 324{
 325        struct btrfs_fs_info *fs_info = inode->root->fs_info;
 326        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 327        struct rb_node *node;
 328        struct btrfs_ordered_extent *entry = NULL;
 329        bool finished = false;
 330        unsigned long flags;
 331        u64 dec_end;
 332        u64 dec_start;
 333        u64 to_dec;
 334
 335        spin_lock_irqsave(&tree->lock, flags);
 336        node = tree_search(tree, *file_offset);
 337        if (!node)
 338                goto out;
 339
 340        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 341        if (!in_range(*file_offset, entry->file_offset, entry->num_bytes))
 342                goto out;
 343
 344        dec_start = max(*file_offset, entry->file_offset);
 345        dec_end = min(*file_offset + io_size,
 346                      entry->file_offset + entry->num_bytes);
 347        *file_offset = dec_end;
 348        if (dec_start > dec_end) {
 349                btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
 350                           dec_start, dec_end);
 351        }
 352        to_dec = dec_end - dec_start;
 353        if (to_dec > entry->bytes_left) {
 354                btrfs_crit(fs_info,
 355                           "bad ordered accounting left %llu size %llu",
 356                           entry->bytes_left, to_dec);
 357        }
 358        entry->bytes_left -= to_dec;
 359        if (!uptodate)
 360                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 361
 362        if (entry->bytes_left == 0) {
 363                /*
 364                 * Ensure only one caller can set the flag and finished_ret
 365                 * accordingly
 366                 */
 367                finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 368                /* test_and_set_bit implies a barrier */
 369                cond_wake_up_nomb(&entry->wait);
 370        }
 371out:
 372        if (finished && finished_ret && entry) {
 373                *finished_ret = entry;
 374                refcount_inc(&entry->refs);
 375        }
 376        spin_unlock_irqrestore(&tree->lock, flags);
 377        return finished;
 378}
 379
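/*
 * Illustrative sketch, not part of the original source: how an IO-completion
 * style caller could walk a finished range that may span several ordered
 * extents, relying on the file offset being advanced by each call.  The
 * helper name example_finish_range and the completion step are hypothetical.
 */
static void __maybe_unused example_finish_range(struct btrfs_inode *inode,
                                                u64 start, u64 len,
                                                int uptodate)
{
        struct btrfs_ordered_extent *ordered;
        u64 cur = start;
        const u64 end = start + len;

        while (cur < end) {
                ordered = NULL;
                if (!btrfs_dec_test_first_ordered_pending(inode, &ordered,
                                                          &cur, end - cur,
                                                          uptodate))
                        break;
                /* The ordered extent is fully written, complete it here. */
                btrfs_put_ordered_extent(ordered);
        }
}
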
 380/*
 381 * Finish IO for one ordered extent across a given range.  The range can only
 382 * contain one ordered extent.
 383 *
 384 * @cached:      The cached ordered extent. If not NULL, we can skip the tree
 385 *               search and use the ordered extent directly.
 386 *               Will also be used to store the finished ordered extent.
 387 * @file_offset: File offset for the finished IO
 388 * @io_size:     Length of the finished IO range
 389 * @uptodate:    Whether the IO finished without problems
 390 *
 391 * Return true if the ordered extent is finished in the range, and update
 392 * @cached.
 393 * Return false otherwise.
 394 *
 395 * NOTE: The range can NOT cross multiple ordered extents.
 396 * The caller must ensure the range does not cross ordered extent boundaries.
 397 */
 398bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
 399                                    struct btrfs_ordered_extent **cached,
 400                                    u64 file_offset, u64 io_size, int uptodate)
 401{
 402        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 403        struct rb_node *node;
 404        struct btrfs_ordered_extent *entry = NULL;
 405        unsigned long flags;
 406        bool finished = false;
 407
 408        spin_lock_irqsave(&tree->lock, flags);
 409        if (cached && *cached) {
 410                entry = *cached;
 411                goto have_entry;
 412        }
 413
 414        node = tree_search(tree, file_offset);
 415        if (!node)
 416                goto out;
 417
 418        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 419have_entry:
 420        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
 421                goto out;
 422
 423        if (io_size > entry->bytes_left)
 424                btrfs_crit(inode->root->fs_info,
 425                           "bad ordered accounting left %llu size %llu",
 426                       entry->bytes_left, io_size);
 427
 428        entry->bytes_left -= io_size;
 429        if (!uptodate)
 430                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 431
 432        if (entry->bytes_left == 0) {
 433                /*
 434                 * Ensure only one caller can set the flag and return the
 435                 * ordered extent via @cached accordingly
 436                 */
 437                finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 438                /* test_and_set_bit implies a barrier */
 439                cond_wake_up_nomb(&entry->wait);
 440        }
 441out:
 442        if (finished && cached && entry) {
 443                *cached = entry;
 444                refcount_inc(&entry->refs);
 445        }
 446        spin_unlock_irqrestore(&tree->lock, flags);
 447        return finished;
 448}
 449
 450/*
 451 * used to drop a reference on an ordered extent.  This will free
 452 * the extent if the last reference is dropped
 453 */
 454void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 455{
 456        struct list_head *cur;
 457        struct btrfs_ordered_sum *sum;
 458
 459        trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);
 460
 461        if (refcount_dec_and_test(&entry->refs)) {
 462                ASSERT(list_empty(&entry->root_extent_list));
 463                ASSERT(list_empty(&entry->log_list));
 464                ASSERT(RB_EMPTY_NODE(&entry->rb_node));
 465                if (entry->inode)
 466                        btrfs_add_delayed_iput(entry->inode);
 467                while (!list_empty(&entry->list)) {
 468                        cur = entry->list.next;
 469                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
 470                        list_del(&sum->list);
 471                        kvfree(sum);
 472                }
 473                kmem_cache_free(btrfs_ordered_extent_cache, entry);
 474        }
 475}
 476
 477/*
 478 * Remove an ordered extent from the tree.  No references are dropped,
 479 * but any waiters are woken up.
 480 */
 481void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
 482                                 struct btrfs_ordered_extent *entry)
 483{
 484        struct btrfs_ordered_inode_tree *tree;
 485        struct btrfs_root *root = btrfs_inode->root;
 486        struct btrfs_fs_info *fs_info = root->fs_info;
 487        struct rb_node *node;
 488        bool pending;
 489
 490        /* This is paired with btrfs_add_ordered_extent. */
 491        spin_lock(&btrfs_inode->lock);
 492        btrfs_mod_outstanding_extents(btrfs_inode, -1);
 493        spin_unlock(&btrfs_inode->lock);
 494        if (root != fs_info->tree_root)
 495                btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
 496                                                false);
 497
 498        percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
 499                                 fs_info->delalloc_batch);
 500
 501        tree = &btrfs_inode->ordered_tree;
 502        spin_lock_irq(&tree->lock);
 503        node = &entry->rb_node;
 504        rb_erase(node, &tree->tree);
 505        RB_CLEAR_NODE(node);
 506        if (tree->last == node)
 507                tree->last = NULL;
 508        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
 509        pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
 510        spin_unlock_irq(&tree->lock);
 511
 512        /*
 513         * The current running transaction is waiting on us, we need to let it
 514         * know that we're complete and wake it up.
 515         */
 516        if (pending) {
 517                struct btrfs_transaction *trans;
 518
 519                /*
 520                 * The checks for trans are just a formality, it should be set,
 521                 * but if it isn't we don't want to deref/assert under the spin
 522                 * lock, so be nice and check if trans is set, but ASSERT() so
 523                 * if it isn't set a developer will notice.
 524                 */
 525                spin_lock(&fs_info->trans_lock);
 526                trans = fs_info->running_transaction;
 527                if (trans)
 528                        refcount_inc(&trans->use_count);
 529                spin_unlock(&fs_info->trans_lock);
 530
 531                ASSERT(trans);
 532                if (trans) {
 533                        if (atomic_dec_and_test(&trans->pending_ordered))
 534                                wake_up(&trans->pending_wait);
 535                        btrfs_put_transaction(trans);
 536                }
 537        }
 538
 539        spin_lock(&root->ordered_extent_lock);
 540        list_del_init(&entry->root_extent_list);
 541        root->nr_ordered_extents--;
 542
 543        trace_btrfs_ordered_extent_remove(btrfs_inode, entry);
 544
 545        if (!root->nr_ordered_extents) {
 546                spin_lock(&fs_info->ordered_root_lock);
 547                BUG_ON(list_empty(&root->ordered_root));
 548                list_del_init(&root->ordered_root);
 549                spin_unlock(&fs_info->ordered_root_lock);
 550        }
 551        spin_unlock(&root->ordered_extent_lock);
 552        wake_up(&entry->wait);
 553}
 554
 555static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
 556{
 557        struct btrfs_ordered_extent *ordered;
 558
 559        ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
 560        btrfs_start_ordered_extent(ordered, 1);
 561        complete(&ordered->completion);
 562}
 563
 564/*
 565 * wait for all the ordered extents in a root.  This is done when balancing
 566 * space between drives.
 567 */
 568u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
 569                               const u64 range_start, const u64 range_len)
 570{
 571        struct btrfs_fs_info *fs_info = root->fs_info;
 572        LIST_HEAD(splice);
 573        LIST_HEAD(skipped);
 574        LIST_HEAD(works);
 575        struct btrfs_ordered_extent *ordered, *next;
 576        u64 count = 0;
 577        const u64 range_end = range_start + range_len;
 578
 579        mutex_lock(&root->ordered_extent_mutex);
 580        spin_lock(&root->ordered_extent_lock);
 581        list_splice_init(&root->ordered_extents, &splice);
 582        while (!list_empty(&splice) && nr) {
 583                ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
 584                                           root_extent_list);
 585
 586                if (range_end <= ordered->disk_bytenr ||
 587                    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
 588                        list_move_tail(&ordered->root_extent_list, &skipped);
 589                        cond_resched_lock(&root->ordered_extent_lock);
 590                        continue;
 591                }
 592
 593                list_move_tail(&ordered->root_extent_list,
 594                               &root->ordered_extents);
 595                refcount_inc(&ordered->refs);
 596                spin_unlock(&root->ordered_extent_lock);
 597
 598                btrfs_init_work(&ordered->flush_work,
 599                                btrfs_run_ordered_extent_work, NULL, NULL);
 600                list_add_tail(&ordered->work_list, &works);
 601                btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
 602
 603                cond_resched();
 604                spin_lock(&root->ordered_extent_lock);
 605                if (nr != U64_MAX)
 606                        nr--;
 607                count++;
 608        }
 609        list_splice_tail(&skipped, &root->ordered_extents);
 610        list_splice_tail(&splice, &root->ordered_extents);
 611        spin_unlock(&root->ordered_extent_lock);
 612
 613        list_for_each_entry_safe(ordered, next, &works, work_list) {
 614                list_del_init(&ordered->work_list);
 615                wait_for_completion(&ordered->completion);
 616                btrfs_put_ordered_extent(ordered);
 617                cond_resched();
 618        }
 619        mutex_unlock(&root->ordered_extent_mutex);
 620
 621        return count;
 622}
 623
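/*
 * Illustrative sketch, not part of the original source: flush and wait for
 * every ordered extent of a root, with no count limit and an unbounded byte
 * range.  The helper name example_wait_all_in_root is hypothetical.
 */
static u64 __maybe_unused example_wait_all_in_root(struct btrfs_root *root)
{
        return btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
}
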
 624void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
 625                             const u64 range_start, const u64 range_len)
 626{
 627        struct btrfs_root *root;
 628        struct list_head splice;
 629        u64 done;
 630
 631        INIT_LIST_HEAD(&splice);
 632
 633        mutex_lock(&fs_info->ordered_operations_mutex);
 634        spin_lock(&fs_info->ordered_root_lock);
 635        list_splice_init(&fs_info->ordered_roots, &splice);
 636        while (!list_empty(&splice) && nr) {
 637                root = list_first_entry(&splice, struct btrfs_root,
 638                                        ordered_root);
 639                root = btrfs_grab_root(root);
 640                BUG_ON(!root);
 641                list_move_tail(&root->ordered_root,
 642                               &fs_info->ordered_roots);
 643                spin_unlock(&fs_info->ordered_root_lock);
 644
 645                done = btrfs_wait_ordered_extents(root, nr,
 646                                                  range_start, range_len);
 647                btrfs_put_root(root);
 648
 649                spin_lock(&fs_info->ordered_root_lock);
 650                if (nr != U64_MAX) {
 651                        nr -= done;
 652                }
 653        }
 654        list_splice_tail(&splice, &fs_info->ordered_roots);
 655        spin_unlock(&fs_info->ordered_root_lock);
 656        mutex_unlock(&fs_info->ordered_operations_mutex);
 657}
 658
 659/*
 660 * Used to start IO or wait for a given ordered extent to finish.
 661 *
 662 * If wait is one, this effectively waits on page writeback for all the pages
 663 * in the extent, and it waits on the IO completion code to insert
 664 * metadata into the btree corresponding to the extent.
 665 */
 666void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
 667{
 668        u64 start = entry->file_offset;
 669        u64 end = start + entry->num_bytes - 1;
 670        struct btrfs_inode *inode = BTRFS_I(entry->inode);
 671
 672        trace_btrfs_ordered_extent_start(inode, entry);
 673
 674        /*
 675         * pages in the range can be dirty, clean or writeback.  We
 676         * start IO on any dirty ones so the wait doesn't stall waiting
 677         * for the flusher thread to find them
 678         */
 679        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
 680                filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
 681        if (wait) {
 682                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
 683                                                 &entry->flags));
 684        }
 685}
 686
 687/*
 688 * Used to wait on ordered extents across a large range of bytes.
 689 */
 690int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 691{
 692        int ret = 0;
 693        int ret_wb = 0;
 694        u64 end;
 695        u64 orig_end;
 696        struct btrfs_ordered_extent *ordered;
 697
 698        if (start + len < start) {
 699                orig_end = INT_LIMIT(loff_t);
 700        } else {
 701                orig_end = start + len - 1;
 702                if (orig_end > INT_LIMIT(loff_t))
 703                        orig_end = INT_LIMIT(loff_t);
 704        }
 705
 706        /* start IO across the range first to instantiate any delalloc
 707         * extents
 708         */
 709        ret = btrfs_fdatawrite_range(inode, start, orig_end);
 710        if (ret)
 711                return ret;
 712
 713        /*
 714         * If we have a writeback error don't return immediately. Wait first
 715         * for any ordered extents that haven't completed yet. This is to make
 716         * sure no one can dirty the same page ranges and call writepages()
 717         * before the ordered extents complete - to avoid failures (-EEXIST)
 718         * when adding the new ordered extents to the ordered tree.
 719         */
 720        ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 721
 722        end = orig_end;
 723        while (1) {
 724                ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
 725                if (!ordered)
 726                        break;
 727                if (ordered->file_offset > orig_end) {
 728                        btrfs_put_ordered_extent(ordered);
 729                        break;
 730                }
 731                if (ordered->file_offset + ordered->num_bytes <= start) {
 732                        btrfs_put_ordered_extent(ordered);
 733                        break;
 734                }
 735                btrfs_start_ordered_extent(ordered, 1);
 736                end = ordered->file_offset;
 737                /*
 738                 * If the ordered extent had an error save the error but don't
 739                 * exit without waiting first for all other ordered extents in
 740                 * the range to complete.
 741                 */
 742                if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
 743                        ret = -EIO;
 744                btrfs_put_ordered_extent(ordered);
 745                if (end == 0 || end == start)
 746                        break;
 747                end--;
 748        }
 749        return ret_wb ? ret_wb : ret;
 750}
 751
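/*
 * Illustrative sketch, not part of the original source: wait for all ordered
 * extents of a file, e.g. before an operation that needs the data to be fully
 * on disk.  The helper name example_wait_whole_file is hypothetical.
 */
static int __maybe_unused example_wait_whole_file(struct inode *inode)
{
        return btrfs_wait_ordered_range(inode, 0, (u64)-1);
}
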
 752/*
 753 * Find an ordered extent corresponding to @file_offset.  Return NULL if
 754 * nothing is found, otherwise take a reference on the extent and return it.
 755 */
 756struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
 757                                                         u64 file_offset)
 758{
 759        struct btrfs_ordered_inode_tree *tree;
 760        struct rb_node *node;
 761        struct btrfs_ordered_extent *entry = NULL;
 762        unsigned long flags;
 763
 764        tree = &inode->ordered_tree;
 765        spin_lock_irqsave(&tree->lock, flags);
 766        node = tree_search(tree, file_offset);
 767        if (!node)
 768                goto out;
 769
 770        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 771        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
 772                entry = NULL;
 773        if (entry)
 774                refcount_inc(&entry->refs);
 775out:
 776        spin_unlock_irqrestore(&tree->lock, flags);
 777        return entry;
 778}
 779
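/*
 * Illustrative sketch, not part of the original source: the usual
 * lookup/use/put pattern.  A successful lookup returns with an extra
 * reference that must be dropped with btrfs_put_ordered_extent().  The
 * helper name example_offset_has_io_error is hypothetical.
 */
static bool __maybe_unused example_offset_has_io_error(struct btrfs_inode *inode,
                                                       u64 offset)
{
        struct btrfs_ordered_extent *ordered;
        bool ioerr = false;

        ordered = btrfs_lookup_ordered_extent(inode, offset);
        if (ordered) {
                ioerr = test_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
                btrfs_put_ordered_extent(ordered);
        }
        return ioerr;
}
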
 780/* Since the DIO code tries to lock a wide area we need to look for any ordered
 781 * extents that exist in the range, rather than just the start of the range.
 782 */
 783struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 784                struct btrfs_inode *inode, u64 file_offset, u64 len)
 785{
 786        struct btrfs_ordered_inode_tree *tree;
 787        struct rb_node *node;
 788        struct btrfs_ordered_extent *entry = NULL;
 789
 790        tree = &inode->ordered_tree;
 791        spin_lock_irq(&tree->lock);
 792        node = tree_search(tree, file_offset);
 793        if (!node) {
 794                node = tree_search(tree, file_offset + len);
 795                if (!node)
 796                        goto out;
 797        }
 798
 799        while (1) {
 800                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 801                if (range_overlaps(entry, file_offset, len))
 802                        break;
 803
 804                if (entry->file_offset >= file_offset + len) {
 805                        entry = NULL;
 806                        break;
 807                }
 808                entry = NULL;
 809                node = rb_next(node);
 810                if (!node)
 811                        break;
 812        }
 813out:
 814        if (entry)
 815                refcount_inc(&entry->refs);
 816        spin_unlock_irq(&tree->lock);
 817        return entry;
 818}
 819
 820/*
 821 * Adds all ordered extents to the given list. The list ends up sorted by the
 822 * file_offset of the ordered extents.
 823 */
 824void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
 825                                           struct list_head *list)
 826{
 827        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 828        struct rb_node *n;
 829
 830        ASSERT(inode_is_locked(&inode->vfs_inode));
 831
 832        spin_lock_irq(&tree->lock);
 833        for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
 834                struct btrfs_ordered_extent *ordered;
 835
 836                ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
 837
 838                if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
 839                        continue;
 840
 841                ASSERT(list_empty(&ordered->log_list));
 842                list_add_tail(&ordered->log_list, list);
 843                refcount_inc(&ordered->refs);
 844        }
 845        spin_unlock_irq(&tree->lock);
 846}
 847
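/*
 * Illustrative sketch, not part of the original source: a consumer of the
 * list built by btrfs_get_ordered_extents_for_logging() must drop the
 * references taken for it once done.  The helper name
 * example_drop_logged_list is hypothetical.
 */
static void __maybe_unused example_drop_logged_list(struct list_head *list)
{
        struct btrfs_ordered_extent *ordered, *tmp;

        list_for_each_entry_safe(ordered, tmp, list, log_list) {
                list_del_init(&ordered->log_list);
                btrfs_put_ordered_extent(ordered);
        }
}
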
 848/*
 849 * Look up and return the ordered extent at 'file_offset', or the nearest
 850 * one before it.  NULL is returned if none is found.
 851 */
 852struct btrfs_ordered_extent *
 853btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
 854{
 855        struct btrfs_ordered_inode_tree *tree;
 856        struct rb_node *node;
 857        struct btrfs_ordered_extent *entry = NULL;
 858
 859        tree = &inode->ordered_tree;
 860        spin_lock_irq(&tree->lock);
 861        node = tree_search(tree, file_offset);
 862        if (!node)
 863                goto out;
 864
 865        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 866        refcount_inc(&entry->refs);
 867out:
 868        spin_unlock_irq(&tree->lock);
 869        return entry;
 870}
 871
 872/*
 873 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 874 * pending ordered extents in it are run to completion.
 875 *
 876 * @inode:        Inode whose ordered tree is to be searched
 877 * @start:        Beginning of range to flush
 878 * @end:          Last byte of range to lock
 879 * @cached_state: If passed, will return the extent state responsible for the
 880 * locked range. It's the caller's responsibility to free the cached state.
 881 *
 882 * This function always returns with the given range locked, ensuring after it's
 883 * called no ordered extent can be pending.
 884 */
 885void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
 886                                        u64 end,
 887                                        struct extent_state **cached_state)
 888{
 889        struct btrfs_ordered_extent *ordered;
 890        struct extent_state *cache = NULL;
 891        struct extent_state **cachedp = &cache;
 892
 893        if (cached_state)
 894                cachedp = cached_state;
 895
 896        while (1) {
 897                lock_extent_bits(&inode->io_tree, start, end, cachedp);
 898                ordered = btrfs_lookup_ordered_range(inode, start,
 899                                                     end - start + 1);
 900                if (!ordered) {
 901                        /*
 902                         * If no external cached_state has been passed then
 903                         * decrement the extra ref taken for cachedp since we
 904                         * aren't exposing it outside of this function
 905                         */
 906                        if (!cached_state)
 907                                refcount_dec(&cache->refs);
 908                        break;
 909                }
 910                unlock_extent_cached(&inode->io_tree, start, end, cachedp);
 911                btrfs_start_ordered_extent(ordered, 1);
 912                btrfs_put_ordered_extent(ordered);
 913        }
 914}
 915
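/*
 * Illustrative sketch, not part of the original source: lock a range with no
 * ordered extents pending, operate on it, then unlock.  The helper name
 * example_locked_range_op and the work step are hypothetical.
 */
static void __maybe_unused example_locked_range_op(struct btrfs_inode *inode,
                                                   u64 start, u64 end)
{
        struct extent_state *cached_state = NULL;

        btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
        /* ... operate on [start, end] knowing no ordered extent is pending ... */
        unlock_extent_cached(&inode->io_tree, start, end, &cached_state);
}
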
 916static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
 917                                u64 len)
 918{
 919        struct inode *inode = ordered->inode;
 920        u64 file_offset = ordered->file_offset + pos;
 921        u64 disk_bytenr = ordered->disk_bytenr + pos;
 922        u64 num_bytes = len;
 923        u64 disk_num_bytes = len;
 924        int type;
 925        unsigned long flags_masked = ordered->flags & ~(1 << BTRFS_ORDERED_DIRECT);
 926        int compress_type = ordered->compress_type;
 927        unsigned long weight;
 928        int ret;
 929
 930        weight = hweight_long(flags_masked);
 931        WARN_ON_ONCE(weight > 1);
 932        if (!weight)
 933                type = 0;
 934        else
 935                type = __ffs(flags_masked);
 936
 937        if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
 938                WARN_ON_ONCE(1);
 939                ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
 940                                file_offset, disk_bytenr, num_bytes,
 941                                disk_num_bytes, compress_type);
 942        } else if (test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
 943                ret = btrfs_add_ordered_extent_dio(BTRFS_I(inode), file_offset,
 944                                disk_bytenr, num_bytes, disk_num_bytes, type);
 945        } else {
 946                ret = btrfs_add_ordered_extent(BTRFS_I(inode), file_offset,
 947                                disk_bytenr, num_bytes, disk_num_bytes, type);
 948        }
 949
 950        return ret;
 951}
 952
 953int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
 954                                u64 post)
 955{
 956        struct inode *inode = ordered->inode;
 957        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
 958        struct rb_node *node;
 959        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 960        int ret = 0;
 961
 962        spin_lock_irq(&tree->lock);
 963        /* Remove from tree once */
 964        node = &ordered->rb_node;
 965        rb_erase(node, &tree->tree);
 966        RB_CLEAR_NODE(node);
 967        if (tree->last == node)
 968                tree->last = NULL;
 969
 970        ordered->file_offset += pre;
 971        ordered->disk_bytenr += pre;
 972        ordered->num_bytes -= (pre + post);
 973        ordered->disk_num_bytes -= (pre + post);
 974        ordered->bytes_left -= (pre + post);
 975
 976        /* Re-insert the node */
 977        node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
 978        if (node)
 979                btrfs_panic(fs_info, -EEXIST,
 980                        "zoned: inconsistency in ordered tree at offset %llu",
 981                            ordered->file_offset);
 982
 983        spin_unlock_irq(&tree->lock);
 984
 985        if (pre)
 986                ret = clone_ordered_extent(ordered, 0, pre);
 987        if (ret == 0 && post)
 988                ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
 989                                           post);
 990
 991        return ret;
 992}
 993
 994int __init ordered_data_init(void)
 995{
 996        btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
 997                                     sizeof(struct btrfs_ordered_extent), 0,
 998                                     SLAB_MEM_SPREAD,
 999                                     NULL);
1000        if (!btrfs_ordered_extent_cache)
1001                return -ENOMEM;
1002
1003        return 0;
1004}
1005
1006void __cold ordered_data_exit(void)
1007{
1008        kmem_cache_destroy(btrfs_ordered_extent_cache);
1009}
1010