linux/fs/btrfs/transaction.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"

#define BTRFS_ROOT_TRANS_TAG 0

/*
 * Transaction states and transitions
 *
 * No running transaction (fs tree blocks are not modified)
 * |
 * | To next stage:
 * |  Call start_transaction() variants, except btrfs_join_transaction_nostart().
 * V
 * Transaction N [[TRANS_STATE_RUNNING]]
 * |
 * | New trans handles can be attached to transaction N by calling any of the
 * | start_transaction() variants.
 * |
 * | To next stage:
 * |  Call btrfs_commit_transaction() on any trans handle attached to
 * |  transaction N
 * V
 * Transaction N [[TRANS_STATE_COMMIT_START]]
 * |
 * | Will wait for the previous running transaction to completely finish if
 * | there is one.
 * |
 * | Then one of the following happens:
 * | - Wait for all other trans handle holders to release.
 * |   The btrfs_commit_transaction() caller will do the commit work.
 * | - Wait for the current transaction to be committed by others.
 * |   Another btrfs_commit_transaction() caller will do the commit work.
 * |
 * | At this stage, only btrfs_join_transaction*() variants can attach
 * | to this running transaction.
 * | All other variants will wait for the current one to finish and attach to
 * | transaction N+1.
 * |
 * | To next stage:
 * |  Caller is chosen to commit transaction N, and all other trans handles
 * |  have been released.
 * V
 * Transaction N [[TRANS_STATE_COMMIT_DOING]]
 * |
 * | The heavy lifting transaction work is started.
 * | From running delayed refs (modifying extent tree) to creating pending
 * | snapshots, running qgroups.
 * | In short, modify supporting trees to reflect modifications of subvolume
 * | trees.
 * |
 * | At this stage, all start_transaction() calls will wait for this
 * | transaction to finish and attach to transaction N+1.
 * |
 * | To next stage:
 * |  Until all supporting trees are updated.
 * V
 * Transaction N [[TRANS_STATE_UNBLOCKED]]
 * |                                                Transaction N+1
 * | All needed trees are modified, thus we only    [[TRANS_STATE_RUNNING]]
 * | need to write them back to disk and update     |
 * | super blocks.                                  |
 * |                                                |
 * | At this stage, new transaction is allowed to   |
 * | start.                                         |
 * | All new start_transaction() calls will be      |
 * | attached to transid N+1.                       |
 * |                                                |
 * | To next stage:                                 |
 * |  Until all tree blocks and super blocks are    |
 * |  written to block devices                      |
 * V                                                |
 * Transaction N [[TRANS_STATE_COMPLETED]]          V
 *   All tree blocks and super blocks are written.  Transaction N+1
 *   This transaction is finished and all its       [[TRANS_STATE_COMMIT_START]]
 *   data structures will be cleaned up.            | Life goes on
 */
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_RUNNING]           = 0U,
        [TRANS_STATE_COMMIT_START]      = (__TRANS_START | __TRANS_ATTACH),
        [TRANS_STATE_COMMIT_DOING]      = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOSTART),
        [TRANS_STATE_UNBLOCKED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK |
                                           __TRANS_JOIN_NOSTART),
        [TRANS_STATE_COMPLETED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK |
                                           __TRANS_JOIN_NOSTART),
};
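
/*
 * Illustrative sketch, not part of the original file: how a typical caller
 * walks the state machine documented above.  "root" is a hypothetical
 * subvolume root; error handling beyond the handle itself is omitted.
 *
 *      struct btrfs_trans_handle *trans;
 *
 *      trans = btrfs_start_transaction(root, 1);  // attach at TRANS_STATE_RUNNING
 *      if (IS_ERR(trans))
 *              return PTR_ERR(trans);
 *      // ... modify fs trees through @trans ...
 *      return btrfs_commit_transaction(trans);    // drives COMMIT_START -> COMPLETED
 */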

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(refcount_read(&transaction->use_count) == 0);
        if (refcount_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(!RB_EMPTY_ROOT(
                                &transaction->delayed_refs.href_root.rb_root));
                WARN_ON(!RB_EMPTY_ROOT(
                                &transaction->delayed_refs.dirty_extent_root));
                if (transaction->delayed_refs.pending_csums)
                        btrfs_err(transaction->fs_info,
                                  "pending csums is %llu",
                                  transaction->delayed_refs.pending_csums);
                /*
                 * If any block groups are found in ->deleted_bgs then it's
                 * because the transaction was aborted and a commit did not
                 * happen (things failed before writing the new superblock
                 * and calling btrfs_finish_extent_commit()), so we can not
                 * discard the physical locations of the block groups.
                 */
                while (!list_empty(&transaction->deleted_bgs)) {
                        struct btrfs_block_group *cache;

                        cache = list_first_entry(&transaction->deleted_bgs,
                                                 struct btrfs_block_group,
                                                 bg_list);
                        list_del_init(&cache->bg_list);
                        btrfs_unfreeze_block_group(cache);
                        btrfs_put_block_group(cache);
                }
                WARN_ON(!list_empty(&transaction->dev_update_list));
                kfree(transaction);
        }
}
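
/*
 * Illustrative sketch, not part of the original file: the reference
 * pattern that btrfs_put_transaction() pairs with.  Anyone who wants to
 * use the running transaction outside of fs_info->trans_lock takes a
 * reference first (wait_current_trans() below is a real user):
 *
 *      spin_lock(&fs_info->trans_lock);
 *      cur_trans = fs_info->running_transaction;
 *      if (cur_trans)
 *              refcount_inc(&cur_trans->use_count);
 *      spin_unlock(&fs_info->trans_lock);
 *      // ... cur_trans may now be dereferenced safely ...
 *      if (cur_trans)
 *              btrfs_put_transaction(cur_trans);
 */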

static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *root, *tmp;

        down_write(&fs_info->commit_root_sem);
        list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
                                 dirty_list) {
                list_del_init(&root->dirty_list);
                free_extent_buffer(root->commit_root);
                root->commit_root = btrfs_root_node(root);
                if (is_fstree(root->root_key.objectid))
                        btrfs_unpin_free_ino(root);
                extent_io_tree_release(&root->dirty_log_pages);
                btrfs_qgroup_clean_swapped_blocks(root);
        }

        /* We can free old roots now. */
        spin_lock(&cur_trans->dropped_roots_lock);
        while (!list_empty(&cur_trans->dropped_roots)) {
                root = list_first_entry(&cur_trans->dropped_roots,
                                        struct btrfs_root, root_list);
                list_del_init(&root->root_list);
                spin_unlock(&cur_trans->dropped_roots_lock);
                btrfs_free_log(trans, root);
                btrfs_drop_and_free_fs_root(fs_info, root);
                spin_lock(&cur_trans->dropped_roots_lock);
        }
        spin_unlock(&cur_trans->dropped_roots_lock);
        up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
                                          unsigned int type)
{
        atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
        return atomic_read(&trans->num_extwriters);
}

/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;

        if (!trans->chunk_bytes_reserved)
                return;

        WARN_ON_ONCE(!list_empty(&trans->new_bgs));

        btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
                                trans->chunk_bytes_reserved, NULL);
        trans->chunk_bytes_reserved = 0;
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
                                     unsigned int type)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (TRANS_ABORTED(cur_trans)) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                if (btrfs_blocked_trans_types[cur_trans->state] & type) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
                refcount_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                extwriter_counter_inc(cur_trans, type);
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        /*
         * If we are ATTACH, we just want to catch the current transaction,
         * and commit it. If there is no transaction, just return ENOENT.
         */
        if (type == TRANS_ATTACH)
                return -ENOENT;

        /*
         * JOIN_NOLOCK only happens during the transaction commit, so
         * it is impossible that ->running_transaction is NULL
         */
        BUG_ON(type == TRANS_JOIN_NOLOCK);

        cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the checks above
                 */
                kfree(cur_trans);
                goto loop;
        } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                kfree(cur_trans);
                return -EROFS;
        }

        cur_trans->fs_info = fs_info;
        atomic_set(&cur_trans->num_writers, 1);
        extwriter_counter_init(cur_trans, type);
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        cur_trans->state = TRANS_STATE_RUNNING;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        refcount_set(&cur_trans->use_count, 2);
        cur_trans->flags = 0;
        cur_trans->start_time = ktime_get_seconds();

        memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

        cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
        cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
        atomic_set(&cur_trans->delayed_refs.num_entries, 0);

        /*
         * although the tree mod log is per file system and not per transaction,
         * the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list))
                WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
                WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
        atomic64_set(&fs_info->tree_mod_seq, 0);

        spin_lock_init(&cur_trans->delayed_refs.lock);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        INIT_LIST_HEAD(&cur_trans->dev_update_list);
        INIT_LIST_HEAD(&cur_trans->switch_commits);
        INIT_LIST_HEAD(&cur_trans->dirty_bgs);
        INIT_LIST_HEAD(&cur_trans->io_bgs);
        INIT_LIST_HEAD(&cur_trans->dropped_roots);
        mutex_init(&cur_trans->cache_write_mutex);
        spin_lock_init(&cur_trans->dirty_bgs_lock);
        INIT_LIST_HEAD(&cur_trans->deleted_bgs);
        spin_lock_init(&cur_trans->dropped_roots_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
                        IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
        extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
                        IO_TREE_FS_PINNED_EXTENTS, NULL);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}

/*
 * This does all the record keeping required to make sure that a shareable root
 * is properly recorded in a given transaction.  This is required to make sure
 * the old root from before we joined the transaction is deleted when the
 * transaction commits.
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               int force)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
            root->last_trans < trans->transid) || force) {
                WARN_ON(root == fs_info->extent_root);
                WARN_ON(!force && root->commit_root != root->node);

                /*
                 * see below for IN_TRANS_SETUP usage rules
                 * we have the reloc mutex held now, so there
                 * is only one writer in this function
                 */
                set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

                /* make sure readers find IN_TRANS_SETUP before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid && !force) {
                        spin_unlock(&fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky.  We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c.  The solution used here is to flag ourselves
                 * with root IN_TRANS_SETUP.  When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock.  smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_mb__before_atomic();
                clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
        }
        return 0;
}

void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_transaction *cur_trans = trans->transaction;

        /* Add ourselves to the transaction dropped list */
        spin_lock(&cur_trans->dropped_roots_lock);
        list_add_tail(&root->root_list, &cur_trans->dropped_roots);
        spin_unlock(&cur_trans->dropped_roots_lock);

        /* Make sure we don't try to update the root at commit time */
        spin_lock(&fs_info->fs_roots_radix_lock);
        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                             (unsigned long)root->root_key.objectid,
                             BTRFS_ROOT_TRANS_TAG);
        spin_unlock(&fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
                return 0;

        /*
         * see record_root_in_trans for comments about IN_TRANS_SETUP usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
                return 0;

        mutex_lock(&fs_info->reloc_mutex);
        record_root_in_trans(trans, root, 0);
        mutex_unlock(&fs_info->reloc_mutex);

        return 0;
}
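
/*
 * Illustrative sketch, not part of the original file: the barrier pairing
 * between the writer in record_root_in_trans() and the lockless check in
 * btrfs_record_root_in_trans() above.
 *
 *      writer (record_root_in_trans)      reader (btrfs_record_root_in_trans)
 *      -----------------------------      -----------------------------------
 *      set_bit(IN_TRANS_SETUP)            smp_rmb();
 *      smp_wmb();                         if (root->last_trans == transid &&
 *      root->last_trans = transid;            !test_bit(IN_TRANS_SETUP))
 *      btrfs_init_reloc_root();                   return 0;   // fast path
 *      smp_mb__before_atomic();           else
 *      clear_bit(IN_TRANS_SETUP)              take reloc_mutex  // slow path
 *
 * Per the comments in record_root_in_trans(), a reader that observes the
 * updated last_trans is guaranteed to also observe IN_TRANS_SETUP while the
 * reloc root is still being set up, so it cannot take the fast path too
 * early.
 */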

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
        return (trans->state >= TRANS_STATE_COMMIT_START &&
                trans->state < TRANS_STATE_UNBLOCKED &&
                !TRANS_ABORTED(trans));
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&fs_info->trans_lock);
        cur_trans = fs_info->running_transaction;
        if (cur_trans && is_transaction_blocked(cur_trans)) {
                refcount_inc(&cur_trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                wait_event(fs_info->transaction_wait,
                           cur_trans->state >= TRANS_STATE_UNBLOCKED ||
                           TRANS_ABORTED(cur_trans));
                btrfs_put_transaction(cur_trans);
        } else {
                spin_unlock(&fs_info->trans_lock);
        }
}

static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                return 0;

        if (type == TRANS_START)
                return 1;

        return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!fs_info->reloc_ctl ||
            !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
            root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
            root->reloc_root)
                return false;

        return true;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
                  unsigned int type, enum btrfs_reserve_flush_enum flush,
                  bool enforce_qgroups)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        u64 qgroup_reserved = 0;
        bool reloc_reserved = false;
        bool do_chunk_alloc = false;
        int ret;

        /* Send isn't supposed to start transactions. */
        ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type & TRANS_EXTWRITERS);
                h = current->journal_info;
                refcount_inc(&h->use_count);
                WARN_ON(refcount_read(&h->use_count) > 2);
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items && root != fs_info->chunk_root) {
                struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
                u64 delayed_refs_bytes = 0;

                qgroup_reserved = num_items * fs_info->nodesize;
                ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
                                enforce_qgroups);
                if (ret)
                        return ERR_PTR(ret);

                /*
                 * We want to reserve all the bytes we may need all at once, so
                 * we only do 1 enospc flushing cycle per transaction start.  We
                 * accomplish this by simply assuming we'll do 2 x num_items
                 * worth of delayed refs updates in this trans handle, and
                 * refill that amount for whatever is missing in the reserve.
                 */
                num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
                if (flush == BTRFS_RESERVE_FLUSH_ALL &&
                    delayed_refs_rsv->full == 0) {
                        delayed_refs_bytes = num_bytes;
                        num_bytes <<= 1;
                }

                /*
                 * Do the reservation for the relocation root creation
                 */
                if (need_reserve_reloc_root(root)) {
                        num_bytes += fs_info->nodesize;
                        reloc_reserved = true;
                }

                ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
                if (ret)
                        goto reserve_fail;
                if (delayed_refs_bytes) {
                        btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
                                                          delayed_refs_bytes);
                        num_bytes -= delayed_refs_bytes;
                }

                if (rsv->space_info->force_alloc)
                        do_chunk_alloc = true;
        } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
                   !delayed_refs_rsv->full) {
                /*
                 * Some people call with btrfs_start_transaction(root, 0)
                 * because they can be throttled, but have some other mechanism
                 * for reserving space.  We still want these guys to refill the
                 * delayed block_rsv so just add one item's worth of reservation
                 * here.
                 */
                ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
                if (ret)
                        goto reserve_fail;
        }
again:
        h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h) {
                ret = -ENOMEM;
                goto alloc_fail;
        }

        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
         * waiting on this guy, so we don't need to do the sb_start_intwrite
         * because we're already holding a ref.  We need this because we could
         * have raced in and done an fsync() on a file which can kick a commit
         * and then we deadlock with somebody doing a freeze.
         *
         * If we are ATTACH, it means we just want to catch the current
         * transaction and commit it, so we needn't do sb_start_intwrite().
         */
        if (type & __TRANS_FREEZABLE)
                sb_start_intwrite(fs_info->sb);

        if (may_wait_transaction(fs_info, type))
                wait_current_trans(fs_info);

        do {
                ret = join_transaction(fs_info, type);
                if (ret == -EBUSY) {
                        wait_current_trans(fs_info);
                        if (unlikely(type == TRANS_ATTACH ||
                                     type == TRANS_JOIN_NOSTART))
                                ret = -ENOENT;
                }
        } while (ret == -EBUSY);

        if (ret < 0)
                goto join_fail;

        cur_trans = fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->root = root;
        refcount_set(&h->use_count, 1);
        h->fs_info = root->fs_info;

        h->type = type;
        h->can_flush_pending_bgs = true;
        INIT_LIST_HEAD(&h->new_bgs);

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
            may_wait_transaction(fs_info, type)) {
                current->journal_info = h;
                btrfs_commit_transaction(h);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
                h->reloc_reserved = reloc_reserved;
        }

got_it:
        if (!current->journal_info)
                current->journal_info = h;

        /*
         * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
         * ALLOC_FORCE the first run through, and then we won't allocate for
         * anybody else who races in later.  We don't care about the return
         * value here.
         */
        if (do_chunk_alloc && num_bytes) {
                u64 flags = h->block_rsv->space_info->flags;

                btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
                                  CHUNK_ALLOC_NO_FORCE);
        }

        /*
         * btrfs_record_root_in_trans() needs to alloc new extents, and may
         * call btrfs_join_transaction() while we're also starting a
         * transaction.
         *
         * Thus it needs to be called after current->journal_info is
         * initialized, or we can deadlock.
         */
        btrfs_record_root_in_trans(h, root);

        return h;

join_fail:
        if (type & __TRANS_FREEZABLE)
                sb_end_intwrite(fs_info->sb);
        kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
        if (num_bytes)
                btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
                                        num_bytes, NULL);
reserve_fail:
        btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
        return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   unsigned int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL, true);
}
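
/*
 * Illustrative usage sketch, not part of the original file: reserve space
 * for two tree items, do the modifications, then release the handle.
 * "root" and the item updates are hypothetical.
 *
 *      struct btrfs_trans_handle *trans;
 *
 *      trans = btrfs_start_transaction(root, 2);  // reserve 2 items worth of metadata
 *      if (IS_ERR(trans))
 *              return PTR_ERR(trans);
 *      // ... insert/update at most two tree items here ...
 *      return btrfs_end_transaction(trans);       // the commit itself happens later
 */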

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
                                        struct btrfs_root *root,
                                        unsigned int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
                                 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Similar to regular join but it never starts a transaction when none is
 * running or after waiting for the current one to finish.
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOSTART,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_ATTACH,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;

        trans = start_transaction(root, 0, TRANS_ATTACH,
                                  BTRFS_RESERVE_NO_FLUSH, true);
        if (trans == ERR_PTR(-ENOENT))
                btrfs_wait_for_commit(root->fs_info, 0);

        return trans;
}
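
/*
 * Illustrative sketch, not part of the original file: a sync-like caller
 * that wants everything committed so far to be on disk.  A plain
 * btrfs_attach_transaction() may return -ENOENT while a previous commit is
 * still writing; the barrier variant above waits for it instead.
 *
 *      trans = btrfs_attach_transaction_barrier(root);
 *      if (IS_ERR(trans)) {
 *              // -ENOENT: nothing to commit, and no commit still in flight
 *              if (PTR_ERR(trans) == -ENOENT)
 *                      return 0;
 *              return PTR_ERR(trans);
 *      }
 *      return btrfs_commit_transaction(trans);
 */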

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret = 0;

        if (transid) {
                if (transid <= fs_info->last_trans_committed)
                        goto out;

                /* find specified transaction */
                spin_lock(&fs_info->trans_lock);
                list_for_each_entry(t, &fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                refcount_inc(&cur_trans->use_count);
                                ret = 0;
                                break;
                        }
                        if (t->transid > transid) {
                                ret = 0;
                                break;
                        }
                }
                spin_unlock(&fs_info->trans_lock);

                /*
                 * The specified transaction doesn't exist, or we
                 * raced with btrfs_commit_transaction
                 */
                if (!cur_trans) {
                        if (transid > fs_info->last_trans_committed)
                                ret = -EINVAL;
                        goto out;
                }
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&fs_info->trans_lock);
                list_for_each_entry_reverse(t, &fs_info->trans_list,
                                            list) {
                        if (t->state >= TRANS_STATE_COMMIT_START) {
                                if (t->state == TRANS_STATE_COMPLETED)
                                        break;
                                cur_trans = t;
                                refcount_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(cur_trans);
        btrfs_put_transaction(cur_trans);
out:
        return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
        wait_current_trans(fs_info);
}

static int should_end_transaction(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;

        if (btrfs_check_space_for_delayed_refs(fs_info))
                return 1;

        return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
        struct btrfs_transaction *cur_trans = trans->transaction;

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
            cur_trans->delayed_refs.flushing)
                return 1;

        return should_end_transaction(trans);
}
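
/*
 * Illustrative sketch, not part of the original file: the pattern that
 * long running operations use with btrfs_should_end_transaction() so one
 * handle does not accumulate too much delayed-ref work.  "more_work" and
 * "num_items" are hypothetical; real users live in other files.
 *
 *      while (more_work) {
 *              // ... do a bounded amount of tree modification ...
 *              if (btrfs_should_end_transaction(trans)) {
 *                      ret = btrfs_end_transaction(trans);
 *                      if (ret)
 *                              return ret;
 *                      trans = btrfs_start_transaction(root, num_items);
 *                      if (IS_ERR(trans))
 *                              return PTR_ERR(trans);
 *              }
 *      }
 */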

static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;

        if (!trans->block_rsv) {
                ASSERT(!trans->bytes_reserved);
                return;
        }

        if (!trans->bytes_reserved)
                return;

        ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
        trace_btrfs_space_reservation(fs_info, "transaction",
                                      trans->transid, trans->bytes_reserved, 0);
        btrfs_block_rsv_release(fs_info, trans->block_rsv,
                                trans->bytes_reserved, NULL);
        trans->bytes_reserved = 0;
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                                   int throttle)
{
        struct btrfs_fs_info *info = trans->fs_info;
        struct btrfs_transaction *cur_trans = trans->transaction;
        int err = 0;

        if (refcount_read(&trans->use_count) > 1) {
                refcount_dec(&trans->use_count);
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        btrfs_trans_release_metadata(trans);
        trans->block_rsv = NULL;

        btrfs_create_pending_block_groups(trans);

        btrfs_trans_release_chunk_metadata(trans);

        if (trans->type & __TRANS_FREEZABLE)
                sb_end_intwrite(info->sb);

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);
        extwriter_counter_dec(cur_trans, trans->type);

        cond_wake_up(&cur_trans->writer_wait);
        btrfs_put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(info);

        if (TRANS_ABORTED(trans) ||
            test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
                wake_up_process(info->transaction_kthread);
                if (TRANS_ABORTED(trans))
                        err = trans->aborted;
                else
                        err = -EROFS;
        }

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
        return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
        return __btrfs_end_transaction(trans, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk; it does not wait on them.
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark, &cached_state)) {
                bool wait_writeback = false;

                err = convert_extent_bit(dirty_pages, start, end,
                                         EXTENT_NEED_WAIT,
                                         mark, &cached_state);
                /*
                 * convert_extent_bit can return -ENOMEM, which is most of the
                 * time a temporary error. So when it happens, ignore the error
                 * and wait for writeback of this range to finish - because we
                 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
                 * to __btrfs_wait_marked_extents() would not know that
                 * writeback for this range started and therefore wouldn't
                 * wait for it to finish - we don't want to commit a
                 * superblock that points to btree nodes/leaves for which
                 * writeback hasn't finished yet (and without errors).
                 * We cleanup any entries left in the io tree when committing
                 * the transaction (through extent_io_tree_release()).
                 */
                if (err == -ENOMEM) {
                        err = 0;
                        wait_writeback = true;
                }
                if (!err)
                        err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                else if (wait_writeback)
                        werr = filemap_fdatawait_range(mapping, start, end);
                free_extent_state(cached_state);
                cached_state = NULL;
                cond_resched();
                start = end + 1;
        }
        atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
                                       struct extent_io_tree *dirty_pages)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT, &cached_state)) {
                /*
                 * Ignore -ENOMEM errors returned by clear_extent_bit().
                 * When committing the transaction, we'll remove any entries
                 * left in the io tree. For a log commit, we don't remove them
                 * after committing the log because the tree can be accessed
                 * concurrently - we do it only at transaction commit time when
                 * it's safe to do it (through extent_io_tree_release()).
                 */
                err = clear_extent_bit(dirty_pages, start, end,
                                       EXTENT_NEED_WAIT, 0, 0, &cached_state);
                if (err == -ENOMEM)
                        err = 0;
                if (!err)
                        err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                free_extent_state(cached_state);
                cached_state = NULL;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
                       struct extent_io_tree *dirty_pages)
{
        bool errors = false;
        int err;

        err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
        if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
                errors = true;

        if (errors && !err)
                err = -EIO;
        return err;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
        struct btrfs_fs_info *fs_info = log_root->fs_info;
        struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
        bool errors = false;
        int err;

        ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

        err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
        if ((mark & EXTENT_DIRTY) &&
            test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
                errors = true;

        if ((mark & EXTENT_NEW) &&
            test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
                errors = true;

        if (errors && !err)
                err = -EIO;
        return err;
}

/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
        int ret;
        int ret2;
        struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct blk_plug plug;

        blk_start_plug(&plug);
        ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
        blk_finish_plug(&plug);
        ret2 = btrfs_wait_extents(fs_info, dirty_pages);

        extent_io_tree_release(&trans->transaction->dirty_pages);

        if (ret)
                return ret;
        else if (ret2)
                return ret2;
        else
                return 0;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *tree_root = fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
        }

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
        struct list_head *io_bgs = &trans->transaction->io_bgs;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans);
        if (ret)
                return ret;
        ret = btrfs_run_dev_replace(trans);
        if (ret)
                return ret;
        ret = btrfs_run_qgroups(trans);
        if (ret)
                return ret;

        ret = btrfs_setup_space_cache(trans);
        if (ret)
                return ret;

        /* run_qgroups might have added some more refs */
        ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
        if (ret)
                return ret;
again:
        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                struct btrfs_root *root;
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
                clear_bit(BTRFS_ROOT_DIRTY, &root->state);

                if (root != fs_info->extent_root)
                        list_add_tail(&root->dirty_list,
                                      &trans->transaction->switch_commits);
                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
                ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
                if (ret)
                        return ret;
        }

        while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
                ret = btrfs_write_dirty_block_groups(trans);
                if (ret)
                        return ret;
                ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
                if (ret)
                        return ret;
        }

        if (!list_empty(&fs_info->dirty_cowonly_roots))
                goto again;

        list_add_tail(&fs_info->extent_root->dirty_list,
                      &trans->transaction->switch_commits);

        /* Update dev-replace pointer once everything is committed */
        fs_info->dev_replace.committed_cursor_left =
                fs_info->dev_replace.cursor_left_last_write_of_item;

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
        if (list_empty(&root->root_list)) {
                btrfs_grab_root(root);
                list_add_tail(&root->root_list, &fs_info->dead_roots);
        }
        spin_unlock(&fs_info->trans_lock);
}

/*
 * update all the dirty fs tree roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *gang[8];
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        struct btrfs_root *root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
                        smp_mb__after_atomic();

                        if (root->commit_root != root->node) {
                                list_add_tail(&root->dirty_list,
                                        &trans->transaction->switch_commits);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                        btrfs_qgroup_free_meta_all_pertrans(root);
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;

        if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root);

                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty(info);
                cond_resched();

                if (btrfs_fs_closing(info) || ret != -EAGAIN)
                        break;

                if (btrfs_defrag_cancelled(info)) {
                        btrfs_debug(info, "defrag_root cancelled");
                        ret = -EAGAIN;
                        break;
                }
        }
        clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
        return ret;
}
1363
1364/*
1365 * Do all special snapshot related qgroup dirty hack.
1366 *
1367 * Will do all needed qgroup inherit and dirty hack like switch commit
1368 * roots inside one transaction and write all btree into disk, to make
1369 * qgroup works.
1370 */
1371static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
1372                                   struct btrfs_root *src,
1373                                   struct btrfs_root *parent,
1374                                   struct btrfs_qgroup_inherit *inherit,
1375                                   u64 dst_objectid)
1376{
1377        struct btrfs_fs_info *fs_info = src->fs_info;
1378        int ret;
1379
1380        /*
1381         * Save some performance in the case that qgroups are not
1382         * enabled. If this check races with the ioctl, rescan will
1383         * kick in anyway.
1384         */
1385        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1386                return 0;
1387
1388        /*
1389         * Ensure dirty @src will be committed.  Otherwise, after the
1390         * upcoming commit_fs_roots() and switch_commit_roots(), any dirty
1391         * but unrecorded root will never be updated again, leaving an
1392         * outdated root item.
1393         */
1394        record_root_in_trans(trans, src, 1);
1395
1396        /*
1397         * We are going to commit the transaction, see the comment in
1398         * btrfs_commit_transaction() for why we lock tree_log_mutex.
1399         */
1400        mutex_lock(&fs_info->tree_log_mutex);
1401
1402        ret = commit_fs_roots(trans);
1403        if (ret)
1404                goto out;
1405        ret = btrfs_qgroup_account_extents(trans);
1406        if (ret < 0)
1407                goto out;
1408
1409        /* Now the qgroup numbers are all updated, inherit them to the new qgroup */
1410        ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
1411                                   inherit);
1412        if (ret < 0)
1413                goto out;
1414
1415        /*
1416         * Now we do a simplified commit transaction, which will:
1417         * 1) commit all subvolume and extent trees
1418         *    To ensure all subvolume and extent trees have a valid
1419         *    commit_root for the later insert_dir_item() accounting
1420         * 2) write all btree blocks onto disk
1421         *    This makes sure later btree modifications will be COWed;
1422         *    otherwise a commit_root could be repopulated, causing wrong qgroup numbers
1423         * In this simplified commit, we don't really care about other trees
1424         * like the chunk and root trees, as they won't affect qgroups.
1425         * And we don't write the super block, to avoid a half-committed state.
1426         */
1427        ret = commit_cowonly_roots(trans);
1428        if (ret)
1429                goto out;
1430        switch_commit_roots(trans);
1431        ret = btrfs_write_and_wait_transaction(trans);
1432        if (ret)
1433                btrfs_handle_fs_error(fs_info, ret,
1434                        "Error while writing out transaction for qgroup");
1435
1436out:
1437        mutex_unlock(&fs_info->tree_log_mutex);
1438
1439        /*
1440         * Force the parent root to be updated, as we recorded it before so
1441         * its last_trans == cur_transid.
1442         * Otherwise it won't be committed to disk again after the later
1443         * insert_dir_item().
1444         */
1445        if (!ret)
1446                record_root_in_trans(trans, parent, 1);
1447        return ret;
1448}
1449
1450/*
1451 * New snapshots need to be created at a very specific time in the
1452 * transaction commit.  This does the actual creation.
1453 *
1454 * Note:
1455 * If an error occurs that may affect the commit of the current transaction,
1456 * we must return the error number. If an error only affects the creation
1457 * of the pending snapshots, just return 0.
1458 */
1459static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1460                                   struct btrfs_pending_snapshot *pending)
1461{
1462
1463        struct btrfs_fs_info *fs_info = trans->fs_info;
1464        struct btrfs_key key;
1465        struct btrfs_root_item *new_root_item;
1466        struct btrfs_root *tree_root = fs_info->tree_root;
1467        struct btrfs_root *root = pending->root;
1468        struct btrfs_root *parent_root;
1469        struct btrfs_block_rsv *rsv;
1470        struct inode *parent_inode;
1471        struct btrfs_path *path;
1472        struct btrfs_dir_item *dir_item;
1473        struct dentry *dentry;
1474        struct extent_buffer *tmp;
1475        struct extent_buffer *old;
1476        struct timespec64 cur_time;
1477        int ret = 0;
1478        u64 to_reserve = 0;
1479        u64 index = 0;
1480        u64 objectid;
1481        u64 root_flags;
1482
1483        ASSERT(pending->path);
1484        path = pending->path;
1485
1486        ASSERT(pending->root_item);
1487        new_root_item = pending->root_item;
1488
1489        pending->error = btrfs_find_free_objectid(tree_root, &objectid);
1490        if (pending->error)
1491                goto no_free_objectid;
1492
1493        /*
1494         * Make qgroups skip the new snapshot's qgroupid, as it will be
1495         * accounted for by the later btrfs_qgroup_inherit().
1496         */
1497        btrfs_set_skip_qgroup(trans, objectid);
1498
1499        btrfs_reloc_pre_snapshot(pending, &to_reserve);
1500
1501        if (to_reserve > 0) {
1502                pending->error = btrfs_block_rsv_add(root,
1503                                                     &pending->block_rsv,
1504                                                     to_reserve,
1505                                                     BTRFS_RESERVE_NO_FLUSH);
1506                if (pending->error)
1507                        goto clear_skip_qgroup;
1508        }
1509
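        /*
         * The root item key for the new snapshot.  The offset set here is a
         * placeholder; it is set to the creation transid right before
         * btrfs_insert_root() below.
         */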
1510        key.objectid = objectid;
1511        key.offset = (u64)-1;
1512        key.type = BTRFS_ROOT_ITEM_KEY;
1513
1514        rsv = trans->block_rsv;
1515        trans->block_rsv = &pending->block_rsv;
1516        trans->bytes_reserved = trans->block_rsv->reserved;
1517        trace_btrfs_space_reservation(fs_info, "transaction",
1518                                      trans->transid,
1519                                      trans->bytes_reserved, 1);
1520        dentry = pending->dentry;
1521        parent_inode = pending->dir;
1522        parent_root = BTRFS_I(parent_inode)->root;
1523        record_root_in_trans(trans, parent_root, 0);
1524
1525        cur_time = current_time(parent_inode);
1526
1527        /*
1528         * insert the directory item
1529         */
1530        ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
1531        BUG_ON(ret); /* -ENOMEM */
1532
1533        /* check if there is a file/dir which has the same name. */
1534        dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1535                                         btrfs_ino(BTRFS_I(parent_inode)),
1536                                         dentry->d_name.name,
1537                                         dentry->d_name.len, 0);
1538        if (dir_item != NULL && !IS_ERR(dir_item)) {
1539                pending->error = -EEXIST;
1540                goto dir_item_existed;
1541        } else if (IS_ERR(dir_item)) {
1542                ret = PTR_ERR(dir_item);
1543                btrfs_abort_transaction(trans, ret);
1544                goto fail;
1545        }
1546        btrfs_release_path(path);
1547
1548        /*
1549         * Pull in the delayed directory update
1550         * and the delayed inode item;
1551         * otherwise we corrupt the FS during
1552         * the snapshot.
1553         */
1554        ret = btrfs_run_delayed_items(trans);
1555        if (ret) {      /* Transaction aborted */
1556                btrfs_abort_transaction(trans, ret);
1557                goto fail;
1558        }
1559
1560        record_root_in_trans(trans, root, 0);
1561        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1562        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1563        btrfs_check_and_init_root_item(new_root_item);
1564
1565        root_flags = btrfs_root_flags(new_root_item);
1566        if (pending->readonly)
1567                root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1568        else
1569                root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1570        btrfs_set_root_flags(new_root_item, root_flags);
1571
1572        btrfs_set_root_generation_v2(new_root_item,
1573                        trans->transid);
1574        generate_random_guid(new_root_item->uuid);
1575        memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1576                        BTRFS_UUID_SIZE);
1577        if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1578                memset(new_root_item->received_uuid, 0,
1579                       sizeof(new_root_item->received_uuid));
1580                memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1581                memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1582                btrfs_set_root_stransid(new_root_item, 0);
1583                btrfs_set_root_rtransid(new_root_item, 0);
1584        }
1585        btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1586        btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1587        btrfs_set_root_otransid(new_root_item, trans->transid);
1588
1589        old = btrfs_lock_root_node(root);
1590        ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
1591        if (ret) {
1592                btrfs_tree_unlock(old);
1593                free_extent_buffer(old);
1594                btrfs_abort_transaction(trans, ret);
1595                goto fail;
1596        }
1597
1598        btrfs_set_lock_blocking_write(old);
1599
1600        ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1601        /* clean up in any case */
1602        btrfs_tree_unlock(old);
1603        free_extent_buffer(old);
1604        if (ret) {
1605                btrfs_abort_transaction(trans, ret);
1606                goto fail;
1607        }
1608        /* see comments in should_cow_block() */
1609        set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1610        smp_wmb();
1611
1612        btrfs_set_root_node(new_root_item, tmp);
1613        /* record when the snapshot was created in key.offset */
1614        key.offset = trans->transid;
1615        ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1616        btrfs_tree_unlock(tmp);
1617        free_extent_buffer(tmp);
1618        if (ret) {
1619                btrfs_abort_transaction(trans, ret);
1620                goto fail;
1621        }
1622
1623        /*
1624         * insert root back/forward references
1625         */
1626        ret = btrfs_add_root_ref(trans, objectid,
1627                                 parent_root->root_key.objectid,
1628                                 btrfs_ino(BTRFS_I(parent_inode)), index,
1629                                 dentry->d_name.name, dentry->d_name.len);
1630        if (ret) {
1631                btrfs_abort_transaction(trans, ret);
1632                goto fail;
1633        }
1634
1635        key.offset = (u64)-1;
1636        pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
1637        if (IS_ERR(pending->snap)) {
1638                ret = PTR_ERR(pending->snap);
1639                pending->snap = NULL;
1640                btrfs_abort_transaction(trans, ret);
1641                goto fail;
1642        }
1643
1644        ret = btrfs_reloc_post_snapshot(trans, pending);
1645        if (ret) {
1646                btrfs_abort_transaction(trans, ret);
1647                goto fail;
1648        }
1649
1650        ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1651        if (ret) {
1652                btrfs_abort_transaction(trans, ret);
1653                goto fail;
1654        }
1655
1656        /*
1657         * Do the special qgroup accounting for this snapshot, as we use a
1658         * qgroup hack to make snapshot creation fast.
1659         * To cooperate with that hack, we do the hack again here; otherwise
1660         * the snapshot would be greatly slowed down by a subtree qgroup rescan.
1661         */
1662        ret = qgroup_account_snapshot(trans, root, parent_root,
1663                                      pending->inherit, objectid);
1664        if (ret < 0)
1665                goto fail;
1666
1667        ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
1668                                    dentry->d_name.len, BTRFS_I(parent_inode),
1669                                    &key, BTRFS_FT_DIR, index);
1670        /* We have checked the name at the beginning, so it is impossible. */
1671        BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1672        if (ret) {
1673                btrfs_abort_transaction(trans, ret);
1674                goto fail;
1675        }
1676
1677        btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
1678                                         dentry->d_name.len * 2);
1679        parent_inode->i_mtime = parent_inode->i_ctime =
1680                current_time(parent_inode);
1681        ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1682        if (ret) {
1683                btrfs_abort_transaction(trans, ret);
1684                goto fail;
1685        }
1686        ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
1687                                  BTRFS_UUID_KEY_SUBVOL,
1688                                  objectid);
1689        if (ret) {
1690                btrfs_abort_transaction(trans, ret);
1691                goto fail;
1692        }
1693        if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1694                ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
1695                                          BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1696                                          objectid);
1697                if (ret && ret != -EEXIST) {
1698                        btrfs_abort_transaction(trans, ret);
1699                        goto fail;
1700                }
1701        }
1702
1703        ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1704        if (ret) {
1705                btrfs_abort_transaction(trans, ret);
1706                goto fail;
1707        }
1708
1709fail:
1710        pending->error = ret;
1711dir_item_existed:
1712        trans->block_rsv = rsv;
1713        trans->bytes_reserved = 0;
1714clear_skip_qgroup:
1715        btrfs_clear_skip_qgroup(trans);
1716no_free_objectid:
1717        kfree(new_root_item);
1718        pending->root_item = NULL;
1719        btrfs_free_path(path);
1720        pending->path = NULL;
1721
1722        return ret;
1723}
1724
1725/*
1726 * create all the snapshots we've scheduled for creation
1727 */
1728static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
1729{
1730        struct btrfs_pending_snapshot *pending, *next;
1731        struct list_head *head = &trans->transaction->pending_snapshots;
1732        int ret = 0;
1733
1734        list_for_each_entry_safe(pending, next, head, list) {
1735                list_del(&pending->list);
1736                ret = create_pending_snapshot(trans, pending);
1737                if (ret)
1738                        break;
1739        }
1740        return ret;
1741}
1742
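/*
 * Copy the latest generation, level and bytenr of the tree root and chunk
 * root into the in-memory super block copy, so the super block written by
 * this commit points at the new commit roots.
 */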
1743static void update_super_roots(struct btrfs_fs_info *fs_info)
1744{
1745        struct btrfs_root_item *root_item;
1746        struct btrfs_super_block *super;
1747
1748        super = fs_info->super_copy;
1749
1750        root_item = &fs_info->chunk_root->root_item;
1751        super->chunk_root = root_item->bytenr;
1752        super->chunk_root_generation = root_item->generation;
1753        super->chunk_root_level = root_item->level;
1754
1755        root_item = &fs_info->tree_root->root_item;
1756        super->root = root_item->bytenr;
1757        super->generation = root_item->generation;
1758        super->root_level = root_item->level;
1759        if (btrfs_test_opt(fs_info, SPACE_CACHE))
1760                super->cache_generation = root_item->generation;
1761        if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1762                super->uuid_tree_generation = root_item->generation;
1763}
1764
1765int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1766{
1767        struct btrfs_transaction *trans;
1768        int ret = 0;
1769
1770        spin_lock(&info->trans_lock);
1771        trans = info->running_transaction;
1772        if (trans)
1773                ret = (trans->state >= TRANS_STATE_COMMIT_START);
1774        spin_unlock(&info->trans_lock);
1775        return ret;
1776}
1777
1778int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1779{
1780        struct btrfs_transaction *trans;
1781        int ret = 0;
1782
1783        spin_lock(&info->trans_lock);
1784        trans = info->running_transaction;
1785        if (trans)
1786                ret = is_transaction_blocked(trans);
1787        spin_unlock(&info->trans_lock);
1788        return ret;
1789}
1790
1791/*
1792 * wait for the current transaction commit to start and block subsequent
1793 * transaction joins
1794 */
1795static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
1796                                            struct btrfs_transaction *trans)
1797{
1798        wait_event(fs_info->transaction_blocked_wait,
1799                   trans->state >= TRANS_STATE_COMMIT_START ||
1800                   TRANS_ABORTED(trans));
1801}
1802
1803/*
1804 * wait for the current transaction to start and then become unblocked.
1805 * caller holds ref.
1806 */
1807static void wait_current_trans_commit_start_and_unblock(
1808                                        struct btrfs_fs_info *fs_info,
1809                                        struct btrfs_transaction *trans)
1810{
1811        wait_event(fs_info->transaction_wait,
1812                   trans->state >= TRANS_STATE_UNBLOCKED ||
1813                   TRANS_ABORTED(trans));
1814}
1815
1816/*
1817 * commit transactions asynchronously. once btrfs_commit_transaction_async
1818 * returns, any subsequent transaction will not be allowed to join.
1819 */
1820struct btrfs_async_commit {
1821        struct btrfs_trans_handle *newtrans;
1822        struct work_struct work;
1823};
1824
1825static void do_async_commit(struct work_struct *work)
1826{
1827        struct btrfs_async_commit *ac =
1828                container_of(work, struct btrfs_async_commit, work);
1829
1830        /*
1831         * We've got freeze protection passed with the transaction.
1832         * Tell lockdep about it.
1833         */
1834        if (ac->newtrans->type & __TRANS_FREEZABLE)
1835                __sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);
1836
1837        current->journal_info = ac->newtrans;
1838
1839        btrfs_commit_transaction(ac->newtrans);
1840        kfree(ac);
1841}
1842
1843int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1844                                   int wait_for_unblock)
1845{
1846        struct btrfs_fs_info *fs_info = trans->fs_info;
1847        struct btrfs_async_commit *ac;
1848        struct btrfs_transaction *cur_trans;
1849
1850        ac = kmalloc(sizeof(*ac), GFP_NOFS);
1851        if (!ac)
1852                return -ENOMEM;
1853
1854        INIT_WORK(&ac->work, do_async_commit);
1855        ac->newtrans = btrfs_join_transaction(trans->root);
1856        if (IS_ERR(ac->newtrans)) {
1857                int err = PTR_ERR(ac->newtrans);
1858                kfree(ac);
1859                return err;
1860        }
1861
1862        /* take transaction reference */
1863        cur_trans = trans->transaction;
1864        refcount_inc(&cur_trans->use_count);
1865
1866        btrfs_end_transaction(trans);
1867
1868        /*
1869         * Tell lockdep we've released the freeze rwsem, since the
1870         * async commit thread will be the one to unlock it.
1871         */
1872        if (ac->newtrans->type & __TRANS_FREEZABLE)
1873                __sb_writers_release(fs_info->sb, SB_FREEZE_FS);
1874
1875        schedule_work(&ac->work);
1876
1877        /* wait for transaction to start and unblock */
1878        if (wait_for_unblock)
1879                wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
1880        else
1881                wait_current_trans_commit_start(fs_info, cur_trans);
1882
1883        if (current->journal_info == trans)
1884                current->journal_info = NULL;
1885
1886        btrfs_put_transaction(cur_trans);
1887        return 0;
1888}
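
/*
 * A usage sketch (hypothetical, for illustration only): kick off an async
 * commit and wait only until the commit is past the blocking stages,
 * without waiting for the super blocks to reach disk:
 *
 *      ret = btrfs_commit_transaction_async(trans, 1);
 *      if (ret)
 *              return ret;
 */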
1889
1890
1891static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
1892{
1893        struct btrfs_fs_info *fs_info = trans->fs_info;
1894        struct btrfs_transaction *cur_trans = trans->transaction;
1895
1896        WARN_ON(refcount_read(&trans->use_count) > 1);
1897
1898        btrfs_abort_transaction(trans, err);
1899
1900        spin_lock(&fs_info->trans_lock);
1901
1902        /*
1903         * If the transaction has been removed from the list, it has been
1904         * committed successfully, so the cleanup function must never be
1905         * called for it.
1906         */
1907        BUG_ON(list_empty(&cur_trans->list));
1908
1909        list_del_init(&cur_trans->list);
1910        if (cur_trans == fs_info->running_transaction) {
1911                cur_trans->state = TRANS_STATE_COMMIT_DOING;
1912                spin_unlock(&fs_info->trans_lock);
1913                wait_event(cur_trans->writer_wait,
1914                           atomic_read(&cur_trans->num_writers) == 1);
1915
1916                spin_lock(&fs_info->trans_lock);
1917        }
1918        spin_unlock(&fs_info->trans_lock);
1919
1920        btrfs_cleanup_one_transaction(trans->transaction, fs_info);
1921
1922        spin_lock(&fs_info->trans_lock);
1923        if (cur_trans == fs_info->running_transaction)
1924                fs_info->running_transaction = NULL;
1925        spin_unlock(&fs_info->trans_lock);
1926
1927        if (trans->type & __TRANS_FREEZABLE)
1928                sb_end_intwrite(fs_info->sb);
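        /*
         * Two puts: one for the reference held by the transaction list (the
         * transaction was removed from the list above) and one for our own
         * trans handle reference.
         */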
1929        btrfs_put_transaction(cur_trans);
1930        btrfs_put_transaction(cur_trans);
1931
1932        trace_btrfs_transaction_commit(trans->root);
1933
1934        if (current->journal_info == trans)
1935                current->journal_info = NULL;
1936        btrfs_scrub_cancel(fs_info);
1937
1938        kmem_cache_free(btrfs_trans_handle_cachep, trans);
1939}
1940
1941/*
1942 * Release reserved delayed ref space of all pending block groups of the
1943 * transaction and remove them from the list
1944 */
1945static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
1946{
1947        struct btrfs_fs_info *fs_info = trans->fs_info;
1948        struct btrfs_block_group *block_group, *tmp;
1949
1950        list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
1951                btrfs_delayed_refs_rsv_release(fs_info, 1);
1952                list_del_init(&block_group->bg_list);
1953        }
1954}
1955
1956static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
1957{
1958        struct btrfs_fs_info *fs_info = trans->fs_info;
1959
1960        /*
1961         * We use writeback_inodes_sb here because if we used
1962         * btrfs_start_delalloc_roots we would deadlock with fs freeze.
1963         * We are currently holding the fs freeze lock; if we do an async
1964         * flush we'll do btrfs_join_transaction() and deadlock because we
1965         * need to wait for the fs freeze lock.  With the direct flushing we
1966         * benefit from already being in a transaction and our
1967         * join_transaction doesn't have to re-take the fs freeze lock.
1968         */
1969        if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1970                writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
1971        } else {
1972                struct btrfs_pending_snapshot *pending;
1973                struct list_head *head = &trans->transaction->pending_snapshots;
1974
1975                /*
1976                 * Flush delalloc for any root that is going to be snapshotted.
1977                 * This is done to avoid a corrupted version of files, in the
1978                 * snapshots, that had both buffered and direct IO writes (even
1979                 * if they were done sequentially) due to an unordered update of
1980                 * the inode's size on disk.
1981                 */
1982                list_for_each_entry(pending, head, list) {
1983                        int ret;
1984
1985                        ret = btrfs_start_delalloc_snapshot(pending->root);
1986                        if (ret)
1987                                return ret;
1988                }
1989        }
1990        return 0;
1991}
1992
1993static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
1994{
1995        struct btrfs_fs_info *fs_info = trans->fs_info;
1996
1997        if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1998                btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1999        } else {
2000                struct btrfs_pending_snapshot *pending;
2001                struct list_head *head = &trans->transaction->pending_snapshots;
2002
2003                /*
2004                 * Wait for any delalloc that we started previously for the roots
2005                 * that are going to be snapshotted. This is to avoid a corrupted
2006                 * version of files in the snapshots that had both buffered and
2007                 * direct IO writes (even if they were done sequentially).
2008                 */
2009                list_for_each_entry(pending, head, list)
2010                        btrfs_wait_ordered_extents(pending->root,
2011                                                   U64_MAX, 0, U64_MAX);
2012        }
2013}
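
/*
 * The two helpers above are used as a pair by btrfs_commit_transaction();
 * a minimal sketch of the ordering used there, with error handling elided:
 *
 *      ret = btrfs_start_delalloc_flush(trans);
 *      ...run delayed items and wait for external writers...
 *      btrfs_wait_delalloc_flush(trans);
 */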
2014
2015int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2016{
2017        struct btrfs_fs_info *fs_info = trans->fs_info;
2018        struct btrfs_transaction *cur_trans = trans->transaction;
2019        struct btrfs_transaction *prev_trans = NULL;
2020        int ret;
2021
2022        ASSERT(refcount_read(&trans->use_count) == 1);
2023
2024        /*
2025         * Some places just start a transaction to commit it.  We need to make
2026         * sure that if this commit fails, the abort code actually marks the
2027         * transaction as failed, so set trans->dirty to make the abort code do
2028         * the right thing.
2029         */
2030        trans->dirty = true;
2031
2032        /* Stop the commit early if ->aborted is set */
2033        if (TRANS_ABORTED(cur_trans)) {
2034                ret = cur_trans->aborted;
2035                btrfs_end_transaction(trans);
2036                return ret;
2037        }
2038
2039        btrfs_trans_release_metadata(trans);
2040        trans->block_rsv = NULL;
2041
2042        /* Make a pass through all the delayed refs we have so far;
2043         * any running procs may add more while we are here.
2044         */
2045        ret = btrfs_run_delayed_refs(trans, 0);
2046        if (ret) {
2047                btrfs_end_transaction(trans);
2048                return ret;
2049        }
2050
2051        cur_trans = trans->transaction;
2052
2053        /*
2054         * set the flushing flag so procs in this transaction have to
2055         * start sending their work down.
2056         */
2057        cur_trans->delayed_refs.flushing = 1;
2058        smp_wmb();
2059
2060        btrfs_create_pending_block_groups(trans);
2061
2062        ret = btrfs_run_delayed_refs(trans, 0);
2063        if (ret) {
2064                btrfs_end_transaction(trans);
2065                return ret;
2066        }
2067
2068        if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
2069                int run_it = 0;
2070
2071                /* this mutex is also taken before trying to set
2072                 * block groups readonly.  We need to make sure
2073                 * that nobody has set a block group readonly
2074                 * after extents from that block group have been
2075                 * allocated for cache files.  btrfs_set_block_group_ro
2076                 * will wait for the transaction to commit if it
2077                 * finds BTRFS_TRANS_DIRTY_BG_RUN set.
2078                 *
2079                 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
2080                 * only one process starts all the block group IO.  It wouldn't
2081                 * hurt to have more than one go through, but there's no
2082                 * real advantage to it either.
2083                 */
2084                mutex_lock(&fs_info->ro_block_group_mutex);
2085                if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
2086                                      &cur_trans->flags))
2087                        run_it = 1;
2088                mutex_unlock(&fs_info->ro_block_group_mutex);
2089
2090                if (run_it) {
2091                        ret = btrfs_start_dirty_block_groups(trans);
2092                        if (ret) {
2093                                btrfs_end_transaction(trans);
2094                                return ret;
2095                        }
2096                }
2097        }
2098
2099        spin_lock(&fs_info->trans_lock);
2100        if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
2101                spin_unlock(&fs_info->trans_lock);
2102                refcount_inc(&cur_trans->use_count);
2103                ret = btrfs_end_transaction(trans);
2104
2105                wait_for_commit(cur_trans);
2106
2107                if (TRANS_ABORTED(cur_trans))
2108                        ret = cur_trans->aborted;
2109
2110                btrfs_put_transaction(cur_trans);
2111
2112                return ret;
2113        }
2114
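        /*
         * We are the ones chosen to commit this transaction: mark it
         * COMMIT_START and wake anyone blocked in
         * wait_current_trans_commit_start().
         */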
2115        cur_trans->state = TRANS_STATE_COMMIT_START;
2116        wake_up(&fs_info->transaction_blocked_wait);
2117
2118        if (cur_trans->list.prev != &fs_info->trans_list) {
2119                prev_trans = list_entry(cur_trans->list.prev,
2120                                        struct btrfs_transaction, list);
2121                if (prev_trans->state != TRANS_STATE_COMPLETED) {
2122                        refcount_inc(&prev_trans->use_count);
2123                        spin_unlock(&fs_info->trans_lock);
2124
2125                        wait_for_commit(prev_trans);
2126                        ret = READ_ONCE(prev_trans->aborted);
2127
2128                        btrfs_put_transaction(prev_trans);
2129                        if (ret)
2130                                goto cleanup_transaction;
2131                } else {
2132                        spin_unlock(&fs_info->trans_lock);
2133                }
2134        } else {
2135                spin_unlock(&fs_info->trans_lock);
2136                /*
2137                 * The previous transaction was aborted and was already removed
2138                 * from the list of transactions at fs_info->trans_list. So we
2139                 * abort to prevent writing a new superblock that reflects a
2140                 * corrupt state (pointing to trees with unwritten nodes/leaves).
2141                 */
2142                if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
2143                        ret = -EROFS;
2144                        goto cleanup_transaction;
2145                }
2146        }
2147
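        /*
         * Drop our own external writer count, so the extwriter wait below
         * can reach zero once all other external writers detach.
         */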
2148        extwriter_counter_dec(cur_trans, trans->type);
2149
2150        ret = btrfs_start_delalloc_flush(trans);
2151        if (ret)
2152                goto cleanup_transaction;
2153
2154        ret = btrfs_run_delayed_items(trans);
2155        if (ret)
2156                goto cleanup_transaction;
2157
2158        wait_event(cur_trans->writer_wait,
2159                   extwriter_counter_read(cur_trans) == 0);
2160
2161        /* Some pending work might have been added after the previous flush. */
2162        ret = btrfs_run_delayed_items(trans);
2163        if (ret)
2164                goto cleanup_transaction;
2165
2166        btrfs_wait_delalloc_flush(trans);
2167
2168        btrfs_scrub_pause(fs_info);
2169        /*
2170         * Ok, now we need to make sure to block out any other joins while we
2171         * commit the transaction.  We could have started a join before setting
2172         * COMMIT_DOING, so make sure to wait for num_writers to reach 1 again.
2173         */
2174        spin_lock(&fs_info->trans_lock);
2175        cur_trans->state = TRANS_STATE_COMMIT_DOING;
2176        spin_unlock(&fs_info->trans_lock);
2177        wait_event(cur_trans->writer_wait,
2178                   atomic_read(&cur_trans->num_writers) == 1);
2179
2180        if (TRANS_ABORTED(cur_trans)) {
2181                ret = cur_trans->aborted;
2182                goto scrub_continue;
2183        }
2184        /*
2185         * the reloc mutex makes sure that we stop
2186         * the balancing code from coming in and moving
2187         * extents around in the middle of the commit
2188         */
2189        mutex_lock(&fs_info->reloc_mutex);
2190
2191        /*
2192         * We needn't worry about the delayed items because we will
2193         * deal with them in create_pending_snapshot(), which is the
2194         * core function of the snapshot creation.
2195         */
2196        ret = create_pending_snapshots(trans);
2197        if (ret)
2198                goto unlock_reloc;
2199
2200        /*
2201         * We insert the dir indexes of the snapshots and update the inode
2202         * of the snapshots' parents after the snapshot creation, so there
2203         * are some delayed items which are not dealt with. Now deal with
2204         * them.
2205         *
2206         * We needn't worry that this operation will corrupt the snapshots,
2207         * because all the trees which are snapshotted will be forced to COW
2208         * the nodes and leaves.
2209         */
2210        ret = btrfs_run_delayed_items(trans);
2211        if (ret)
2212                goto unlock_reloc;
2213
2214        ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2215        if (ret)
2216                goto unlock_reloc;
2217
2218        /*
2219         * make sure none of the code above managed to slip in a
2220         * delayed item
2221         */
2222        btrfs_assert_delayed_root_empty(fs_info);
2223
2224        WARN_ON(cur_trans != trans->transaction);
2225
2226        /* The commit_*_roots() calls below are responsible for getting the
2227         * various roots consistent with each other.  Every pointer
2228         * in the tree of tree roots has to point to the most up to date
2229         * root for every subvolume and other tree.  So, we have to keep
2230         * the tree logging code from jumping in and changing any
2231         * of the trees.
2232         *
2233         * At this point in the commit, there can't be any tree-log
2234         * writers, but a little lower down we drop the trans mutex
2235         * and let new people in.  By holding the tree_log_mutex
2236         * from now until after the super is written, we avoid races
2237         * with the tree-log code.
2238         */
2239        mutex_lock(&fs_info->tree_log_mutex);
2240
2241        ret = commit_fs_roots(trans);
2242        if (ret)
2243                goto unlock_tree_log;
2244
2245        /*
2246         * Since the transaction is done, we can apply the pending changes
2247         * before the next transaction.
2248         */
2249        btrfs_apply_pending_changes(fs_info);
2250
2251        /* commit_fs_roots() gets rid of all the tree log roots; it is now
2252         * safe to free the log root tree.
2253         */
2254        btrfs_free_log_root_tree(trans, fs_info);
2255
2256        /*
2257         * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
2258         * new delayed refs. Must handle them or qgroup can be wrong.
2259         */
2260        ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2261        if (ret)
2262                goto unlock_tree_log;
2263
2264        /*
2265         * Since the fs roots are all committed, we can get quite accurate
2266         * new_roots, so let's do the quota accounting.
2267         */
2268        ret = btrfs_qgroup_account_extents(trans);
2269        if (ret < 0)
2270                goto unlock_tree_log;
2271
2272        ret = commit_cowonly_roots(trans);
2273        if (ret)
2274                goto unlock_tree_log;
2275
2276        /*
2277         * The tasks which save the space cache and inode cache may also
2278         * update ->aborted, check it.
2279         */
2280        if (TRANS_ABORTED(cur_trans)) {
2281                ret = cur_trans->aborted;
2282                goto unlock_tree_log;
2283        }
2284
2285        btrfs_prepare_extent_commit(fs_info);
2286
2287        cur_trans = fs_info->running_transaction;
2288
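        /*
         * Queue the tree root and chunk root for the commit root switch and
         * point their root items at the new nodes; switch_commit_roots()
         * below does the actual swap.
         */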
2289        btrfs_set_root_node(&fs_info->tree_root->root_item,
2290                            fs_info->tree_root->node);
2291        list_add_tail(&fs_info->tree_root->dirty_list,
2292                      &cur_trans->switch_commits);
2293
2294        btrfs_set_root_node(&fs_info->chunk_root->root_item,
2295                            fs_info->chunk_root->node);
2296        list_add_tail(&fs_info->chunk_root->dirty_list,
2297                      &cur_trans->switch_commits);
2298
2299        switch_commit_roots(trans);
2300
2301        ASSERT(list_empty(&cur_trans->dirty_bgs));
2302        ASSERT(list_empty(&cur_trans->io_bgs));
2303        update_super_roots(fs_info);
2304
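        /*
         * The log root tree was freed earlier in the commit, so clear the
         * log root fields and snapshot the result into super_for_commit,
         * the copy that write_all_supers() will send to the devices.
         */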
2305        btrfs_set_super_log_root(fs_info->super_copy, 0);
2306        btrfs_set_super_log_root_level(fs_info->super_copy, 0);
2307        memcpy(fs_info->super_for_commit, fs_info->super_copy,
2308               sizeof(*fs_info->super_copy));
2309
2310        btrfs_commit_device_sizes(cur_trans);
2311
2312        clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2313        clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2314
2315        btrfs_trans_release_chunk_metadata(trans);
2316
2317        spin_lock(&fs_info->trans_lock);
2318        cur_trans->state = TRANS_STATE_UNBLOCKED;
2319        fs_info->running_transaction = NULL;
2320        spin_unlock(&fs_info->trans_lock);
2321        mutex_unlock(&fs_info->reloc_mutex);
2322
2323        wake_up(&fs_info->transaction_wait);
2324
2325        ret = btrfs_write_and_wait_transaction(trans);
2326        if (ret) {
2327                btrfs_handle_fs_error(fs_info, ret,
2328                                      "Error while writing out transaction");
2329                /*
2330                 * reloc_mutex has been unlocked, tree_log_mutex is still held,
2331                 * but jumping to unlock_tree_log would double unlock reloc_mutex
2332                 */
2333                mutex_unlock(&fs_info->tree_log_mutex);
2334                goto scrub_continue;
2335        }
2336
2337        ret = write_all_supers(fs_info, 0);
2338        /*
2339         * the super is written, we can safely allow the tree-loggers
2340         * to go about their business
2341         */
2342        mutex_unlock(&fs_info->tree_log_mutex);
2343        if (ret)
2344                goto scrub_continue;
2345
2346        btrfs_finish_extent_commit(trans);
2347
2348        if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2349                btrfs_clear_space_info_full(fs_info);
2350
2351        fs_info->last_trans_committed = cur_trans->transid;
2352        /*
2353         * We needn't acquire the lock here because there is no other task
2354         * which can change it.
2355         */
2356        cur_trans->state = TRANS_STATE_COMPLETED;
2357        wake_up(&cur_trans->commit_wait);
2358
2359        spin_lock(&fs_info->trans_lock);
2360        list_del_init(&cur_trans->list);
2361        spin_unlock(&fs_info->trans_lock);
2362
2363        btrfs_put_transaction(cur_trans);
2364        btrfs_put_transaction(cur_trans);
2365
2366        if (trans->type & __TRANS_FREEZABLE)
2367                sb_end_intwrite(fs_info->sb);
2368
2369        trace_btrfs_transaction_commit(trans->root);
2370
2371        btrfs_scrub_continue(fs_info);
2372
2373        if (current->journal_info == trans)
2374                current->journal_info = NULL;
2375
2376        kmem_cache_free(btrfs_trans_handle_cachep, trans);
2377
2378        return ret;
2379
2380unlock_tree_log:
2381        mutex_unlock(&fs_info->tree_log_mutex);
2382unlock_reloc:
2383        mutex_unlock(&fs_info->reloc_mutex);
2384scrub_continue:
2385        btrfs_scrub_continue(fs_info);
2386cleanup_transaction:
2387        btrfs_trans_release_metadata(trans);
2388        btrfs_cleanup_pending_block_groups(trans);
2389        btrfs_trans_release_chunk_metadata(trans);
2390        trans->block_rsv = NULL;
2391        btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
2392        if (current->journal_info == trans)
2393                current->journal_info = NULL;
2394        cleanup_transaction(trans, ret);
2395
2396        return ret;
2397}
2398
2399/*
2400 * Return < 0 on error,
2401 * 0 if there are no more dead_roots at the time of the call,
2402 * 1 if there are more to be processed; call me again.
2403 *
2404 * A return value of 1 means there are certainly more snapshots to delete, but
2405 * if a new one arrives during processing, this may still return 0. We don't
2406 * mind, because btrfs_commit_super() will poke the cleaner thread and it will
2407 * process it a few seconds later.
2408 */
2409int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
2410{
2411        int ret;
2412        struct btrfs_fs_info *fs_info = root->fs_info;
2413
2414        spin_lock(&fs_info->trans_lock);
2415        if (list_empty(&fs_info->dead_roots)) {
2416                spin_unlock(&fs_info->trans_lock);
2417                return 0;
2418        }
2419        root = list_first_entry(&fs_info->dead_roots,
2420                        struct btrfs_root, root_list);
2421        list_del_init(&root->root_list);
2422        spin_unlock(&fs_info->trans_lock);
2423
2424        btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
2425
2426        btrfs_kill_all_delayed_nodes(root);
2427        if (root->ino_cache_inode) {
2428                iput(root->ino_cache_inode);
2429                root->ino_cache_inode = NULL;
2430        }
2431
2432        if (btrfs_header_backref_rev(root->node) <
2433                        BTRFS_MIXED_BACKREF_REV)
2434                ret = btrfs_drop_snapshot(root, 0, 0);
2435        else
2436                ret = btrfs_drop_snapshot(root, 1, 0);
2437
2438        btrfs_put_root(root);
2439        return (ret < 0) ? 0 : 1;
2440}
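
/*
 * A caller sketch (hypothetical, for illustration only): the cleaner thread
 * can drain the dead roots by calling this in a loop until there is nothing
 * left to process:
 *
 *      while (btrfs_clean_one_deleted_snapshot(root) > 0)
 *              ;
 */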
2441
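/*
 * Pending changes are produced by setting bits like
 * (1 << BTRFS_PENDING_COMMIT) in fs_info->pending_changes while a
 * transaction is running; this helper runs during the transaction commit
 * and consumes all pending bits atomically via xchg().
 */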
2442void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2443{
2444        unsigned long prev;
2445        unsigned long bit;
2446
2447        prev = xchg(&fs_info->pending_changes, 0);
2448        if (!prev)
2449                return;
2450
2451        bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
2452        if (prev & bit)
2453                btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2454        prev &= ~bit;
2455
2456        bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
2457        if (prev & bit)
2458                btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2459        prev &= ~bit;
2460
2461        bit = 1 << BTRFS_PENDING_COMMIT;
2462        if (prev & bit)
2463                btrfs_debug(fs_info, "pending commit done");
2464        prev &= ~bit;
2465
2466        if (prev)
2467                btrfs_warn(fs_info,
2468                        "unknown pending changes left 0x%lx, ignoring", prev);
2469}
2470