linux/fs/btrfs/disk-io.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP   (BTRFS_HEADER_FLAG_WRITTEN |\
                                 BTRFS_HEADER_FLAG_RELOC |\
                                 BTRFS_SUPER_FLAG_ERROR |\
                                 BTRFS_SUPER_FLAG_SEEDING |\
                                 BTRFS_SUPER_FLAG_METADUMP |\
                                 BTRFS_SUPER_FLAG_METADUMP_V2)

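/*
 * Mask of all superblock flags known to this kernel; superblocks carrying
 * flags outside this set are rejected at mount time (see
 * btrfs_check_super_valid()).
 */
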
static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
                                        struct extent_io_tree *dirty_pages,
                                        int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
                                       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        blk_status_t status;
        enum btrfs_wq_endio_type metadata;
        struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
        btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
                                        sizeof(struct btrfs_end_io_wq),
                                        0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!btrfs_end_io_wq_cache)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
        kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        void *private_data;
        struct btrfs_fs_info *fs_info;
        struct bio *bio;
        extent_submit_bio_start_t *submit_bio_start;
        extent_submit_bio_done_t *submit_bio_done;
        int mirror_num;
        unsigned long bio_flags;
        /*
         * bio_offset is optional, can be used if the pages in the bio
         * can't tell us where in the file the bio should go
         */
        u64 bio_offset;
        struct btrfs_work work;
        blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other, so they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
        u64                     id;             /* root objectid */
        const char              *name_stem;     /* lock name stem */
        char                    names[BTRFS_MAX_LEVEL + 1][20];
        struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
        { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
        { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
        { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, .name_stem = "free-space" },
        { .id = 0,                              .name_stem = "tree"     },
};

void __init btrfs_init_lockdep(void)
{
        int i, j;

        /* initialize lockdep class names */
        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
                        snprintf(ks->names[j], sizeof(ks->names[j]),
                                 "btrfs-%s-%02d", ks->name_stem, j);
        }
}

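/*
 * With the table above, btrfs_init_lockdep() generates one lock class per
 * (root purpose, level) pair, named e.g. "btrfs-root-00" .. "btrfs-root-08";
 * the helper below attaches the matching class to an eb's lock.
 */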
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
                                    int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock,
                                   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
struct extent_map *btree_get_extent(struct btrfs_inode *inode,
                struct page *page, size_t pg_offset, u64 start, u64 len,
                int create)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
        struct extent_map_tree *em_tree = &inode->extent_tree;
        struct extent_map *em;
        int ret;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev = fs_info->fs_devices->latest_bdev;
                read_unlock(&em_tree->lock);
                goto out;
        }
        read_unlock(&em_tree->lock);

        em = alloc_extent_map();
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_len = (u64)-1;
        em->block_start = 0;
        em->bdev = fs_info->fs_devices->latest_bdev;

        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em, 0);
        if (ret == -EEXIST) {
                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em)
                        em = ERR_PTR(-EIO);
        } else if (ret) {
                free_extent_map(em);
                em = ERR_PTR(ret);
        }
        write_unlock(&em_tree->lock);

out:
        return em;
}

u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
        return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
        put_unaligned_le32(~crc, result);
}

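/*
 * The two helpers above are used as a pair: seed with ~0, feed in the data
 * (possibly in several chunks), then invert and store little-endian.  A
 * minimal sketch:
 *
 *	u32 crc = ~(u32)0;
 *	crc = btrfs_csum_data(data, crc, len);
 *	btrfs_csum_final(crc, result);
 *
 * csum_tree_block() below does exactly this over an extent_buffer.
 */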
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
                           struct extent_buffer *buf,
                           int verify)
{
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        char result[BTRFS_CSUM_SIZE];
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;

        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                        &kaddr, &map_start, &map_len);
                if (err)
                        return err;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
        }
        memset(result, 0, BTRFS_CSUM_SIZE);

        btrfs_csum_final(crc, result);

        if (verify) {
                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, csum_size);

                        read_extent_buffer(buf, &val, 0, csum_size);
                        btrfs_warn_rl(fs_info,
                                "%s checksum verify failed on %llu wanted %X found %X level %d",
                                fs_info->sb->s_id, buf->start,
                                val, found, btrfs_header_level(buf));
                        return -EUCLEAN;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
        }

        return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
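/*
 * Returns 0 when the generation matches (or when @parent_transid is 0 and
 * the check is skipped), 1 on a mismatch, and -EAGAIN when @atomic is set
 * and the answer cannot be determined without blocking.
 */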
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid,
                                 int atomic)
{
        struct extent_state *cached_state = NULL;
        int ret;
        bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        if (atomic)
                return -EAGAIN;

        if (need_lock) {
                btrfs_tree_read_lock(eb);
                btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
        }

        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         &cached_state);
        if (extent_buffer_uptodate(eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        btrfs_err_rl(eb->fs_info,
                "parent transid verify failed on %llu wanted %llu found %llu",
                        eb->start,
                        parent_transid, btrfs_header_generation(eb));
        ret = 1;

        /*
         * Things reading via commit roots that don't have normal protection,
         * like send, can have a really old block in cache that may point at a
         * block that has been freed and re-allocated.  So don't clear uptodate
         * if we find an eb that is under IO (dirty/writeback) because we could
         * end up reading in the stale data and then writing it back out and
         * making everybody very sad.
         */
        if (!extent_buffer_under_io(eb))
                clear_extent_buffer_uptodate(eb);
out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state);
        if (need_lock)
                btrfs_tree_read_unlock_blocking(eb);
        return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
                                  char *raw_disk_sb)
{
        struct btrfs_super_block *disk_sb =
                (struct btrfs_super_block *)raw_disk_sb;
        u16 csum_type = btrfs_super_csum_type(disk_sb);
        int ret = 0;

        if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
                u32 crc = ~(u32)0;
                char result[sizeof(crc)];

                /*
                 * The super_block structure does not span the whole
                 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
                 * is filled with zeros and is included in the checksum.
                 */
                crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
                                crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
                btrfs_csum_final(crc, result);

                if (memcmp(raw_disk_sb, result, sizeof(result)))
                        ret = 1;
        }

        if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
                btrfs_err(fs_info, "unsupported checksum algorithm %u",
                                csum_type);
                ret = 1;
        }

        return ret;
}

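/*
 * Check that @eb has the @level the caller expects and, when @first_key is
 * given and the block is old enough to be stable on disk, that its first
 * key matches @first_key.  Returns 0 on success, -EIO on a level mismatch
 * and the (non-zero) key comparison result on a first-key mismatch.
 */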
static int verify_level_key(struct btrfs_fs_info *fs_info,
                            struct extent_buffer *eb, int level,
                            struct btrfs_key *first_key)
{
        int found_level;
        struct btrfs_key found_key;
        int ret;

        found_level = btrfs_header_level(eb);
        if (found_level != level) {
#ifdef CONFIG_BTRFS_DEBUG
                WARN_ON(1);
                btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
                          eb->start, level, found_level);
#endif
                return -EIO;
        }

        if (!first_key)
                return 0;

        /*
         * For live tree blocks (new tree blocks in the current transaction),
         * we would need proper lock context to avoid races, which is
         * impossible here.  So only check tree blocks that were read from
         * disk, whose generation <= fs_info->last_trans_committed.
         */
        if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
                return 0;
        if (found_level)
                btrfs_node_key_to_cpu(eb, &found_key, 0);
        else
                btrfs_item_key_to_cpu(eb, &found_key, 0);
        ret = btrfs_comp_cpu_keys(first_key, &found_key);

#ifdef CONFIG_BTRFS_DEBUG
        if (ret) {
                WARN_ON(1);
                btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu key expected=(%llu, %u, %llu) has=(%llu, %u, %llu)",
                          eb->start, first_key->objectid, first_key->type,
                          first_key->offset, found_key.objectid,
                          found_key.type, found_key.offset);
        }
#endif
        return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:     expected transid, skip check if 0
 * @level:              expected level, mandatory check
 * @first_key:          expected key of first slot, skip check if NULL
 */
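/*
 * Mirror numbers are 1-based; 0 means "let the lower layers pick".  The
 * retry loop below starts at 0, then walks mirrors 1..num_copies while
 * skipping the one that already failed.
 */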
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
                                          struct extent_buffer *eb,
                                          u64 parent_transid, int level,
                                          struct btrfs_key *first_key)
{
        struct extent_io_tree *io_tree;
        int failed = 0;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;
        int failed_mirror = 0;

        clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
        io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
                                               mirror_num);
                if (!ret) {
                        if (verify_parent_transid(io_tree, eb,
                                                   parent_transid, 0))
                                ret = -EIO;
                        else if (verify_level_key(fs_info, eb, level,
                                                  first_key))
                                ret = -EUCLEAN;
                        else
                                break;
                }

                /*
                 * This buffer's crc is fine, but its contents are corrupted, so
                 * there is no reason to read the other copies, they won't be
                 * any less wrong.
                 */
                if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags) ||
                    ret == -EUCLEAN)
                        break;

                num_copies = btrfs_num_copies(fs_info,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        break;

                if (!failed_mirror) {
                        failed = 1;
                        failed_mirror = eb->read_mirror;
                }

                mirror_num++;
                if (mirror_num == failed_mirror)
                        mirror_num++;

                if (mirror_num > num_copies)
                        break;
        }

        if (failed && !ret && failed_mirror)
                repair_eb_io_failure(fs_info, eb, failed_mirror);

        return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
        u64 start = page_offset(page);
        u64 found_start;
        struct extent_buffer *eb;

        eb = (struct extent_buffer *)page->private;
        if (page != eb->pages[0])
                return 0;

        found_start = btrfs_header_bytenr(eb);
        /*
         * Please do not consolidate these warnings into a single if.
         * It is useful to know what went wrong.
         */
        if (WARN_ON(found_start != start))
                return -EUCLEAN;
        if (WARN_ON(!PageUptodate(page)))
                return -EUCLEAN;

        ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
                        btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

        return csum_tree_block(fs_info, eb, 0);
}

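/*
 * Match @eb's fsid against this filesystem and any seed filesystems it was
 * sprouted from; returns 0 on a match and 1 if the block belongs elsewhere.
 */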
static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
                                 struct extent_buffer *eb)
{
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        u8 fsid[BTRFS_FSID_SIZE];
        int ret = 1;

        read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
        while (fs_devices) {
                if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        break;
                }
                fs_devices = fs_devices->seed;
        }
        return ret;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
                                      u64 phy_offset, struct page *page,
                                      u64 start, u64 end, int mirror)
{
        u64 found_start;
        int found_level;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret = 0;
        int reads_done;

        if (!page->private)
                goto out;

        eb = (struct extent_buffer *)page->private;

        /*
         * The pending IO might have been the only thing that kept this buffer
         * in memory.  Make sure we have a ref for all these other checks.
         */
        extent_buffer_get(eb);

        reads_done = atomic_dec_and_test(&eb->io_pages);
        if (!reads_done)
                goto err;

        eb->read_mirror = mirror;
        if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
                ret = -EIO;
                goto err;
        }

        found_start = btrfs_header_bytenr(eb);
        if (found_start != eb->start) {
                btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
                             found_start, eb->start);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(fs_info, eb)) {
                btrfs_err_rl(fs_info, "bad fsid on block %llu",
                             eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);
        if (found_level >= BTRFS_MAX_LEVEL) {
                btrfs_err(fs_info, "bad tree block level %d",
                          (int)btrfs_header_level(eb));
                ret = -EIO;
                goto err;
        }

        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
                                       eb, found_level);

        ret = csum_tree_block(fs_info, eb, 1);
        if (ret)
                goto err;

        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
        if (found_level == 0 && btrfs_check_leaf_full(fs_info, eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }

        if (found_level > 0 && btrfs_check_node(fs_info, eb))
                ret = -EIO;

        if (!ret)
                set_extent_buffer_uptodate(eb);
err:
        if (reads_done &&
            test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(eb, ret);

        if (ret) {
                /*
                 * our io error hook is going to dec the io pages
                 * again, we have to make sure it has something
                 * to decrement
                 */
                atomic_inc(&eb->io_pages);
                clear_extent_buffer_uptodate(eb);
        }
        free_extent_buffer(eb);
out:
        return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
        struct extent_buffer *eb;

        eb = (struct extent_buffer *)page->private;
        set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
        eb->read_mirror = failed_mirror;
        atomic_dec(&eb->io_pages);
        if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(eb, -EIO);
        return -EIO;    /* we fixed nothing */
}

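/*
 * bio completion callback installed by btrfs_bio_wq_end_io().  It only
 * records the status and picks a workqueue based on the bio direction and
 * metadata type; the saved end_io is invoked later, in task context, from
 * end_workqueue_fn().
 */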
static void end_workqueue_bio(struct bio *bio)
{
        struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;
        struct btrfs_workqueue *wq;
        btrfs_work_func_t func;

        fs_info = end_io_wq->info;
        end_io_wq->status = bio->bi_status;

        if (bio_op(bio) == REQ_OP_WRITE) {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
                        wq = fs_info->endio_meta_write_workers;
                        func = btrfs_endio_meta_write_helper;
                } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
                        wq = fs_info->endio_freespace_worker;
                        func = btrfs_freespace_write_helper;
                } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
                        wq = fs_info->endio_raid56_workers;
                        func = btrfs_endio_raid56_helper;
                } else {
                        wq = fs_info->endio_write_workers;
                        func = btrfs_endio_write_helper;
                }
        } else {
                if (unlikely(end_io_wq->metadata ==
                             BTRFS_WQ_ENDIO_DIO_REPAIR)) {
                        wq = fs_info->endio_repair_workers;
                        func = btrfs_endio_repair_helper;
                } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
                        wq = fs_info->endio_raid56_workers;
                        func = btrfs_endio_raid56_helper;
                } else if (end_io_wq->metadata) {
                        wq = fs_info->endio_meta_workers;
                        func = btrfs_endio_meta_helper;
                } else {
                        wq = fs_info->endio_workers;
                        func = btrfs_endio_helper;
                }
        }

        btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
        btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        enum btrfs_wq_endio_type metadata)
{
        struct btrfs_end_io_wq *end_io_wq;

        end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
        if (!end_io_wq)
                return BLK_STS_RESOURCE;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->status = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}

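/*
 * The three helpers below implement the async submit pipeline set up by
 * btrfs_wq_submit_bio(): _start runs the checksumming hook, _done either
 * completes the bio with the recorded error or hands it to the done hook
 * for submission, and _free releases the async_submit_bio.
 */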
static void run_one_async_start(struct btrfs_work *work)
{
        struct async_submit_bio *async;
        blk_status_t ret;

        async = container_of(work, struct async_submit_bio, work);
        ret = async->submit_bio_start(async->private_data, async->bio,
                                      async->bio_offset);
        if (ret)
                async->status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);

        /* If an error occurred we just want to clean up the bio and move on */
        if (async->status) {
                async->bio->bi_status = async->status;
                bio_endio(async->bio);
                return;
        }

        async->submit_bio_done(async->private_data, async->bio, async->mirror_num);
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset, void *private_data,
                                 extent_submit_bio_start_t *submit_bio_start,
                                 extent_submit_bio_done_t *submit_bio_done)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return BLK_STS_RESOURCE;

        async->private_data = private_data;
        async->fs_info = fs_info;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;

        btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
                        run_one_async_done, run_one_async_free);

        async->bio_flags = bio_flags;
        async->bio_offset = bio_offset;

        async->status = 0;

        if (op_is_sync(bio->bi_opf))
                btrfs_set_work_high_priority(&async->work);

        btrfs_queue_work(fs_info->workers, &async->work);
        return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec;
        struct btrfs_root *root;
        int i, ret = 0;

        ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_segment_all(bvec, bio, i) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
                if (ret)
                        break;
        }

        return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
                                             u64 bio_offset)
{
        /*
         * When we're called for a write we're already in the async submission
         * context.  Just checksum the bio here; the actual mapping and
         * submission happen later in btree_submit_bio_done().
         */
        return btree_csum_one_bio(bio);
}

static blk_status_t btree_submit_bio_done(void *private_data, struct bio *bio,
                                            int mirror_num)
{
        struct inode *inode = private_data;
        blk_status_t ret;

        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
        if (ret) {
                bio->bi_status = ret;
                bio_endio(bio);
        }
        return ret;
}

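/*
 * Decide whether a metadata write should be checksummed inline or deferred
 * to the async workers: inline when a sync writer is waiting, or (on x86)
 * when SSE4.2 hardware CRC32C makes inline checksumming cheap.
 */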
static int check_async_write(struct btrfs_inode *bi)
{
        if (atomic_read(&bi->sync_writers))
                return 0;
#ifdef CONFIG_X86
        if (static_cpu_has(X86_FEATURE_XMM4_2))
                return 0;
#endif
        return 1;
}

static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
                                          int mirror_num, unsigned long bio_flags,
                                          u64 bio_offset)
{
        struct inode *inode = private_data;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int async = check_async_write(BTRFS_I(inode));
        blk_status_t ret;

        if (bio_op(bio) != REQ_OP_WRITE) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                ret = btrfs_bio_wq_end_io(fs_info, bio,
                                          BTRFS_WQ_ENDIO_METADATA);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
        } else if (!async) {
                ret = btree_csum_one_bio(bio);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
        } else {
                /*
                 * kthread helpers are used to submit writes so that
                 * checksumming can happen in parallel across all CPUs
                 */
                ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
                                          bio_offset, private_data,
                                          btree_submit_bio_start,
                                          btree_submit_bio_done);
        }

        if (ret)
                goto out_w_error;
        return 0;

out_w_error:
        bio->bi_status = ret;
        bio_endio(bio);
        return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
                        struct page *newpage, struct page *page,
                        enum migrate_mode mode)
{
        /*
         * we can't safely write a btree page from here,
         * we haven't done the locking hook
         */
        if (PageDirty(page))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
        return migrate_page(mapping, newpage, page, mode);
}
#endif

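/*
 * For background writeback (WB_SYNC_NONE) the btree mapping only starts IO
 * once enough metadata is dirty (BTRFS_DIRTY_METADATA_THRESH); kupdate-style
 * periodic writeback is skipped entirely.
 */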
static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct btrfs_fs_info *fs_info;
        int ret;

        if (wbc->sync_mode == WB_SYNC_NONE) {
                if (wbc->for_kupdate)
                        return 0;

                fs_info = BTRFS_I(mapping->host)->root->fs_info;
                /* this is a bit racy, but that's ok */
                ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
                                             BTRFS_DIRTY_METADATA_THRESH);
                if (ret < 0)
                        return 0;
        }
        return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        if (PageWriteback(page) || PageDirty(page))
                return 0;

        return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
                           "page private not zero on page %llu",
                           (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                put_page(page);
        }
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
        struct extent_buffer *eb;

        BUG_ON(!PagePrivate(page));
        eb = (struct extent_buffer *)page->private;
        BUG_ON(!eb);
        BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
        BUG_ON(!atomic_read(&eb->refs));
        btrfs_assert_tree_locked(eb);
#endif
        return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = btree_migratepage,
#endif
        .set_page_dirty = btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = fs_info->btree_inode;

        buf = btrfs_find_create_tree_block(fs_info, bytenr);
        if (IS_ERR(buf))
                return;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, WAIT_NONE, 0);
        free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
                         int mirror_num, struct extent_buffer **eb)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = fs_info->btree_inode;
        struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
        int ret;

        buf = btrfs_find_create_tree_block(fs_info, bytenr);
        if (IS_ERR(buf))
                return 0;

        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

        ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
                                       mirror_num);
        if (ret) {
                free_extent_buffer(buf);
                return ret;
        }

        if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
                free_extent_buffer(buf);
                return -EIO;
        } else if (extent_buffer_uptodate(buf)) {
                *eb = buf;
        } else {
                free_extent_buffer(buf);
        }
        return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
                                                struct btrfs_fs_info *fs_info,
                                                u64 bytenr)
{
        if (btrfs_is_testing(fs_info))
                return alloc_test_extent_buffer(fs_info, bytenr);
        return alloc_extent_buffer(fs_info, bytenr);
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
        return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
                                        buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
        filemap_fdatawait_range(buf->pages[0]->mapping,
                                buf->start, buf->start + buf->len - 1);
}

/*
 * Read a tree block at logical address @bytenr and do basic but critical
 * verification.
 *
 * @parent_transid:     expected transid of this tree block, skip check if 0
 * @level:              expected level, mandatory check
 * @first_key:          expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
                                      u64 parent_transid, int level,
                                      struct btrfs_key *first_key)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(fs_info, bytenr);
        if (IS_ERR(buf))
                return buf;

        ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
                                             level, first_key);
        if (ret) {
                free_extent_buffer(buf);
                return ERR_PTR(ret);
        }
        return buf;
}

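/*
 * If @buf was dirtied in the currently running transaction, clear its dirty
 * bit again and subtract its size from the dirty-metadata counter; blocks
 * from older transactions are left alone.
 */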
void clean_tree_block(struct btrfs_fs_info *fs_info,
                      struct extent_buffer *buf)
{
        if (btrfs_header_generation(buf) ==
            fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
                        percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
                                                 -buf->len,
                                                 fs_info->dirty_metadata_batch);
                        /* ugh, clear_extent_buffer_dirty needs to lock the page */
                        btrfs_set_lock_blocking(buf);
                        clear_extent_buffer_dirty(buf);
                }
        }
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
        struct btrfs_subvolume_writers *writers;
        int ret;

        writers = kmalloc(sizeof(*writers), GFP_NOFS);
        if (!writers)
                return ERR_PTR(-ENOMEM);

        ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
        if (ret < 0) {
                kfree(writers);
                return ERR_PTR(ret);
        }

        init_waitqueue_head(&writers->wait);
        return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
        percpu_counter_destroy(&writers->counter);
        kfree(writers);
}

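/*
 * Common initialization shared by every btrfs_root, whether read from disk
 * or created from scratch: reset the bookkeeping fields and set up all
 * locks, lists and waitqueues.  Callers fill in root_key and node afterwards.
 */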
static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
                         u64 objectid)
{
        bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
        root->node = NULL;
        root->commit_root = NULL;
        root->state = 0;
        root->orphan_cleanup_state = 0;

        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_objectid = 0;
        root->nr_delalloc_inodes = 0;
        root->nr_ordered_extents = 0;
        root->name = NULL;
        root->inode_tree = RB_ROOT;
        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;
        root->orphan_block_rsv = NULL;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->root_list);
        INIT_LIST_HEAD(&root->delalloc_inodes);
        INIT_LIST_HEAD(&root->delalloc_root);
        INIT_LIST_HEAD(&root->ordered_extents);
        INIT_LIST_HEAD(&root->ordered_root);
        INIT_LIST_HEAD(&root->logged_list[0]);
        INIT_LIST_HEAD(&root->logged_list[1]);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->delalloc_lock);
        spin_lock_init(&root->ordered_extent_lock);
        spin_lock_init(&root->accounting_lock);
        spin_lock_init(&root->log_extents_lock[0]);
        spin_lock_init(&root->log_extents_lock[1]);
        spin_lock_init(&root->qgroup_meta_rsv_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        mutex_init(&root->ordered_extent_mutex);
        mutex_init(&root->delalloc_mutex);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        INIT_LIST_HEAD(&root->log_ctxs[0]);
        INIT_LIST_HEAD(&root->log_ctxs[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        atomic_set(&root->log_batch, 0);
        atomic_set(&root->orphan_inodes, 0);
        refcount_set(&root->refs, 1);
        atomic_set(&root->will_be_snapshotted, 0);
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
        if (!dummy)
                extent_io_tree_init(&root->dirty_log_pages, NULL);

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        if (!dummy)
                root->defrag_trans_start = fs_info->generation;
        else
                root->defrag_trans_start = 0;
        root->root_key.objectid = objectid;
        root->anon_dev = 0;

        spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
                gfp_t flags)
{
        struct btrfs_root *root = kzalloc(sizeof(*root), flags);
        if (root)
                root->fs_info = fs_info;
        return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;

        if (!fs_info)
                return ERR_PTR(-EINVAL);

        root = btrfs_alloc_root(fs_info, GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);

1264        __setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
1265        root->alloc_bytenr = 0;
1266
1267        return root;
1268}
1269#endif
1270
1271struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1272                                     struct btrfs_fs_info *fs_info,
1273                                     u64 objectid)
1274{
1275        struct extent_buffer *leaf;
1276        struct btrfs_root *tree_root = fs_info->tree_root;
1277        struct btrfs_root *root;
1278        struct btrfs_key key;
1279        int ret = 0;
1280        uuid_le uuid = NULL_UUID_LE;
1281
1282        root = btrfs_alloc_root(fs_info, GFP_KERNEL);
1283        if (!root)
1284                return ERR_PTR(-ENOMEM);
1285
1286        __setup_root(root, fs_info, objectid);
1287        root->root_key.objectid = objectid;
1288        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1289        root->root_key.offset = 0;
1290
1291        leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
1292        if (IS_ERR(leaf)) {
1293                ret = PTR_ERR(leaf);
1294                leaf = NULL;
1295                goto fail;
1296        }
1297
1298        memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
1299        btrfs_set_header_bytenr(leaf, leaf->start);
1300        btrfs_set_header_generation(leaf, trans->transid);
1301        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1302        btrfs_set_header_owner(leaf, objectid);
1303        root->node = leaf;
1304
1305        write_extent_buffer_fsid(leaf, fs_info->fsid);
1306        write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
1307        btrfs_mark_buffer_dirty(leaf);
1308
1309        root->commit_root = btrfs_root_node(root);
1310        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
1311
1312        root->root_item.flags = 0;
1313        root->root_item.byte_limit = 0;
1314        btrfs_set_root_bytenr(&root->root_item, leaf->start);
1315        btrfs_set_root_generation(&root->root_item, trans->transid);
1316        btrfs_set_root_level(&root->root_item, 0);
1317        btrfs_set_root_refs(&root->root_item, 1);
1318        btrfs_set_root_used(&root->root_item, leaf->len);
1319        btrfs_set_root_last_snapshot(&root->root_item, 0);
1320        btrfs_set_root_dirid(&root->root_item, 0);
1321        if (is_fstree(objectid))
1322                uuid_le_gen(&uuid);
1323        memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
1324        root->root_item.drop_level = 0;
1325
1326        key.objectid = objectid;
1327        key.type = BTRFS_ROOT_ITEM_KEY;
1328        key.offset = 0;
1329        ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1330        if (ret)
1331                goto fail;
1332
1333        btrfs_tree_unlock(leaf);
1334
1335        return root;
1336
1337fail:
1338        if (leaf) {
1339                btrfs_tree_unlock(leaf);
1340                free_extent_buffer(root->commit_root);
1341                free_extent_buffer(leaf);
1342        }
1343        kfree(root);
1344
1345        return ERR_PTR(ret);
1346}
1347
1348static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1349                                         struct btrfs_fs_info *fs_info)
1350{
1351        struct btrfs_root *root;
1352        struct extent_buffer *leaf;
1353
1354        root = btrfs_alloc_root(fs_info, GFP_NOFS);
1355        if (!root)
1356                return ERR_PTR(-ENOMEM);
1357
1358        __setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1359
1360        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1361        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1362        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1363
1364        /*
1365         * DON'T set REF_COWS for log trees
1366         *
1367         * log trees do not get reference counted because they go away
1368         * before a real commit is actually done.  They do store pointers
1369         * to file data extents, and those reference counts still get
1370         * updated (along with back refs to the log tree).
1371         */
1372
1373        leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
1374                        NULL, 0, 0, 0);
1375        if (IS_ERR(leaf)) {
1376                kfree(root);
1377                return ERR_CAST(leaf);
1378        }
1379
1380        memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
1381        btrfs_set_header_bytenr(leaf, leaf->start);
1382        btrfs_set_header_generation(leaf, trans->transid);
1383        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1384        btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1385        root->node = leaf;
1386
1387        write_extent_buffer_fsid(root->node, fs_info->fsid);
1388        btrfs_mark_buffer_dirty(root->node);
1389        btrfs_tree_unlock(root->node);
1390        return root;
1391}
1392
1393int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1394                             struct btrfs_fs_info *fs_info)
1395{
1396        struct btrfs_root *log_root;
1397
1398        log_root = alloc_log_tree(trans, fs_info);
1399        if (IS_ERR(log_root))
1400                return PTR_ERR(log_root);
1401        WARN_ON(fs_info->log_root_tree);
1402        fs_info->log_root_tree = log_root;
1403        return 0;
1404}
1405
1406int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1407                       struct btrfs_root *root)
1408{
1409        struct btrfs_fs_info *fs_info = root->fs_info;
1410        struct btrfs_root *log_root;
1411        struct btrfs_inode_item *inode_item;
1412
1413        log_root = alloc_log_tree(trans, fs_info);
1414        if (IS_ERR(log_root))
1415                return PTR_ERR(log_root);
1416
1417        log_root->last_trans = trans->transid;
1418        log_root->root_key.offset = root->root_key.objectid;
1419
1420        inode_item = &log_root->root_item.inode;
1421        btrfs_set_stack_inode_generation(inode_item, 1);
1422        btrfs_set_stack_inode_size(inode_item, 3);
1423        btrfs_set_stack_inode_nlink(inode_item, 1);
1424        btrfs_set_stack_inode_nbytes(inode_item,
1425                                     fs_info->nodesize);
1426        btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1427
1428        btrfs_set_root_node(&log_root->root_item, log_root->node);
1429
1430        WARN_ON(root->log_root);
1431        root->log_root = log_root;
1432        root->log_transid = 0;
1433        root->log_transid_committed = -1;
1434        root->last_log_commit = 0;
1435        return 0;
1436}
1437
1438static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1439                                               struct btrfs_key *key)
1440{
1441        struct btrfs_root *root;
1442        struct btrfs_fs_info *fs_info = tree_root->fs_info;
1443        struct btrfs_path *path;
1444        u64 generation;
1445        int ret;
1446        int level;
1447
1448        path = btrfs_alloc_path();
1449        if (!path)
1450                return ERR_PTR(-ENOMEM);
1451
1452        root = btrfs_alloc_root(fs_info, GFP_NOFS);
1453        if (!root) {
1454                ret = -ENOMEM;
1455                goto alloc_fail;
1456        }
1457
1458        __setup_root(root, fs_info, key->objectid);
1459
1460        ret = btrfs_find_root(tree_root, key, path,
1461                              &root->root_item, &root->root_key);
1462        if (ret) {
1463                if (ret > 0)
1464                        ret = -ENOENT;
1465                goto find_fail;
1466        }
1467
1468        generation = btrfs_root_generation(&root->root_item);
1469        level = btrfs_root_level(&root->root_item);
1470        root->node = read_tree_block(fs_info,
1471                                     btrfs_root_bytenr(&root->root_item),
1472                                     generation, level, NULL);
1473        if (IS_ERR(root->node)) {
1474                ret = PTR_ERR(root->node);
1475                goto find_fail;
1476        } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1477                ret = -EIO;
1478                free_extent_buffer(root->node);
1479                goto find_fail;
1480        }
1481        root->commit_root = btrfs_root_node(root);
1482out:
1483        btrfs_free_path(path);
1484        return root;
1485
1486find_fail:
1487        kfree(root);
1488alloc_fail:
1489        root = ERR_PTR(ret);
1490        goto out;
1491}
1492
1493struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
1494                                      struct btrfs_key *location)
1495{
1496        struct btrfs_root *root;
1497
1498        root = btrfs_read_tree_root(tree_root, location);
1499        if (IS_ERR(root))
1500                return root;
1501
1502        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1503                set_bit(BTRFS_ROOT_REF_COWS, &root->state);
1504                btrfs_check_and_init_root_item(&root->root_item);
1505        }
1506
1507        return root;
1508}
1509
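/*
 * Set up the in-memory, mount-time state of a subvolume root: the free
 * inode caches, the subvolume writers tracking, an anonymous bdev (this
 * is what st_dev reports for files inside the subvolume) and the cached
 * highest objectid.
 */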
1510int btrfs_init_fs_root(struct btrfs_root *root)
1511{
1512        int ret;
1513        struct btrfs_subvolume_writers *writers;
1514
1515        root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1516        root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1517                                        GFP_NOFS);
1518        if (!root->free_ino_pinned || !root->free_ino_ctl) {
1519                ret = -ENOMEM;
1520                goto fail;
1521        }
1522
1523        writers = btrfs_alloc_subvolume_writers();
1524        if (IS_ERR(writers)) {
1525                ret = PTR_ERR(writers);
1526                goto fail;
1527        }
1528        root->subv_writers = writers;
1529
1530        btrfs_init_free_ino_ctl(root);
1531        spin_lock_init(&root->ino_cache_lock);
1532        init_waitqueue_head(&root->ino_cache_wait);
1533
1534        ret = get_anon_bdev(&root->anon_dev);
1535        if (ret)
1536                goto fail;
1537
1538        mutex_lock(&root->objectid_mutex);
1539        ret = btrfs_find_highest_objectid(root,
1540                                        &root->highest_objectid);
1541        if (ret) {
1542                mutex_unlock(&root->objectid_mutex);
1543                goto fail;
1544        }
1545
1546        ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
1547
1548        mutex_unlock(&root->objectid_mutex);
1549
1550        return 0;
1551fail:
1552        /* The caller is responsible for calling free_fs_root(). */
1553        return ret;
1554}
1555
1556struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1557                                        u64 root_id)
1558{
1559        struct btrfs_root *root;
1560
1561        spin_lock(&fs_info->fs_roots_radix_lock);
1562        root = radix_tree_lookup(&fs_info->fs_roots_radix,
1563                                 (unsigned long)root_id);
1564        spin_unlock(&fs_info->fs_roots_radix_lock);
1565        return root;
1566}
1567
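/*
 * Insert a root into the radix tree cache.  The insertion below follows
 * the usual preload pattern, which keeps the node allocation out of the
 * spinlocked section:
 *
 *	radix_tree_preload(GFP_NOFS);
 *	spin_lock(&lock);
 *	radix_tree_insert(&tree, index, item);
 *	spin_unlock(&lock);
 *	radix_tree_preload_end();
 */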
1568int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1569                         struct btrfs_root *root)
1570{
1571        int ret;
1572
1573        ret = radix_tree_preload(GFP_NOFS);
1574        if (ret)
1575                return ret;
1576
1577        spin_lock(&fs_info->fs_roots_radix_lock);
1578        ret = radix_tree_insert(&fs_info->fs_roots_radix,
1579                                (unsigned long)root->root_key.objectid,
1580                                root);
1581        if (ret == 0)
1582                set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1583        spin_unlock(&fs_info->fs_roots_radix_lock);
1584        radix_tree_preload_end();
1585
1586        return ret;
1587}
1588
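/*
 * Get an fs root, reading it from disk and caching it in the radix tree
 * if it is not resident yet.  The global roots (tree, extent, chunk, dev,
 * csum, ...) are returned straight from fs_info.  With @check_ref, roots
 * whose root item refcount has dropped to zero (deleted subvolumes) are
 * reported as -ENOENT.
 *
 * A typical call, building the key for a subvolume (a sketch; subvol_id
 * is a placeholder):
 *
 *	struct btrfs_key key = {
 *		.objectid = subvol_id,
 *		.type = BTRFS_ROOT_ITEM_KEY,
 *		.offset = (u64)-1,
 *	};
 *	struct btrfs_root *root = btrfs_get_fs_root(fs_info, &key, true);
 *
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 */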
1589struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1590                                     struct btrfs_key *location,
1591                                     bool check_ref)
1592{
1593        struct btrfs_root *root;
1594        struct btrfs_path *path;
1595        struct btrfs_key key;
1596        int ret;
1597
1598        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1599                return fs_info->tree_root;
1600        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1601                return fs_info->extent_root;
1602        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1603                return fs_info->chunk_root;
1604        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1605                return fs_info->dev_root;
1606        if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1607                return fs_info->csum_root;
1608        if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1609                return fs_info->quota_root ? fs_info->quota_root :
1610                                             ERR_PTR(-ENOENT);
1611        if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
1612                return fs_info->uuid_root ? fs_info->uuid_root :
1613                                            ERR_PTR(-ENOENT);
1614        if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
1615                return fs_info->free_space_root ? fs_info->free_space_root :
1616                                                  ERR_PTR(-ENOENT);
1617again:
1618        root = btrfs_lookup_fs_root(fs_info, location->objectid);
1619        if (root) {
1620                if (check_ref && btrfs_root_refs(&root->root_item) == 0)
1621                        return ERR_PTR(-ENOENT);
1622                return root;
1623        }
1624
1625        root = btrfs_read_fs_root(fs_info->tree_root, location);
1626        if (IS_ERR(root))
1627                return root;
1628
1629        if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1630                ret = -ENOENT;
1631                goto fail;
1632        }
1633
1634        ret = btrfs_init_fs_root(root);
1635        if (ret)
1636                goto fail;
1637
1638        path = btrfs_alloc_path();
1639        if (!path) {
1640                ret = -ENOMEM;
1641                goto fail;
1642        }
1643        key.objectid = BTRFS_ORPHAN_OBJECTID;
1644        key.type = BTRFS_ORPHAN_ITEM_KEY;
1645        key.offset = location->objectid;
1646
1647        ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1648        btrfs_free_path(path);
1649        if (ret < 0)
1650                goto fail;
1651        if (ret == 0)
1652                set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1653
1654        ret = btrfs_insert_fs_root(fs_info, root);
1655        if (ret) {
1656                if (ret == -EEXIST) {
1657                        free_fs_root(root);
1658                        goto again;
1659                }
1660                goto fail;
1661        }
1662        return root;
1663fail:
1664        free_fs_root(root);
1665        return ERR_PTR(ret);
1666}
1667
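/*
 * bdi congestion callback: report the filesystem as congested when any
 * of the underlying devices' bdis is congested.
 */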
1668static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1669{
1670        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1671        int ret = 0;
1672        struct btrfs_device *device;
1673        struct backing_dev_info *bdi;
1674
1675        rcu_read_lock();
1676        list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1677                if (!device->bdev)
1678                        continue;
1679                bdi = device->bdev->bd_bdi;
1680                if (bdi_congested(bdi, bdi_bits)) {
1681                        ret = 1;
1682                        break;
1683                }
1684        }
1685        rcu_read_unlock();
1686        return ret;
1687}
1688
1689/*
1690 * Called by the kthread helper functions to finally call the bio end_io
1691 * functions.  This is where read checksum verification actually happens.
1692 */
1693static void end_workqueue_fn(struct btrfs_work *work)
1694{
1695        struct bio *bio;
1696        struct btrfs_end_io_wq *end_io_wq;
1697
1698        end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
1699        bio = end_io_wq->bio;
1700
1701        bio->bi_status = end_io_wq->status;
1702        bio->bi_private = end_io_wq->private;
1703        bio->bi_end_io = end_io_wq->end_io;
1704        kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
1705        bio_endio(bio);
1706}
1707
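/*
 * Background housekeeping thread: runs delayed iputs, drops one deleted
 * snapshot/subvolume at a time, defrags inodes and deletes unused block
 * groups.  It is woken up by the transaction kthread and backs off
 * whenever it might interfere with a mount in progress or a commit.
 */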
1708static int cleaner_kthread(void *arg)
1709{
1710        struct btrfs_root *root = arg;
1711        struct btrfs_fs_info *fs_info = root->fs_info;
1712        int again;
1713        struct btrfs_trans_handle *trans;
1714
1715        do {
1716                again = 0;
1717
1718                /* Make the cleaner go to sleep early. */
1719                if (btrfs_need_cleaner_sleep(fs_info))
1720                        goto sleep;
1721
1722                /*
1723                 * Do not do anything if we might cause open_ctree() to block
1724                 * before we have finished mounting the filesystem.
1725                 */
1726                if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1727                        goto sleep;
1728
1729                if (!mutex_trylock(&fs_info->cleaner_mutex))
1730                        goto sleep;
1731
1732                /*
1733                 * Re-check after taking the mutex, in case the status of
1734                 * the fs changed between the check above and the trylock.
1735                 */
1736                if (btrfs_need_cleaner_sleep(fs_info)) {
1737                        mutex_unlock(&fs_info->cleaner_mutex);
1738                        goto sleep;
1739                }
1740
1741                mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
1742                btrfs_run_delayed_iputs(fs_info);
1743                mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
1744
1745                again = btrfs_clean_one_deleted_snapshot(root);
1746                mutex_unlock(&fs_info->cleaner_mutex);
1747
1748                /*
1749                 * The defragger has already dealt with the R/O remount and
1750                 * umount, so we needn't do anything special here.
1751                 */
1752                btrfs_run_defrag_inodes(fs_info);
1753
1754                /*
1755                 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1756                 * with relocation (btrfs_relocate_chunk) and relocation
1757                 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1758                 * after acquiring fs_info->delete_unused_bgs_mutex. So we
1759                 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1760                 * unused block groups.
1761                 */
1762                btrfs_delete_unused_bgs(fs_info);
1763sleep:
1764                if (!again) {
1765                        set_current_state(TASK_INTERRUPTIBLE);
1766                        if (!kthread_should_stop())
1767                                schedule();
1768                        __set_current_state(TASK_RUNNING);
1769                }
1770        } while (!kthread_should_stop());
1771
1772        /*
1773         * The transaction kthread is stopped before us and wakes us up.
1774         * However, we might have started a new transaction and COWed some
1775         * tree blocks, when deleting unused block groups for example.  So
1776         * make sure we commit the transaction we started, to get a clean
1777         * shutdown when evicting the btree inode - if it has dirty pages
1778         * when we do the final iput() on it, eviction will trigger
1779         * writeback, which will fail with NULL pointer dereferences
1780         * because work queues and other resources were already released
1781         * and destroyed by the time the iput/eviction/writeback happens.
1782         */
1783        trans = btrfs_attach_transaction(root);
1784        if (IS_ERR(trans)) {
1785                if (PTR_ERR(trans) != -ENOENT)
1786                        btrfs_err(fs_info,
1787                                  "cleaner transaction attach returned %ld",
1788                                  PTR_ERR(trans));
1789        } else {
1790                int ret;
1791
1792                ret = btrfs_commit_transaction(trans);
1793                if (ret)
1794                        btrfs_err(fs_info,
1795                                  "cleaner open transaction commit returned %d",
1796                                  ret);
1797        }
1798
1799        return 0;
1800}
1801
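/*
 * Periodic commit thread: every fs_info->commit_interval seconds (30 by
 * default) it commits the running transaction, unless the transaction is
 * still younger than the interval and no async commit was requested.  It
 * also kicks the cleaner kthread on every iteration.
 */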
1802static int transaction_kthread(void *arg)
1803{
1804        struct btrfs_root *root = arg;
1805        struct btrfs_fs_info *fs_info = root->fs_info;
1806        struct btrfs_trans_handle *trans;
1807        struct btrfs_transaction *cur;
1808        u64 transid;
1809        unsigned long now;
1810        unsigned long delay;
1811        bool cannot_commit;
1812
1813        do {
1814                cannot_commit = false;
1815                delay = HZ * fs_info->commit_interval;
1816                mutex_lock(&fs_info->transaction_kthread_mutex);
1817
1818                spin_lock(&fs_info->trans_lock);
1819                cur = fs_info->running_transaction;
1820                if (!cur) {
1821                        spin_unlock(&fs_info->trans_lock);
1822                        goto sleep;
1823                }
1824
1825                now = get_seconds();
1826                if (cur->state < TRANS_STATE_BLOCKED &&
1827                    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
1828                    (now < cur->start_time ||
1829                     now - cur->start_time < fs_info->commit_interval)) {
1830                        spin_unlock(&fs_info->trans_lock);
1831                        delay = HZ * 5;
1832                        goto sleep;
1833                }
1834                transid = cur->transid;
1835                spin_unlock(&fs_info->trans_lock);
1836
1837                /* If the file system is aborted, this will always fail. */
1838                trans = btrfs_attach_transaction(root);
1839                if (IS_ERR(trans)) {
1840                        if (PTR_ERR(trans) != -ENOENT)
1841                                cannot_commit = true;
1842                        goto sleep;
1843                }
1844                if (transid == trans->transid) {
1845                        btrfs_commit_transaction(trans);
1846                } else {
1847                        btrfs_end_transaction(trans);
1848                }
1849sleep:
1850                wake_up_process(fs_info->cleaner_kthread);
1851                mutex_unlock(&fs_info->transaction_kthread_mutex);
1852
1853                if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1854                                      &fs_info->fs_state)))
1855                        btrfs_cleanup_transaction(fs_info);
1856                if (!kthread_should_stop() &&
1857                                (!btrfs_transaction_blocked(fs_info) ||
1858                                 cannot_commit))
1859                        schedule_timeout_interruptible(delay);
1860        } while (!kthread_should_stop());
1861        return 0;
1862}
1863
1864/*
1865 * This will find the highest generation in the array of
1866 * root backups.  The index of the newest entry is returned,
1867 * or -1 if we can't find anything.
1868 *
1869 * We check to make sure the array is valid by comparing the
1870 * generation of the latest root in the array with the generation
1871 * in the super block.  If they don't match, we discard the array.
1872 */
1873static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1874{
1875        u64 cur;
1876        int newest_index = -1;
1877        struct btrfs_root_backup *root_backup;
1878        int i;
1879
1880        for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1881                root_backup = info->super_copy->super_roots + i;
1882                cur = btrfs_backup_tree_root_gen(root_backup);
1883                if (cur == newest_gen)
1884                        newest_index = i;
1885        }
1886
1887        /* check to see if we actually wrapped around */
1888        if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1889                root_backup = info->super_copy->super_roots;
1890                cur = btrfs_backup_tree_root_gen(root_backup);
1891                if (cur == newest_gen)
1892                        newest_index = 0;
1893        }
1894        return newest_index;
1895}
1896
1897
1898/*
1899 * Find the oldest backup so we know where to store new entries
1900 * in the backup array.  This will set the backup_root_index
1901 * field in the fs_info struct.
1902 */
1903static void find_oldest_super_backup(struct btrfs_fs_info *info,
1904                                     u64 newest_gen)
1905{
1906        int newest_index = -1;
1907
1908        newest_index = find_newest_super_backup(info, newest_gen);
1909        /* if there was garbage in there, just move along */
1910        if (newest_index == -1) {
1911                info->backup_root_index = 0;
1912        } else {
1913                info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1914        }
1915}
1916
1917/*
1918 * Copy all the root pointers into the super backup array.
1919 * This will bump the backup pointer by one when it is
1920 * done.
1921 */
1922static void backup_super_roots(struct btrfs_fs_info *info)
1923{
1924        int next_backup;
1925        struct btrfs_root_backup *root_backup;
1926        int last_backup;
1927
1928        next_backup = info->backup_root_index;
1929        last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1930                BTRFS_NUM_BACKUP_ROOTS;
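        /*
         * Ring arithmetic sketch, assuming BTRFS_NUM_BACKUP_ROOTS == 4:
         * with next_backup == 0 the previous slot is (0 + 4 - 1) % 4 == 3,
         * and after filling a slot the index below advances by one, again
         * modulo 4.
         */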
1931
1932        /*
1933         * Just overwrite the last backup if we're at the same generation;
1934         * this happens only at umount.
1935         */
1936        root_backup = info->super_for_commit->super_roots + last_backup;
1937        if (btrfs_backup_tree_root_gen(root_backup) ==
1938            btrfs_header_generation(info->tree_root->node))
1939                next_backup = last_backup;
1940
1941        root_backup = info->super_for_commit->super_roots + next_backup;
1942
1943        /*
1944         * Make sure all of our padding and empty slots get zero-filled
1945         * regardless of which ones we use today.
1946         */
1947        memset(root_backup, 0, sizeof(*root_backup));
1948
1949        info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1950
1951        btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1952        btrfs_set_backup_tree_root_gen(root_backup,
1953                               btrfs_header_generation(info->tree_root->node));
1954
1955        btrfs_set_backup_tree_root_level(root_backup,
1956                               btrfs_header_level(info->tree_root->node));
1957
1958        btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1959        btrfs_set_backup_chunk_root_gen(root_backup,
1960                               btrfs_header_generation(info->chunk_root->node));
1961        btrfs_set_backup_chunk_root_level(root_backup,
1962                               btrfs_header_level(info->chunk_root->node));
1963
1964        btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1965        btrfs_set_backup_extent_root_gen(root_backup,
1966                               btrfs_header_generation(info->extent_root->node));
1967        btrfs_set_backup_extent_root_level(root_backup,
1968                               btrfs_header_level(info->extent_root->node));
1969
1970        /*
1971         * We might commit during log recovery, which happens before we set
1972         * the fs_root.  Make sure it is valid before we fill it in.
1973         */
1974        if (info->fs_root && info->fs_root->node) {
1975                btrfs_set_backup_fs_root(root_backup,
1976                                         info->fs_root->node->start);
1977                btrfs_set_backup_fs_root_gen(root_backup,
1978                               btrfs_header_generation(info->fs_root->node));
1979                btrfs_set_backup_fs_root_level(root_backup,
1980                               btrfs_header_level(info->fs_root->node));
1981        }
1982
1983        btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1984        btrfs_set_backup_dev_root_gen(root_backup,
1985                               btrfs_header_generation(info->dev_root->node));
1986        btrfs_set_backup_dev_root_level(root_backup,
1987                                       btrfs_header_level(info->dev_root->node));
1988
1989        btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1990        btrfs_set_backup_csum_root_gen(root_backup,
1991                               btrfs_header_generation(info->csum_root->node));
1992        btrfs_set_backup_csum_root_level(root_backup,
1993                               btrfs_header_level(info->csum_root->node));
1994
1995        btrfs_set_backup_total_bytes(root_backup,
1996                             btrfs_super_total_bytes(info->super_copy));
1997        btrfs_set_backup_bytes_used(root_backup,
1998                             btrfs_super_bytes_used(info->super_copy));
1999        btrfs_set_backup_num_devices(root_backup,
2000                             btrfs_super_num_devices(info->super_copy));
2001
2002        /*
2003         * If we don't copy this out to the super_copy, it won't get
2004         * remembered for the next commit.
2005         */
2006        memcpy(&info->super_copy->super_roots,
2007               &info->super_for_commit->super_roots,
2008               sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
2009}
2010
2011/*
2012 * This copies info out of the root backup array and back into
2013 * the in-memory super block.  It is meant to help iterate through
2014 * the array, so you pass it the number of backups you've already
2015 * tried and the last backup index you used.
2016 *
2017 * This returns -1 when it has tried all the backups.
2018 */
2019static noinline int next_root_backup(struct btrfs_fs_info *info,
2020                                     struct btrfs_super_block *super,
2021                                     int *num_backups_tried, int *backup_index)
2022{
2023        struct btrfs_root_backup *root_backup;
2024        int newest = *backup_index;
2025
2026        if (*num_backups_tried == 0) {
2027                u64 gen = btrfs_super_generation(super);
2028
2029                newest = find_newest_super_backup(info, gen);
2030                if (newest == -1)
2031                        return -1;
2032
2033                *backup_index = newest;
2034                *num_backups_tried = 1;
2035        } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
2036                /* we've tried all the backups, all done */
2037                return -1;
2038        } else {
2039                /* jump to the next oldest backup */
2040                newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
2041                        BTRFS_NUM_BACKUP_ROOTS;
2042                *backup_index = newest;
2043                *num_backups_tried += 1;
2044        }
2045        root_backup = super->super_roots + newest;
2046
2047        btrfs_set_super_generation(super,
2048                                   btrfs_backup_tree_root_gen(root_backup));
2049        btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
2050        btrfs_set_super_root_level(super,
2051                                   btrfs_backup_tree_root_level(root_backup));
2052        btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
2053
2054        /*
2055         * FIXME: the total bytes and num_devices need to match, or we
2056         * should require a fsck.
2057         */
2058        btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2059        btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2060        return 0;
2061}
2062
2063/* Helper to clean up the workers. */
2064static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2065{
2066        btrfs_destroy_workqueue(fs_info->fixup_workers);
2067        btrfs_destroy_workqueue(fs_info->delalloc_workers);
2068        btrfs_destroy_workqueue(fs_info->workers);
2069        btrfs_destroy_workqueue(fs_info->endio_workers);
2070        btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2071        btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2072        btrfs_destroy_workqueue(fs_info->rmw_workers);
2073        btrfs_destroy_workqueue(fs_info->endio_write_workers);
2074        btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2075        btrfs_destroy_workqueue(fs_info->submit_workers);
2076        btrfs_destroy_workqueue(fs_info->delayed_workers);
2077        btrfs_destroy_workqueue(fs_info->caching_workers);
2078        btrfs_destroy_workqueue(fs_info->readahead_workers);
2079        btrfs_destroy_workqueue(fs_info->flush_workers);
2080        btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2081        btrfs_destroy_workqueue(fs_info->extent_workers);
2082        /*
2083         * Now that all other work queues are destroyed, we can safely destroy
2084         * the queues used for metadata I/O, since tasks from those other work
2085         * queues can do metadata I/O operations.
2086         */
2087        btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2088        btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2089}
2090
2091static void free_root_extent_buffers(struct btrfs_root *root)
2092{
2093        if (root) {
2094                free_extent_buffer(root->node);
2095                free_extent_buffer(root->commit_root);
2096                root->node = NULL;
2097                root->commit_root = NULL;
2098        }
2099}
2100
2101/* Helper to clean up the tree roots. */
2102static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2103{
2104        free_root_extent_buffers(info->tree_root);
2105
2106        free_root_extent_buffers(info->dev_root);
2107        free_root_extent_buffers(info->extent_root);
2108        free_root_extent_buffers(info->csum_root);
2109        free_root_extent_buffers(info->quota_root);
2110        free_root_extent_buffers(info->uuid_root);
2111        if (chunk_root)
2112                free_root_extent_buffers(info->chunk_root);
2113        free_root_extent_buffers(info->free_space_root);
2114}
2115
2116void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2117{
2118        int ret;
2119        struct btrfs_root *gang[8];
2120        int i;
2121
2122        while (!list_empty(&fs_info->dead_roots)) {
2123                gang[0] = list_entry(fs_info->dead_roots.next,
2124                                     struct btrfs_root, root_list);
2125                list_del(&gang[0]->root_list);
2126
2127                if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2128                        btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2129                } else {
2130                        free_extent_buffer(gang[0]->node);
2131                        free_extent_buffer(gang[0]->commit_root);
2132                        btrfs_put_fs_root(gang[0]);
2133                }
2134        }
2135
2136        while (1) {
2137                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2138                                             (void **)gang, 0,
2139                                             ARRAY_SIZE(gang));
2140                if (!ret)
2141                        break;
2142                for (i = 0; i < ret; i++)
2143                        btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2144        }
2145
2146        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2147                btrfs_free_log_root_tree(NULL, fs_info);
2148                btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2149        }
2150}
2151
2152static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2153{
2154        mutex_init(&fs_info->scrub_lock);
2155        atomic_set(&fs_info->scrubs_running, 0);
2156        atomic_set(&fs_info->scrub_pause_req, 0);
2157        atomic_set(&fs_info->scrubs_paused, 0);
2158        atomic_set(&fs_info->scrub_cancel_req, 0);
2159        init_waitqueue_head(&fs_info->scrub_pause_wait);
2160        fs_info->scrub_workers_refcnt = 0;
2161}
2162
2163static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2164{
2165        spin_lock_init(&fs_info->balance_lock);
2166        mutex_init(&fs_info->balance_mutex);
2167        atomic_set(&fs_info->balance_running, 0);
2168        atomic_set(&fs_info->balance_pause_req, 0);
2169        atomic_set(&fs_info->balance_cancel_req, 0);
2170        fs_info->balance_ctl = NULL;
2171        init_waitqueue_head(&fs_info->balance_wait_q);
2172}
2173
2174static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2175{
2176        struct inode *inode = fs_info->btree_inode;
2177
2178        inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2179        set_nlink(inode, 1);
2180        /*
2181         * We set i_size on the btree inode to the maximum possible offset
2182         * (OFFSET_MAX); the real end of the address space is determined by
2183         * all of the devices in the system.
2184         */
2185        inode->i_size = OFFSET_MAX;
2186        inode->i_mapping->a_ops = &btree_aops;
2187
2188        RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2189        extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
2190        BTRFS_I(inode)->io_tree.track_uptodate = 0;
2191        extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2192
2193        BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2194
2195        BTRFS_I(inode)->root = fs_info->tree_root;
2196        memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2197        set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2198        btrfs_insert_inode_hash(inode);
2199}
2200
2201static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2202{
2203        fs_info->dev_replace.lock_owner = 0;
2204        atomic_set(&fs_info->dev_replace.nesting_level, 0);
2205        mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2206        rwlock_init(&fs_info->dev_replace.lock);
2207        atomic_set(&fs_info->dev_replace.read_locks, 0);
2208        atomic_set(&fs_info->dev_replace.blocking_readers, 0);
2209        init_waitqueue_head(&fs_info->replace_wait);
2210        init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
2211}
2212
2213static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2214{
2215        spin_lock_init(&fs_info->qgroup_lock);
2216        mutex_init(&fs_info->qgroup_ioctl_lock);
2217        fs_info->qgroup_tree = RB_ROOT;
2218        fs_info->qgroup_op_tree = RB_ROOT;
2219        INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2220        fs_info->qgroup_seq = 1;
2221        fs_info->qgroup_ulist = NULL;
2222        fs_info->qgroup_rescan_running = false;
2223        mutex_init(&fs_info->qgroup_rescan_lock);
2224}
2225
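/*
 * The btrfs_alloc_workqueue(fs_info, name, flags, limit_active, thresh)
 * calls below cap each queue at limit_active concurrent work items;
 * thresh is an idle threshold used to tune how aggressively the queue's
 * concurrency is adjusted (0 is treated as the built-in default).
 */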
2226static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2227                struct btrfs_fs_devices *fs_devices)
2228{
2229        u32 max_active = fs_info->thread_pool_size;
2230        unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2231
2232        fs_info->workers =
2233                btrfs_alloc_workqueue(fs_info, "worker",
2234                                      flags | WQ_HIGHPRI, max_active, 16);
2235
2236        fs_info->delalloc_workers =
2237                btrfs_alloc_workqueue(fs_info, "delalloc",
2238                                      flags, max_active, 2);
2239
2240        fs_info->flush_workers =
2241                btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2242                                      flags, max_active, 0);
2243
2244        fs_info->caching_workers =
2245                btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2246
2247        /*
2248         * A higher idle thresh on the submit workers makes it much more
2249         * likely that bios will be sent down in a sane order to the
2250         * devices.
2251         */
2252        fs_info->submit_workers =
2253                btrfs_alloc_workqueue(fs_info, "submit", flags,
2254                                      min_t(u64, fs_devices->num_devices,
2255                                            max_active), 64);
2256
2257        fs_info->fixup_workers =
2258                btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2259
2260        /*
2261         * Endios are largely parallel and should have a very
2262         * low idle thresh.
2263         */
2264        fs_info->endio_workers =
2265                btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2266        fs_info->endio_meta_workers =
2267                btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2268                                      max_active, 4);
2269        fs_info->endio_meta_write_workers =
2270                btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2271                                      max_active, 2);
2272        fs_info->endio_raid56_workers =
2273                btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2274                                      max_active, 4);
2275        fs_info->endio_repair_workers =
2276                btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2277        fs_info->rmw_workers =
2278                btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2279        fs_info->endio_write_workers =
2280                btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2281                                      max_active, 2);
2282        fs_info->endio_freespace_worker =
2283                btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2284                                      max_active, 0);
2285        fs_info->delayed_workers =
2286                btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2287                                      max_active, 0);
2288        fs_info->readahead_workers =
2289                btrfs_alloc_workqueue(fs_info, "readahead", flags,
2290                                      max_active, 2);
2291        fs_info->qgroup_rescan_workers =
2292                btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2293        fs_info->extent_workers =
2294                btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2295                                      min_t(u64, fs_devices->num_devices,
2296                                            max_active), 8);
2297
2298        if (!(fs_info->workers && fs_info->delalloc_workers &&
2299              fs_info->submit_workers && fs_info->flush_workers &&
2300              fs_info->endio_workers && fs_info->endio_meta_workers &&
2301              fs_info->endio_meta_write_workers &&
2302              fs_info->endio_repair_workers &&
2303              fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2304              fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2305              fs_info->caching_workers && fs_info->readahead_workers &&
2306              fs_info->fixup_workers && fs_info->delayed_workers &&
2307              fs_info->extent_workers &&
2308              fs_info->qgroup_rescan_workers)) {
2309                return -ENOMEM;
2310        }
2311
2312        return 0;
2313}
2314
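/*
 * Replay the tree log at mount time: read the log root recorded in the
 * super block and hand it to btrfs_recover_log_trees().  Replay needs a
 * writable device, even when the mount itself is read-only.
 */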
2315static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2316                            struct btrfs_fs_devices *fs_devices)
2317{
2318        int ret;
2319        struct btrfs_root *log_tree_root;
2320        struct btrfs_super_block *disk_super = fs_info->super_copy;
2321        u64 bytenr = btrfs_super_log_root(disk_super);
2322        int level = btrfs_super_log_root_level(disk_super);
2323
2324        if (fs_devices->rw_devices == 0) {
2325                btrfs_warn(fs_info, "log replay required on RO media");
2326                return -EIO;
2327        }
2328
2329        log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2330        if (!log_tree_root)
2331                return -ENOMEM;
2332
2333        __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2334
2335        log_tree_root->node = read_tree_block(fs_info, bytenr,
2336                                              fs_info->generation + 1,
2337                                              level, NULL);
2338        if (IS_ERR(log_tree_root->node)) {
2339                btrfs_warn(fs_info, "failed to read log tree");
2340                ret = PTR_ERR(log_tree_root->node);
2341                kfree(log_tree_root);
2342                return ret;
2343        } else if (!extent_buffer_uptodate(log_tree_root->node)) {
2344                btrfs_err(fs_info, "failed to read log tree");
2345                free_extent_buffer(log_tree_root->node);
2346                kfree(log_tree_root);
2347                return -EIO;
2348        }
2349        /* returns with log_tree_root freed on success */
2350        ret = btrfs_recover_log_trees(log_tree_root);
2351        if (ret) {
2352                btrfs_handle_fs_error(fs_info, ret,
2353                                      "Failed to recover log tree");
2354                free_extent_buffer(log_tree_root->node);
2355                kfree(log_tree_root);
2356                return ret;
2357        }
2358
2359        if (sb_rdonly(fs_info->sb)) {
2360                ret = btrfs_commit_super(fs_info);
2361                if (ret)
2362                        return ret;
2363        }
2364
2365        return 0;
2366}
2367
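/*
 * Read the remaining persistent roots from the tree of tree roots.  The
 * extent, dev and csum trees are mandatory; the quota, uuid and free
 * space trees are optional and only wired up when they exist.
 */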
2368static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2369{
2370        struct btrfs_root *tree_root = fs_info->tree_root;
2371        struct btrfs_root *root;
2372        struct btrfs_key location;
2373        int ret;
2374
2375        BUG_ON(!fs_info->tree_root);
2376
2377        location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2378        location.type = BTRFS_ROOT_ITEM_KEY;
2379        location.offset = 0;
2380
2381        root = btrfs_read_tree_root(tree_root, &location);
2382        if (IS_ERR(root)) {
2383                ret = PTR_ERR(root);
2384                goto out;
2385        }
2386        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2387        fs_info->extent_root = root;
2388
2389        location.objectid = BTRFS_DEV_TREE_OBJECTID;
2390        root = btrfs_read_tree_root(tree_root, &location);
2391        if (IS_ERR(root)) {
2392                ret = PTR_ERR(root);
2393                goto out;
2394        }
2395        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2396        fs_info->dev_root = root;
2397        btrfs_init_devices_late(fs_info);
2398
2399        location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2400        root = btrfs_read_tree_root(tree_root, &location);
2401        if (IS_ERR(root)) {
2402                ret = PTR_ERR(root);
2403                goto out;
2404        }
2405        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2406        fs_info->csum_root = root;
2407
2408        location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2409        root = btrfs_read_tree_root(tree_root, &location);
2410        if (!IS_ERR(root)) {
2411                set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2412                set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2413                fs_info->quota_root = root;
2414        }
2415
2416        location.objectid = BTRFS_UUID_TREE_OBJECTID;
2417        root = btrfs_read_tree_root(tree_root, &location);
2418        if (IS_ERR(root)) {
2419                ret = PTR_ERR(root);
2420                if (ret != -ENOENT)
2421                        goto out;
2422        } else {
2423                set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2424                fs_info->uuid_root = root;
2425        }
2426
2427        if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2428                location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2429                root = btrfs_read_tree_root(tree_root, &location);
2430                if (IS_ERR(root)) {
2431                        ret = PTR_ERR(root);
2432                        goto out;
2433                }
2434                set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2435                fs_info->free_space_root = root;
2436        }
2437
2438        return 0;
2439out:
2440        btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2441                   location.objectid, ret);
2442        return ret;
2443}
2444
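/*
 * The main mount entry point.  Broadly: set up the fs_info machinery,
 * read and validate the super block, start the work queues, read the
 * chunk tree (so logical->physical mapping works), read the tree of tree
 * roots and the other trees, and finally spawn the cleaner and
 * transaction kthreads.  If reading the tree root fails, it retries with
 * the backup roots.
 */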
2445int open_ctree(struct super_block *sb,
2446               struct btrfs_fs_devices *fs_devices,
2447               char *options)
2448{
2449        u32 sectorsize;
2450        u32 nodesize;
2451        u32 stripesize;
2452        u64 generation;
2453        u64 features;
2454        struct btrfs_key location;
2455        struct buffer_head *bh;
2456        struct btrfs_super_block *disk_super;
2457        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2458        struct btrfs_root *tree_root;
2459        struct btrfs_root *chunk_root;
2460        int ret;
2461        int err = -EINVAL;
2462        int num_backups_tried = 0;
2463        int backup_index = 0;
2464        int clear_free_space_tree = 0;
2465        int level;
2466
2467        tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2468        chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2469        if (!tree_root || !chunk_root) {
2470                err = -ENOMEM;
2471                goto fail;
2472        }
2473
2474        ret = init_srcu_struct(&fs_info->subvol_srcu);
2475        if (ret) {
2476                err = ret;
2477                goto fail;
2478        }
2479
2480        ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2481        if (ret) {
2482                err = ret;
2483                goto fail_srcu;
2484        }
2485        fs_info->dirty_metadata_batch = PAGE_SIZE *
2486                                        (1 + ilog2(nr_cpu_ids));
2487
2488        ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2489        if (ret) {
2490                err = ret;
2491                goto fail_dirty_metadata_bytes;
2492        }
2493
2494        ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
2495        if (ret) {
2496                err = ret;
2497                goto fail_delalloc_bytes;
2498        }
2499
2500        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2501        INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2502        INIT_LIST_HEAD(&fs_info->trans_list);
2503        INIT_LIST_HEAD(&fs_info->dead_roots);
2504        INIT_LIST_HEAD(&fs_info->delayed_iputs);
2505        INIT_LIST_HEAD(&fs_info->delalloc_roots);
2506        INIT_LIST_HEAD(&fs_info->caching_block_groups);
2507        INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
2508        spin_lock_init(&fs_info->pending_raid_kobjs_lock);
2509        spin_lock_init(&fs_info->delalloc_root_lock);
2510        spin_lock_init(&fs_info->trans_lock);
2511        spin_lock_init(&fs_info->fs_roots_radix_lock);
2512        spin_lock_init(&fs_info->delayed_iput_lock);
2513        spin_lock_init(&fs_info->defrag_inodes_lock);
2514        spin_lock_init(&fs_info->tree_mod_seq_lock);
2515        spin_lock_init(&fs_info->super_lock);
2516        spin_lock_init(&fs_info->qgroup_op_lock);
2517        spin_lock_init(&fs_info->buffer_lock);
2518        spin_lock_init(&fs_info->unused_bgs_lock);
2519        rwlock_init(&fs_info->tree_mod_log_lock);
2520        mutex_init(&fs_info->unused_bg_unpin_mutex);
2521        mutex_init(&fs_info->delete_unused_bgs_mutex);
2522        mutex_init(&fs_info->reloc_mutex);
2523        mutex_init(&fs_info->delalloc_root_mutex);
2524        mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2525        seqlock_init(&fs_info->profiles_lock);
2526
2527        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2528        INIT_LIST_HEAD(&fs_info->space_info);
2529        INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2530        INIT_LIST_HEAD(&fs_info->unused_bgs);
2531        btrfs_mapping_init(&fs_info->mapping_tree);
2532        btrfs_init_block_rsv(&fs_info->global_block_rsv,
2533                             BTRFS_BLOCK_RSV_GLOBAL);
2534        btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2535        btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2536        btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2537        btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2538                             BTRFS_BLOCK_RSV_DELOPS);
2539        atomic_set(&fs_info->async_delalloc_pages, 0);
2540        atomic_set(&fs_info->defrag_running, 0);
2541        atomic_set(&fs_info->qgroup_op_seq, 0);
2542        atomic_set(&fs_info->reada_works_cnt, 0);
2543        atomic64_set(&fs_info->tree_mod_seq, 0);
2544        fs_info->sb = sb;
2545        fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2546        fs_info->metadata_ratio = 0;
2547        fs_info->defrag_inodes = RB_ROOT;
2548        atomic64_set(&fs_info->free_chunk_space, 0);
2549        fs_info->tree_mod_log = RB_ROOT;
2550        fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2551        fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2552        /* readahead state */
2553        INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2554        spin_lock_init(&fs_info->reada_lock);
2555        btrfs_init_ref_verify(fs_info);
2556
2557        fs_info->thread_pool_size = min_t(unsigned long,
2558                                          num_online_cpus() + 2, 8);
2559
2560        INIT_LIST_HEAD(&fs_info->ordered_roots);
2561        spin_lock_init(&fs_info->ordered_root_lock);
2562
2563        fs_info->btree_inode = new_inode(sb);
2564        if (!fs_info->btree_inode) {
2565                err = -ENOMEM;
2566                goto fail_bio_counter;
2567        }
2568        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2569
2570        fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2571                                        GFP_KERNEL);
2572        if (!fs_info->delayed_root) {
2573                err = -ENOMEM;
2574                goto fail_iput;
2575        }
2576        btrfs_init_delayed_root(fs_info->delayed_root);
2577
2578        btrfs_init_scrub(fs_info);
2579#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2580        fs_info->check_integrity_print_mask = 0;
2581#endif
2582        btrfs_init_balance(fs_info);
2583        btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2584
2585        sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2586        sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2587
2588        btrfs_init_btree_inode(fs_info);
2589
2590        spin_lock_init(&fs_info->block_group_cache_lock);
2591        fs_info->block_group_cache_tree = RB_ROOT;
2592        fs_info->first_logical_byte = (u64)-1;
2593
2594        extent_io_tree_init(&fs_info->freed_extents[0], NULL);
2595        extent_io_tree_init(&fs_info->freed_extents[1], NULL);
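        /*
         * Note: pinned_extents flips between the two freed_extents trees
         * at transaction commit time, so extents freed in the running
         * transaction stay pinned until that commit finishes.
         */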
2596        fs_info->pinned_extents = &fs_info->freed_extents[0];
2597        set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2598
2599        mutex_init(&fs_info->ordered_operations_mutex);
2600        mutex_init(&fs_info->tree_log_mutex);
2601        mutex_init(&fs_info->chunk_mutex);
2602        mutex_init(&fs_info->transaction_kthread_mutex);
2603        mutex_init(&fs_info->cleaner_mutex);
2604        mutex_init(&fs_info->volume_mutex);
2605        mutex_init(&fs_info->ro_block_group_mutex);
2606        init_rwsem(&fs_info->commit_root_sem);
2607        init_rwsem(&fs_info->cleanup_work_sem);
2608        init_rwsem(&fs_info->subvol_sem);
2609        sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2610
2611        btrfs_init_dev_replace_locks(fs_info);
2612        btrfs_init_qgroup(fs_info);
2613
2614        btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2615        btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2616
2617        init_waitqueue_head(&fs_info->transaction_throttle);
2618        init_waitqueue_head(&fs_info->transaction_wait);
2619        init_waitqueue_head(&fs_info->transaction_blocked_wait);
2620        init_waitqueue_head(&fs_info->async_submit_wait);
2621
2622        INIT_LIST_HEAD(&fs_info->pinned_chunks);
2623
2624        /* Usable values until the real ones are cached from the superblock */
2625        fs_info->nodesize = 4096;
2626        fs_info->sectorsize = 4096;
2627        fs_info->stripesize = 4096;
2628
2629        ret = btrfs_alloc_stripe_hash_table(fs_info);
2630        if (ret) {
2631                err = ret;
2632                goto fail_alloc;
2633        }
2634
2635        __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2636
2637        invalidate_bdev(fs_devices->latest_bdev);
2638
2639        /*
2640         * Read the super block and check the signature bytes only.
2641         */
2642        bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2643        if (IS_ERR(bh)) {
2644                err = PTR_ERR(bh);
2645                goto fail_alloc;
2646        }
2647
2648        /*
2649         * We want to check superblock checksum, the type is stored inside.
2650         * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2651         */
2652        if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2653                btrfs_err(fs_info, "superblock checksum mismatch");
2654                err = -EINVAL;
2655                brelse(bh);
2656                goto fail_alloc;
2657        }
2658
2659        /*
2660         * super_copy is zeroed at allocation time and we never touch the
2661         * following bytes up to INFO_SIZE; the checksum is calculated from
2662         * the whole block of INFO_SIZE.
2663         */
2664        memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2665        memcpy(fs_info->super_for_commit, fs_info->super_copy,
2666               sizeof(*fs_info->super_for_commit));
2667        brelse(bh);
2668
2669        memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2670
2671        ret = btrfs_check_super_valid(fs_info);
2672        if (ret) {
2673                btrfs_err(fs_info, "superblock contains fatal errors");
2674                err = -EINVAL;
2675                goto fail_alloc;
2676        }
2677
2678        disk_super = fs_info->super_copy;
2679        if (!btrfs_super_root(disk_super))
2680                goto fail_alloc;
2681
2682        /* Check the FS state to see whether the FS is broken. */
2683        if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2684                set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2685
2686        /*
2687         * Run through our array of backup supers and set up
2688         * our ring pointer to the oldest one.
2689         */
2690        generation = btrfs_super_generation(disk_super);
2691        find_oldest_super_backup(fs_info, generation);
2692
2693        /*
2694         * In the long term, we'll store the compression type in the super
2695         * block, and it'll be used for per-file compression control.
2696         */
2697        fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2698
2699        ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2700        if (ret) {
2701                err = ret;
2702                goto fail_alloc;
2703        }
2704
2705        features = btrfs_super_incompat_flags(disk_super) &
2706                ~BTRFS_FEATURE_INCOMPAT_SUPP;
2707        if (features) {
2708                btrfs_err(fs_info,
2709                    "cannot mount because of unsupported optional features (%llx)",
2710                    features);
2711                err = -EINVAL;
2712                goto fail_alloc;
2713        }
2714
2715        features = btrfs_super_incompat_flags(disk_super);
2716        features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2717        if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2718                features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2719        else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
2720                features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
2721
2722        if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2723                btrfs_info(fs_info, "has skinny extents");
2724
2725        /*
2726         * Flag our filesystem as having big metadata blocks if
2727         * they are bigger than the page size.
2728         */
2729        if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2730                if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2731                        btrfs_info(fs_info,
2732                                "flagging fs with big metadata feature");
2733                features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2734        }
2735
2736        nodesize = btrfs_super_nodesize(disk_super);
2737        sectorsize = btrfs_super_sectorsize(disk_super);
2738        stripesize = sectorsize;
2739        fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2740        fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2741
2742        /* Cache block sizes */
2743        fs_info->nodesize = nodesize;
2744        fs_info->sectorsize = sectorsize;
2745        fs_info->stripesize = stripesize;
2746
2747        /*
2748         * Mixed block groups end up with duplicate but slightly offset
2749         * extent buffers for the same range.  This leads to corruption.
2750         */
2751        if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2752            (sectorsize != nodesize)) {
2753                btrfs_err(fs_info,
2754"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2755                        nodesize, sectorsize);
2756                goto fail_alloc;
2757        }
2758
2759        /*
2760         * No need to take the lock, because no other task can update the
2761         * flag at this point.
2762         */
2763        btrfs_set_super_incompat_flags(disk_super, features);
2764
2765        features = btrfs_super_compat_ro_flags(disk_super) &
2766                ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2767        if (!sb_rdonly(sb) && features) {
2768                btrfs_err(fs_info,
2769        "cannot mount read-write because of unsupported optional features (%llx)",
2770                       features);
2771                err = -EINVAL;
2772                goto fail_alloc;
2773        }
2774
2775        ret = btrfs_init_workqueues(fs_info, fs_devices);
2776        if (ret) {
2777                err = ret;
2778                goto fail_sb_buffer;
2779        }
2780
2781        sb->s_bdi->congested_fn = btrfs_congested_fn;
2782        sb->s_bdi->congested_data = fs_info;
2783        sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
2784        sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE;
2785        sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
2786        sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
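        /*
         * Net effect: scale the default readahead window by the number of
         * devices, with a 4MiB floor.
         */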
2787
2788        sb->s_blocksize = sectorsize;
2789        sb->s_blocksize_bits = blksize_bits(sectorsize);
2790        memcpy(&sb->s_uuid, fs_info->fsid, BTRFS_FSID_SIZE);
2791
2792        mutex_lock(&fs_info->chunk_mutex);
2793        ret = btrfs_read_sys_array(fs_info);
2794        mutex_unlock(&fs_info->chunk_mutex);
2795        if (ret) {
2796                btrfs_err(fs_info, "failed to read the system array: %d", ret);
2797                goto fail_sb_buffer;
2798        }
2799
2800        generation = btrfs_super_chunk_root_generation(disk_super);
2801        level = btrfs_super_chunk_root_level(disk_super);
2802
2803        __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2804
2805        chunk_root->node = read_tree_block(fs_info,
2806                                           btrfs_super_chunk_root(disk_super),
2807                                           generation, level, NULL);
2808        if (IS_ERR(chunk_root->node) ||
2809            !extent_buffer_uptodate(chunk_root->node)) {
2810                btrfs_err(fs_info, "failed to read chunk root");
2811                if (!IS_ERR(chunk_root->node))
2812                        free_extent_buffer(chunk_root->node);
2813                chunk_root->node = NULL;
2814                goto fail_tree_roots;
2815        }
2816        btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2817        chunk_root->commit_root = btrfs_root_node(chunk_root);
2818
2819        read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2820           btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2821
2822        ret = btrfs_read_chunk_tree(fs_info);
2823        if (ret) {
2824                btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2825                goto fail_tree_roots;
2826        }
2827
2828        /*
2829         * Keep the devid that is marked to be the target device for the
2830         * device replace procedure
2831         */
2832        btrfs_free_extra_devids(fs_devices, 0);
2833
2834        if (!fs_devices->latest_bdev) {
2835                btrfs_err(fs_info, "failed to read devices");
2836                goto fail_tree_roots;
2837        }
2838
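            /*
             * Read the root tree. If this fails and the usebackuproot mount
             * option is set, recovery_tree_root below loads the next backup
             * root from the superblock and jumps back here.
             */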
2839retry_root_backup:
2840        generation = btrfs_super_generation(disk_super);
2841        level = btrfs_super_root_level(disk_super);
2842
2843        tree_root->node = read_tree_block(fs_info,
2844                                          btrfs_super_root(disk_super),
2845                                          generation, level, NULL);
2846        if (IS_ERR(tree_root->node) ||
2847            !extent_buffer_uptodate(tree_root->node)) {
2848                btrfs_warn(fs_info, "failed to read tree root");
2849                if (!IS_ERR(tree_root->node))
2850                        free_extent_buffer(tree_root->node);
2851                tree_root->node = NULL;
2852                goto recovery_tree_root;
2853        }
2854
2855        btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2856        tree_root->commit_root = btrfs_root_node(tree_root);
2857        btrfs_set_root_refs(&tree_root->root_item, 1);
2858
2859        mutex_lock(&tree_root->objectid_mutex);
2860        ret = btrfs_find_highest_objectid(tree_root,
2861                                        &tree_root->highest_objectid);
2862        if (ret) {
2863                mutex_unlock(&tree_root->objectid_mutex);
2864                goto recovery_tree_root;
2865        }
2866
2867        ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2868
2869        mutex_unlock(&tree_root->objectid_mutex);
2870
2871        ret = btrfs_read_roots(fs_info);
2872        if (ret)
2873                goto recovery_tree_root;
2874
2875        fs_info->generation = generation;
2876        fs_info->last_trans_committed = generation;
2877
2878        ret = btrfs_recover_balance(fs_info);
2879        if (ret) {
2880                btrfs_err(fs_info, "failed to recover balance: %d", ret);
2881                goto fail_block_groups;
2882        }
2883
2884        ret = btrfs_init_dev_stats(fs_info);
2885        if (ret) {
2886                btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
2887                goto fail_block_groups;
2888        }
2889
2890        ret = btrfs_init_dev_replace(fs_info);
2891        if (ret) {
2892                btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
2893                goto fail_block_groups;
2894        }
2895
2896        btrfs_free_extra_devids(fs_devices, 1);
2897
2898        ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
2899        if (ret) {
2900                btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
2901                                ret);
2902                goto fail_block_groups;
2903        }
2904
2905        ret = btrfs_sysfs_add_device(fs_devices);
2906        if (ret) {
2907                btrfs_err(fs_info, "failed to init sysfs device interface: %d",
2908                                ret);
2909                goto fail_fsdev_sysfs;
2910        }
2911
2912        ret = btrfs_sysfs_add_mounted(fs_info);
2913        if (ret) {
2914                btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
2915                goto fail_fsdev_sysfs;
2916        }
2917
2918        ret = btrfs_init_space_info(fs_info);
2919        if (ret) {
2920                btrfs_err(fs_info, "failed to initialize space info: %d", ret);
2921                goto fail_sysfs;
2922        }
2923
2924        ret = btrfs_read_block_groups(fs_info);
2925        if (ret) {
2926                btrfs_err(fs_info, "failed to read block groups: %d", ret);
2927                goto fail_sysfs;
2928        }
2929
2930        if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
2931                btrfs_warn(fs_info,
2932                "writeable mount is not allowed due to too many missing devices");
2933                goto fail_sysfs;
2934        }
2935
2936        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2937                                               "btrfs-cleaner");
2938        if (IS_ERR(fs_info->cleaner_kthread))
2939                goto fail_sysfs;
2940
2941        fs_info->transaction_kthread = kthread_run(transaction_kthread,
2942                                                   tree_root,
2943                                                   "btrfs-transaction");
2944        if (IS_ERR(fs_info->transaction_kthread))
2945                goto fail_cleaner;
2946
2947        if (!btrfs_test_opt(fs_info, NOSSD) &&
2948            !fs_info->fs_devices->rotating) {
2949                btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
2950        }
2951
2952        /*
2953         * Mount does not set all options immediately; we can do it now and
2954         * do not have to wait for a transaction commit.
2955         */
2956        btrfs_apply_pending_changes(fs_info);
2957
2958#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2959        if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
2960                ret = btrfsic_mount(fs_info, fs_devices,
2961                                    btrfs_test_opt(fs_info,
2962                                        CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2963                                    1 : 0,
2964                                    fs_info->check_integrity_print_mask);
2965                if (ret)
2966                        btrfs_warn(fs_info,
2967                                "failed to initialize integrity check module: %d",
2968                                ret);
2969        }
2970#endif
2971        ret = btrfs_read_qgroup_config(fs_info);
2972        if (ret)
2973                goto fail_trans_kthread;
2974
2975        if (btrfs_build_ref_tree(fs_info))
2976                btrfs_err(fs_info, "couldn't build ref tree");
2977
2978        /* Do not make disk changes in a broken FS or when nologreplay is given */
2979        if (btrfs_super_log_root(disk_super) != 0 &&
2980            !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
2981                ret = btrfs_replay_log(fs_info, fs_devices);
2982                if (ret) {
2983                        err = ret;
2984                        goto fail_qgroup;
2985                }
2986        }
2987
2988        ret = btrfs_find_orphan_roots(fs_info);
2989        if (ret)
2990                goto fail_qgroup;
2991
2992        if (!sb_rdonly(sb)) {
2993                ret = btrfs_cleanup_fs_roots(fs_info);
2994                if (ret)
2995                        goto fail_qgroup;
2996
2997                mutex_lock(&fs_info->cleaner_mutex);
2998                ret = btrfs_recover_relocation(tree_root);
2999                mutex_unlock(&fs_info->cleaner_mutex);
3000                if (ret < 0) {
3001                        btrfs_warn(fs_info, "failed to recover relocation: %d",
3002                                        ret);
3003                        err = -EINVAL;
3004                        goto fail_qgroup;
3005                }
3006        }
3007
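            /* Finally, look up the root of the fs tree, the top-level subvolume. */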
3008        location.objectid = BTRFS_FS_TREE_OBJECTID;
3009        location.type = BTRFS_ROOT_ITEM_KEY;
3010        location.offset = 0;
3011
3012        fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3013        if (IS_ERR(fs_info->fs_root)) {
3014                err = PTR_ERR(fs_info->fs_root);
3015                btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3016                goto fail_qgroup;
3017        }
3018
3019        if (sb_rdonly(sb))
3020                return 0;
3021
3022        if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3023            btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3024                clear_free_space_tree = 1;
3025        } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3026                   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3027                btrfs_warn(fs_info, "free space tree is invalid");
3028                clear_free_space_tree = 1;
3029        }
3030
3031        if (clear_free_space_tree) {
3032                btrfs_info(fs_info, "clearing free space tree");
3033                ret = btrfs_clear_free_space_tree(fs_info);
3034                if (ret) {
3035                        btrfs_warn(fs_info,
3036                                   "failed to clear free space tree: %d", ret);
3037                        close_ctree(fs_info);
3038                        return ret;
3039                }
3040        }
3041
3042        if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3043            !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3044                btrfs_info(fs_info, "creating free space tree");
3045                ret = btrfs_create_free_space_tree(fs_info);
3046                if (ret) {
3047                        btrfs_warn(fs_info,
3048                                "failed to create free space tree: %d", ret);
3049                        close_ctree(fs_info);
3050                        return ret;
3051                }
3052        }
3053
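            /*
             * Run orphan cleanup for the fs and tree roots under
             * cleanup_work_sem so that unmount can wait for it to finish.
             */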
3054        down_read(&fs_info->cleanup_work_sem);
3055        if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3056            (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3057                up_read(&fs_info->cleanup_work_sem);
3058                close_ctree(fs_info);
3059                return ret;
3060        }
3061        up_read(&fs_info->cleanup_work_sem);
3062
3063        ret = btrfs_resume_balance_async(fs_info);
3064        if (ret) {
3065                btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3066                close_ctree(fs_info);
3067                return ret;
3068        }
3069
3070        ret = btrfs_resume_dev_replace_async(fs_info);
3071        if (ret) {
3072                btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3073                close_ctree(fs_info);
3074                return ret;
3075        }
3076
3077        btrfs_qgroup_rescan_resume(fs_info);
3078
3079        if (!fs_info->uuid_root) {
3080                btrfs_info(fs_info, "creating UUID tree");
3081                ret = btrfs_create_uuid_tree(fs_info);
3082                if (ret) {
3083                        btrfs_warn(fs_info,
3084                                "failed to create the UUID tree: %d", ret);
3085                        close_ctree(fs_info);
3086                        return ret;
3087                }
3088        } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3089                   fs_info->generation !=
3090                                btrfs_super_uuid_tree_generation(disk_super)) {
3091                btrfs_info(fs_info, "checking UUID tree");
3092                ret = btrfs_check_uuid_tree(fs_info);
3093                if (ret) {
3094                        btrfs_warn(fs_info,
3095                                "failed to check the UUID tree: %d", ret);
3096                        close_ctree(fs_info);
3097                        return ret;
3098                }
3099        } else {
3100                set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3101        }
3102        set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3103
3104        /*
3105         * backuproot only affects mount behavior; if open_ctree succeeded,
3106         * there is no need to keep the flag.
3107         */
3108        btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3109
3110        return 0;
3111
3112fail_qgroup:
3113        btrfs_free_qgroup_config(fs_info);
3114fail_trans_kthread:
3115        kthread_stop(fs_info->transaction_kthread);
3116        btrfs_cleanup_transaction(fs_info);
3117        btrfs_free_fs_roots(fs_info);
3118fail_cleaner:
3119        kthread_stop(fs_info->cleaner_kthread);
3120
3121        /*
3122         * make sure we're done with the btree inode before we stop our
3123         * kthreads
3124         */
3125        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3126
3127fail_sysfs:
3128        btrfs_sysfs_remove_mounted(fs_info);
3129
3130fail_fsdev_sysfs:
3131        btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3132
3133fail_block_groups:
3134        btrfs_put_block_group_cache(fs_info);
3135
3136fail_tree_roots:
3137        free_root_pointers(fs_info, 1);
3138        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3139
3140fail_sb_buffer:
3141        btrfs_stop_all_workers(fs_info);
3142        btrfs_free_block_groups(fs_info);
3143fail_alloc:
3144fail_iput:
3145        btrfs_mapping_tree_free(&fs_info->mapping_tree);
3146
3147        iput(fs_info->btree_inode);
3148fail_bio_counter:
3149        percpu_counter_destroy(&fs_info->bio_counter);
3150fail_delalloc_bytes:
3151        percpu_counter_destroy(&fs_info->delalloc_bytes);
3152fail_dirty_metadata_bytes:
3153        percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3154fail_srcu:
3155        cleanup_srcu_struct(&fs_info->subvol_srcu);
3156fail:
3157        btrfs_free_stripe_hash_table(fs_info);
3158        btrfs_close_devices(fs_info->fs_devices);
3159        return err;
3160
3161recovery_tree_root:
3162        if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3163                goto fail_tree_roots;
3164
3165        free_root_pointers(fs_info, 0);
3166
3167        /* Don't use the log in recovery mode; it won't be valid */
3168        btrfs_set_super_log_root(disk_super, 0);
3169
3170        /* we can't trust the free space cache either */
3171        btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3172
3173        ret = next_root_backup(fs_info, fs_info->super_copy,
3174                               &num_backups_tried, &backup_index);
3175        if (ret == -1)
3176                goto fail_block_groups;
3177        goto retry_root_backup;
3178}
3179ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3180
3181static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3182{
3183        if (uptodate) {
3184                set_buffer_uptodate(bh);
3185        } else {
3186                struct btrfs_device *device = (struct btrfs_device *)
3187                        bh->b_private;
3188
3189                btrfs_warn_rl_in_rcu(device->fs_info,
3190                                "lost page write due to IO error on %s",
3191                                          rcu_str_deref(device->name));
3192                /* Note: we don't set_buffer_write_io_error() because we
3193                 * have our own ways of dealing with the IO errors.
3194                 */
3195                clear_buffer_uptodate(bh);
3196                btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3197        }
3198        unlock_buffer(bh);
3199        put_bh(bh);
3200}
3201
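    /*
     * Read a single superblock copy (@copy_num) from @bdev and do basic
     * sanity checks on its bytenr and magic. On success the buffer head is
     * returned via @bh_ret with a reference held that the caller must
     * release with brelse().
     */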
3202int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3203                        struct buffer_head **bh_ret)
3204{
3205        struct buffer_head *bh;
3206        struct btrfs_super_block *super;
3207        u64 bytenr;
3208
3209        bytenr = btrfs_sb_offset(copy_num);
3210        if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3211                return -EINVAL;
3212
3213        bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
3214        /*
3215         * If we fail to read from the underlying devices, the best
3216         * option we have for now is to return -EIO.
3217         */
3218        if (!bh)
3219                return -EIO;
3220
3221        super = (struct btrfs_super_block *)bh->b_data;
3222        if (btrfs_super_bytenr(super) != bytenr ||
3223                    btrfs_super_magic(super) != BTRFS_MAGIC) {
3224                brelse(bh);
3225                return -EINVAL;
3226        }
3227
3228        *bh_ret = bh;
3229        return 0;
3230}
3231
3232
3233struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3234{
3235        struct buffer_head *bh;
3236        struct buffer_head *latest = NULL;
3237        struct btrfs_super_block *super;
3238        int i;
3239        u64 transid = 0;
3240        int ret = -EINVAL;
3241
3242        /* We would like to check all the supers, but that would make
3243         * a btrfs mount succeed after a mkfs from a different FS. So
3244         * only the first super is checked here; a full scan of the
3245         * later supers (BTRFS_SUPER_MIRROR_MAX) would need a special
3246         * mount option.
3247        for (i = 0; i < 1; i++) {
3248                ret = btrfs_read_dev_one_super(bdev, i, &bh);
3249                if (ret)
3250                        continue;
3251
3252                super = (struct btrfs_super_block *)bh->b_data;
3253
3254                if (!latest || btrfs_super_generation(super) > transid) {
3255                        brelse(latest);
3256                        latest = bh;
3257                        transid = btrfs_super_generation(super);
3258                } else {
3259                        brelse(bh);
3260                }
3261        }
3262
3263        if (!latest)
3264                return ERR_PTR(ret);
3265
3266        return latest;
3267}
3268
3269/*
3270 * Write superblock @sb to @device. Do not wait for completion; all the
3271 * buffer heads we write are pinned.
3272 *
3273 * Write @max_mirrors copies of the superblock, where 0 means the default:
3274 * all the copies that fit the expected device size at commit time. Note
3275 * that max_mirrors must be the same for the write and wait phases.
3276 *
3277 * Return 0 if at least one copy succeeded, or -1 if none could be written.
3278 */
3279static int write_dev_supers(struct btrfs_device *device,
3280                            struct btrfs_super_block *sb, int max_mirrors)
3281{
3282        struct buffer_head *bh;
3283        int i;
3284        int ret;
3285        int errors = 0;
3286        u32 crc;
3287        u64 bytenr;
3288        int op_flags;
3289
3290        if (max_mirrors == 0)
3291                max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3292
3293        for (i = 0; i < max_mirrors; i++) {
3294                bytenr = btrfs_sb_offset(i);
3295                if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3296                    device->commit_total_bytes)
3297                        break;
3298
3299                btrfs_set_super_bytenr(sb, bytenr);
3300
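                    /*
                     * The checksum covers everything after the csum field
                     * and must be recomputed for each copy because the
                     * bytenr stored just above differs per copy.
                     */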
3301                crc = ~(u32)0;
3302                crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc,
3303                                      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
3304                btrfs_csum_final(crc, sb->csum);
3305
3306                /* One reference for us; it is dropped in wait_dev_supers() */
3307                bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
3308                              BTRFS_SUPER_INFO_SIZE);
3309                if (!bh) {
3310                        btrfs_err(device->fs_info,
3311                            "couldn't get super buffer head for bytenr %llu",
3312                            bytenr);
3313                        errors++;
3314                        continue;
3315                }
3316
3317                memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3318
3319                /* one reference for submit_bh */
3320                get_bh(bh);
3321
3322                set_buffer_uptodate(bh);
3323                lock_buffer(bh);
3324                bh->b_end_io = btrfs_end_buffer_write_sync;
3325                bh->b_private = device;
3326
3327                /*
3328                 * We FUA the first super. The others we allow to
3329                 * go down lazily.
3330                 */
3331                op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
3332                if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3333                        op_flags |= REQ_FUA;
3334                ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
3335                if (ret)
3336                        errors++;
3337        }
3338        return errors < i ? 0 : -1;
3339}
3340
3341/*
3342 * Wait for write completion of the superblock copies submitted by
3343 * write_dev_supers(); @max_mirrors must match the write phase.
3344 *
3345 * Return 0 if enough copies completed, or -1 if the primary copy failed
3346 * or no copy could be verified.
3347 */
3348static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3349{
3350        struct buffer_head *bh;
3351        int i;
3352        int errors = 0;
3353        bool primary_failed = false;
3354        u64 bytenr;
3355
3356        if (max_mirrors == 0)
3357                max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3358
3359        for (i = 0; i < max_mirrors; i++) {
3360                bytenr = btrfs_sb_offset(i);
3361                if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3362                    device->commit_total_bytes)
3363                        break;
3364
3365                bh = __find_get_block(device->bdev,
3366                                      bytenr / BTRFS_BDEV_BLOCKSIZE,
3367                                      BTRFS_SUPER_INFO_SIZE);
3368                if (!bh) {
3369                        errors++;
3370                        if (i == 0)
3371                                primary_failed = true;
3372                        continue;
3373                }
3374                wait_on_buffer(bh);
3375                if (!buffer_uptodate(bh)) {
3376                        errors++;
3377                        if (i == 0)
3378                                primary_failed = true;
3379                }
3380
3381                /* drop our reference */
3382                brelse(bh);
3383
3384                /* drop the reference from the writing run */
3385                brelse(bh);
3386        }
3387
3388        /* log error, force error return */
3389        if (primary_failed) {
3390                btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3391                          device->devid);
3392                return -1;
3393        }
3394
3395        return errors < i ? 0 : -1;
3396}
3397
3398/*
3399 * Endio for write_dev_flush; this wakes anyone waiting for the
3400 * barrier when it is done.
3401 */
3402static void btrfs_end_empty_barrier(struct bio *bio)
3403{
3404        complete(bio->bi_private);
3405}
3406
3407/*
3408 * Submit a flush request to the device if it supports it. Error handling is
3409 * done in the waiting counterpart.
3410 */
3411static void write_dev_flush(struct btrfs_device *device)
3412{
3413        struct request_queue *q = bdev_get_queue(device->bdev);
3414        struct bio *bio = device->flush_bio;
3415
3416        if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3417                return;
3418
3419        bio_reset(bio);
3420        bio->bi_end_io = btrfs_end_empty_barrier;
3421        bio_set_dev(bio, device->bdev);
3422        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3423        init_completion(&device->flush_wait);
3424        bio->bi_private = &device->flush_wait;
3425
3426        btrfsic_submit_bio(bio);
3427        set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3428}
3429
3430/*
3431 * If the flush bio has been submitted by write_dev_flush, wait for it.
3432 */
3433static blk_status_t wait_dev_flush(struct btrfs_device *device)
3434{
3435        struct bio *bio = device->flush_bio;
3436
3437        if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3438                return BLK_STS_OK;
3439
3440        clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3441        wait_for_completion_io(&device->flush_wait);
3442
3443        return bio->bi_status;
3444}
3445
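    /*
     * After flush errors the filesystem is still considered writeable as
     * long as the surviving devices satisfy the redundancy requirements of
     * all block group profiles (btrfs_check_rw_degradable).
     */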
3446static int check_barrier_error(struct btrfs_fs_info *fs_info)
3447{
3448        if (!btrfs_check_rw_degradable(fs_info, NULL))
3449                return -EIO;
3450        return 0;
3451}
3452
3453/*
3454 * Send an empty flush down to each device in parallel,
3455 * then wait for them.
3456 */
3457static int barrier_all_devices(struct btrfs_fs_info *info)
3458{
3459        struct list_head *head;
3460        struct btrfs_device *dev;
3461        int errors_wait = 0;
3462        blk_status_t ret;
3463
3464        lockdep_assert_held(&info->fs_devices->device_list_mutex);
3465        /* send down all the barriers */
3466        head = &info->fs_devices->devices;
3467        list_for_each_entry(dev, head, dev_list) {
3468                if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3469                        continue;
3470                if (!dev->bdev)
3471                        continue;
3472                if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3473                    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3474                        continue;
3475
3476                write_dev_flush(dev);
3477                dev->last_flush_error = BLK_STS_OK;
3478        }
3479
3480        /* wait for all the barriers */
3481        list_for_each_entry(dev, head, dev_list) {
3482                if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3483                        continue;
3484                if (!dev->bdev) {
3485                        errors_wait++;
3486                        continue;
3487                }
3488                if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3489                    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3490                        continue;
3491
3492                ret = wait_dev_flush(dev);
3493                if (ret) {
3494                        dev->last_flush_error = ret;
3495                        btrfs_dev_stat_inc_and_print(dev,
3496                                        BTRFS_DEV_STAT_FLUSH_ERRS);
3497                        errors_wait++;
3498                }
3499        }
3500
3501        if (errors_wait) {
3502                /*
3503                 * At some point we need the status of all disks to
3504                 * arrive at the volume status, so error checking is
3505                 * pushed to a separate pass.
3506                 */
3507                return check_barrier_error(info);
3508        }
3509        return 0;
3510}
3511
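    /*
     * For the block group profiles set in @flags, return how many device
     * flush failures can be tolerated: the minimum tolerated_failures over
     * all profiles present (no profile bits counts as SINGLE).
     */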
3512int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3513{
3514        int raid_type;
3515        int min_tolerated = INT_MAX;
3516
3517        if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3518            (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3519                min_tolerated = min(min_tolerated,
3520                                    btrfs_raid_array[BTRFS_RAID_SINGLE].
3521                                    tolerated_failures);
3522
3523        for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3524                if (raid_type == BTRFS_RAID_SINGLE)
3525                        continue;
3526                if (!(flags & btrfs_raid_group[raid_type]))
3527                        continue;
3528                min_tolerated = min(min_tolerated,
3529                                    btrfs_raid_array[raid_type].
3530                                    tolerated_failures);
3531        }
3532
3533        if (min_tolerated == INT_MAX) {
3534                pr_warn("BTRFS: unknown raid flag: %llu", flags);
3535                min_tolerated = 0;
3536        }
3537
3538        return min_tolerated;
3539}
3540
3541int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3542{
3543        struct list_head *head;
3544        struct btrfs_device *dev;
3545        struct btrfs_super_block *sb;
3546        struct btrfs_dev_item *dev_item;
3547        int ret;
3548        int do_barriers;
3549        int max_errors;
3550        int total_errors = 0;
3551        u64 flags;
3552
3553        do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3554
3555        /*
3556         * max_mirrors == 0 indicates we're called from commit_transaction,
3557         * not from fsync, where the tree roots in fs_info may not yet be
3558         * consistent on disk.
3559         */
3560        if (max_mirrors == 0)
3561                backup_super_roots(fs_info);
3562
3563        sb = fs_info->super_for_commit;
3564        dev_item = &sb->dev_item;
3565
3566        mutex_lock(&fs_info->fs_devices->device_list_mutex);
3567        head = &fs_info->fs_devices->devices;
3568        max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3569
3570        if (do_barriers) {
3571                ret = barrier_all_devices(fs_info);
3572                if (ret) {
3573                        mutex_unlock(
3574                                &fs_info->fs_devices->device_list_mutex);
3575                        btrfs_handle_fs_error(fs_info, ret,
3576                                              "errors while submitting device barriers.");
3577                        return ret;
3578                }
3579        }
3580
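            /*
             * Phase one: stamp the per-device items into the super copy and
             * submit the writes to every writeable device without waiting;
             * completion is collected below via wait_dev_supers().
             */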
3581        list_for_each_entry(dev, head, dev_list) {
3582                if (!dev->bdev) {
3583                        total_errors++;
3584                        continue;
3585                }
3586                if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3587                    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3588                        continue;
3589
3590                btrfs_set_stack_device_generation(dev_item, 0);
3591                btrfs_set_stack_device_type(dev_item, dev->type);
3592                btrfs_set_stack_device_id(dev_item, dev->devid);
3593                btrfs_set_stack_device_total_bytes(dev_item,
3594                                                   dev->commit_total_bytes);
3595                btrfs_set_stack_device_bytes_used(dev_item,
3596                                                  dev->commit_bytes_used);
3597                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3598                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3599                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3600                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3601                memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE);
3602
3603                flags = btrfs_super_flags(sb);
3604                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3605
3606                ret = write_dev_supers(dev, sb, max_mirrors);
3607                if (ret)
3608                        total_errors++;
3609        }
3610        if (total_errors > max_errors) {
3611                btrfs_err(fs_info, "%d errors while writing supers",
3612                          total_errors);
3613                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3614
3615                /* FUA is masked off if unsupported and can't be the reason */
3616                btrfs_handle_fs_error(fs_info, -EIO,
3617                                      "%d errors while writing supers",
3618                                      total_errors);
3619                return -EIO;
3620        }
3621
3622        total_errors = 0;
3623        list_for_each_entry(dev, head, dev_list) {
3624                if (!dev->bdev)
3625                        continue;
3626                if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3627                    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3628                        continue;
3629
3630                ret = wait_dev_supers(dev, max_mirrors);
3631                if (ret)
3632                        total_errors++;
3633        }
3634        mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3635        if (total_errors > max_errors) {
3636                btrfs_handle_fs_error(fs_info, -EIO,
3637                                      "%d errors while writing supers",
3638                                      total_errors);
3639                return -EIO;
3640        }
3641        return 0;
3642}
3643
3644/* Drop a fs root from the radix tree and free it. */
3645void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3646                                  struct btrfs_root *root)
3647{
3648        spin_lock(&fs_info->fs_roots_radix_lock);
3649        radix_tree_delete(&fs_info->fs_roots_radix,
3650                          (unsigned long)root->root_key.objectid);
3651        spin_unlock(&fs_info->fs_roots_radix_lock);
3652
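            /*
             * If the last on-disk reference is gone, wait for srcu readers
             * that may still hold a pointer found in the radix tree.
             */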
3653        if (btrfs_root_refs(&root->root_item) == 0)
3654                synchronize_srcu(&fs_info->subvol_srcu);
3655
3656        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3657                btrfs_free_log(NULL, root);
3658                if (root->reloc_root) {
3659                        free_extent_buffer(root->reloc_root->node);
3660                        free_extent_buffer(root->reloc_root->commit_root);
3661                        btrfs_put_fs_root(root->reloc_root);
3662                        root->reloc_root = NULL;
3663                }
3664        }
3665
3666        if (root->free_ino_pinned)
3667                __btrfs_remove_free_space_cache(root->free_ino_pinned);
3668        if (root->free_ino_ctl)
3669                __btrfs_remove_free_space_cache(root->free_ino_ctl);
3670        free_fs_root(root);
3671}
3672
3673static void free_fs_root(struct btrfs_root *root)
3674{
3675        iput(root->ino_cache_inode);
3676        WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3677        btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
3678        root->orphan_block_rsv = NULL;
3679        if (root->anon_dev)
3680                free_anon_bdev(root->anon_dev);
3681        if (root->subv_writers)
3682                btrfs_free_subvolume_writers(root->subv_writers);
3683        free_extent_buffer(root->node);
3684        free_extent_buffer(root->commit_root);
3685        kfree(root->free_ino_ctl);
3686        kfree(root->free_ino_pinned);
3687        kfree(root->name);
3688        btrfs_put_fs_root(root);
3689}
3690
3691void btrfs_free_fs_root(struct btrfs_root *root)
3692{
3693        free_fs_root(root);
3694}
3695
3696int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3697{
3698        u64 root_objectid = 0;
3699        struct btrfs_root *gang[8];
3700        int i = 0;
3701        int err = 0;
3702        unsigned int ret = 0;
3703        int index;
3704
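            /*
             * Walk the fs roots radix tree in batches of 8: take references
             * under srcu, then run orphan cleanup on each root outside the
             * srcu read section.
             */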
3705        while (1) {
3706                index = srcu_read_lock(&fs_info->subvol_srcu);
3707                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3708                                             (void **)gang, root_objectid,
3709                                             ARRAY_SIZE(gang));
3710                if (!ret) {
3711                        srcu_read_unlock(&fs_info->subvol_srcu, index);
3712                        break;
3713                }
3714                root_objectid = gang[ret - 1]->root_key.objectid + 1;
3715
3716                for (i = 0; i < ret; i++) {
3717                        /* Avoid grabbing roots in dead_roots */
3718                        if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3719                                gang[i] = NULL;
3720                                continue;
3721                        }
3722                        /* Grab all the search results for later use */
3723                        gang[i] = btrfs_grab_fs_root(gang[i]);
3724                }
3725                srcu_read_unlock(&fs_info->subvol_srcu, index);
3726
3727                for (i = 0; i < ret; i++) {
3728                        if (!gang[i])
3729                                continue;
3730                        root_objectid = gang[i]->root_key.objectid;
3731                        err = btrfs_orphan_cleanup(gang[i]);
3732                        if (err)
3733                                break;
3734                        btrfs_put_fs_root(gang[i]);
3735                }
3736                root_objectid++;
3737        }
3738
3739        /* release the uncleaned roots due to error */
3740        for (; i < ret; i++) {
3741                if (gang[i])
3742                        btrfs_put_fs_root(gang[i]);
3743        }
3744        return err;
3745}
3746
3747int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3748{
3749        struct btrfs_root *root = fs_info->tree_root;
3750        struct btrfs_trans_handle *trans;
3751
3752        mutex_lock(&fs_info->cleaner_mutex);
3753        btrfs_run_delayed_iputs(fs_info);
3754        mutex_unlock(&fs_info->cleaner_mutex);
3755        wake_up_process(fs_info->cleaner_kthread);
3756
3757        /* Wait until the ongoing cleanup work is done */
3758        down_write(&fs_info->cleanup_work_sem);
3759        up_write(&fs_info->cleanup_work_sem);
3760
3761        trans = btrfs_join_transaction(root);
3762        if (IS_ERR(trans))
3763                return PTR_ERR(trans);
3764        return btrfs_commit_transaction(trans);
3765}
3766
3767void close_ctree(struct btrfs_fs_info *fs_info)
3768{
3769        struct btrfs_root *root = fs_info->tree_root;
3770        int ret;
3771
3772        set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3773
3774        /* wait for the qgroup rescan worker to stop */
3775        btrfs_qgroup_wait_for_completion(fs_info, false);
3776
3777        /* wait for the uuid_scan task to finish */
3778        down(&fs_info->uuid_tree_rescan_sem);
3779        /* Avoid complaints from lockdep et al., set sem back to initial state */
3780        up(&fs_info->uuid_tree_rescan_sem);
3781
3782        /* pause restriper - we want to resume on mount */
3783        btrfs_pause_balance(fs_info);
3784
3785        btrfs_dev_replace_suspend_for_unmount(fs_info);
3786
3787        btrfs_scrub_cancel(fs_info);
3788
3789        /* wait for any defraggers to finish */
3790        wait_event(fs_info->transaction_wait,
3791                   (atomic_read(&fs_info->defrag_running) == 0));
3792
3793        /* clear out the rbtree of defraggable inodes */
3794        btrfs_cleanup_defrag_inodes(fs_info);
3795
3796        cancel_work_sync(&fs_info->async_reclaim_work);
3797
3798        if (!sb_rdonly(fs_info->sb)) {
3799                /*
3800                 * If the cleaner thread is stopped and there are
3801                 * block groups queued for removal, the deletion will be
3802                 * skipped when we quit the cleaner thread.
3803                 */
3804                btrfs_delete_unused_bgs(fs_info);
3805
3806                ret = btrfs_commit_super(fs_info);
3807                if (ret)
3808                        btrfs_err(fs_info, "commit super ret %d", ret);
3809        }
3810
3811        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
3812            test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
3813                btrfs_error_commit_super(fs_info);
3814
3815        kthread_stop(fs_info->transaction_kthread);
3816        kthread_stop(fs_info->cleaner_kthread);
3817
3818        set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3819
3820        btrfs_free_qgroup_config(fs_info);
3821        ASSERT(list_empty(&fs_info->delalloc_roots));
3822
3823        if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3824                btrfs_info(fs_info, "at unmount delalloc count %lld",
3825                       percpu_counter_sum(&fs_info->delalloc_bytes));
3826        }
3827
3828        btrfs_sysfs_remove_mounted(fs_info);
3829        btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3830
3831        btrfs_free_fs_roots(fs_info);
3832
3833        btrfs_put_block_group_cache(fs_info);
3834
3835        /*
3836         * We must make sure that no read request can be submitted
3837         * after we have stopped all the workers.
3838         */
3839        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3840        btrfs_stop_all_workers(fs_info);
3841
3842        btrfs_free_block_groups(fs_info);
3843
3844        clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
3845        free_root_pointers(fs_info, 1);
3846
3847        iput(fs_info->btree_inode);
3848
3849#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3850        if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
3851                btrfsic_unmount(fs_info->fs_devices);
3852#endif
3853
3854        btrfs_close_devices(fs_info->fs_devices);
3855        btrfs_mapping_tree_free(&fs_info->mapping_tree);
3856
3857        percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3858        percpu_counter_destroy(&fs_info->delalloc_bytes);
3859        percpu_counter_destroy(&fs_info->bio_counter);
3860        cleanup_srcu_struct(&fs_info->subvol_srcu);
3861
3862        btrfs_free_stripe_hash_table(fs_info);
3863        btrfs_free_ref_cache(fs_info);
3864
3865        __btrfs_free_block_rsv(root->orphan_block_rsv);
3866        root->orphan_block_rsv = NULL;
3867
3868        while (!list_empty(&fs_info->pinned_chunks)) {
3869                struct extent_map *em;
3870
3871                em = list_first_entry(&fs_info->pinned_chunks,
3872                                      struct extent_map, list);
3873                list_del_init(&em->list);
3874                free_extent_map(em);
3875        }
3876}
3877
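    /*
     * Return 1 if @buf is up to date and was written by the transaction
     * @parent_transid, 0 if not, or -EAGAIN when @atomic is set and the
     * check would have to block.
     */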
3878int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3879                          int atomic)
3880{
3881        int ret;
3882        struct inode *btree_inode = buf->pages[0]->mapping->host;
3883
3884        ret = extent_buffer_uptodate(buf);
3885        if (!ret)
3886                return ret;
3887
3888        ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3889                                    parent_transid, atomic);
3890        if (ret == -EAGAIN)
3891                return ret;
3892        return !ret;
3893}
3894
3895void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3896{
3897        struct btrfs_fs_info *fs_info;
3898        struct btrfs_root *root;
3899        u64 transid = btrfs_header_generation(buf);
3900        int was_dirty;
3901
3902#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3903        /*
3904         * This is a fast path so only do this check if we have sanity tests
3905         * enabled.  Normal people shouldn't be marking dummy buffers as dirty
3906         * outside of the sanity tests.
3907         */
3908        if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
3909                return;
3910#endif
3911        root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3912        fs_info = root->fs_info;
3913        btrfs_assert_tree_locked(buf);
3914        if (transid != fs_info->generation)
3915                WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
3916                        buf->start, transid, fs_info->generation);
3917        was_dirty = set_extent_buffer_dirty(buf);
3918        if (!was_dirty)
3919                percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3920                                         buf->len,
3921                                         fs_info->dirty_metadata_batch);
3922#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3923        /*
3924         * btrfs_mark_buffer_dirty() can be called with the item pointer
3925         * already set but the item data not yet updated, so only check
3926         * item pointers here, not item data.
3927         */
3928        if (btrfs_header_level(buf) == 0 &&
3929            btrfs_check_leaf_relaxed(fs_info, buf)) {
3930                btrfs_print_leaf(buf);
3931                ASSERT(0);
3932        }
3933#endif
3934}
3935
3936static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
3937                                        int flush_delayed)
3938{
3939        /*
3940         * It looks as though older kernels can get into trouble with
3941         * this code; they end up stuck in balance_dirty_pages() forever.
3942         */
3943        int ret;
3944
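            /* Never throttle tasks that are in memory reclaim (PF_MEMALLOC). */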
3945        if (current->flags & PF_MEMALLOC)
3946                return;
3947
3948        if (flush_delayed)
3949                btrfs_balance_delayed_items(fs_info);
3950
3951        ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
3952                                     BTRFS_DIRTY_METADATA_THRESH);
3953        if (ret > 0) {
3954                balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
3955        }
3956}
3957
3958void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
3959{
3960        __btrfs_btree_balance_dirty(fs_info, 1);
3961}
3962
3963void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
3964{
3965        __btrfs_btree_balance_dirty(fs_info, 0);
3966}
3967
3968int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
3969                      struct btrfs_key *first_key)
3970{
3971        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3972        struct btrfs_fs_info *fs_info = root->fs_info;
3973
3974        return btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
3975                                              level, first_key);
3976}
3977
3978static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
3979{
3980        struct btrfs_super_block *sb = fs_info->super_copy;
3981        u64 nodesize = btrfs_super_nodesize(sb);
3982        u64 sectorsize = btrfs_super_sectorsize(sb);
3983        int ret = 0;
3984
3985        if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
3986                btrfs_err(fs_info, "no valid FS found");
3987                ret = -EINVAL;
3988        }
3989        if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
3990                btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
3991                                btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
3992                ret = -EINVAL;
3993        }
3994        if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
3995                btrfs_err(fs_info, "tree_root level too big: %d >= %d",
3996                                btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
3997                ret = -EINVAL;
3998        }
3999        if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
4000                btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
4001                                btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
4002                ret = -EINVAL;
4003        }
4004        if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
4005                btrfs_err(fs_info, "log_root level too big: %d >= %d",
4006                                btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
4007                ret = -EINVAL;
4008        }
4009
4010        /*
4011         * Check sectorsize and nodesize first; other checks will need them.
4012         * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
4013         */
4014        if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
4015            sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4016                btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
4017                ret = -EINVAL;
4018        }
4019        /* Only PAGE_SIZE is supported for now */
4020        if (sectorsize != PAGE_SIZE) {
4021                btrfs_err(fs_info,
4022                        "sectorsize %llu not supported yet, only support %lu",
4023                        sectorsize, PAGE_SIZE);
4024                ret = -EINVAL;
4025        }
4026        if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
4027            nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4028                btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
4029                ret = -EINVAL;
4030        }
4031        if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
4032                btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
4033                          le32_to_cpu(sb->__unused_leafsize), nodesize);
4034                ret = -EINVAL;
4035        }
4036
4037        /* Root alignment check */
4038        if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
4039                btrfs_warn(fs_info, "tree_root block unaligned: %llu",
4040                           btrfs_super_root(sb));
4041                ret = -EINVAL;
4042        }
4043        if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
4044                btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
4045                           btrfs_super_chunk_root(sb));
4046                ret = -EINVAL;
4047        }
4048        if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
4049                btrfs_warn(fs_info, "log_root block unaligned: %llu",
4050                           btrfs_super_log_root(sb));
4051                ret = -EINVAL;
4052        }
4053
4054        if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) {
4055                btrfs_err(fs_info,
4056                           "dev_item UUID does not match fsid: %pU != %pU",
4057                           fs_info->fsid, sb->dev_item.fsid);
4058                ret = -EINVAL;
4059        }
4060
4061        /*
4062         * Hint to catch really bogus numbers, bitflips or so; more exact
4063         * checks are done later.
4064         */
4065        if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
4066                btrfs_err(fs_info, "bytes_used is too small %llu",
4067                          btrfs_super_bytes_used(sb));
4068                ret = -EINVAL;
4069        }
4070        if (!is_power_of_2(btrfs_super_stripesize(sb))) {
4071                btrfs_err(fs_info, "invalid stripesize %u",
4072                          btrfs_super_stripesize(sb));
4073                ret = -EINVAL;
4074        }
4075        if (btrfs_super_num_devices(sb) > (1UL << 31))
4076                btrfs_warn(fs_info, "suspicious number of devices: %llu",
4077                           btrfs_super_num_devices(sb));
4078        if (btrfs_super_num_devices(sb) == 0) {
4079                btrfs_err(fs_info, "number of devices is 0");
4080                ret = -EINVAL;
4081        }
4082
4083        if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
4084                btrfs_err(fs_info, "super offset mismatch %llu != %u",
4085                          btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
4086                ret = -EINVAL;
4087        }
4088
4089        /*
4090         * Catch obvious sys_chunk_array corruption: it must hold at least
4091         * one key and one chunk.
4092         */
4093        if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4094                btrfs_err(fs_info, "system chunk array too big %u > %u",
4095                          btrfs_super_sys_array_size(sb),
4096                          BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
4097                ret = -EINVAL;
4098        }
4099        if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
4100                        + sizeof(struct btrfs_chunk)) {
4101                btrfs_err(fs_info, "system chunk array too small %u < %zu",
4102                          btrfs_super_sys_array_size(sb),
4103                          sizeof(struct btrfs_disk_key)
4104                          + sizeof(struct btrfs_chunk));
4105                ret = -EINVAL;
4106        }
4107
4108        /*
4109         * The generation is a global counter; we'll trust it more than the
4110         * others, but it's still possible that it's the one that's wrong.
4111         */
4112        if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
4113                btrfs_warn(fs_info,
4114                        "suspicious: generation < chunk_root_generation: %llu < %llu",
4115                        btrfs_super_generation(sb),
4116                        btrfs_super_chunk_root_generation(sb));
4117        if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
4118            && btrfs_super_cache_generation(sb) != (u64)-1)
4119                btrfs_warn(fs_info,
4120                        "suspicious: generation < cache_generation: %llu < %llu",
4121                        btrfs_super_generation(sb),
4122                        btrfs_super_cache_generation(sb));
4123
4124        return ret;
4125}
4126
4127static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4128{
4129        /* cleanup FS via transaction */
4130        btrfs_cleanup_transaction(fs_info);
4131
4132        mutex_lock(&fs_info->cleaner_mutex);
4133        btrfs_run_delayed_iputs(fs_info);
4134        mutex_unlock(&fs_info->cleaner_mutex);
4135
4136        down_write(&fs_info->cleanup_work_sem);
4137        up_write(&fs_info->cleanup_work_sem);
4138}
4139
4140static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4141{
4142        struct btrfs_ordered_extent *ordered;
4143
4144        spin_lock(&root->ordered_extent_lock);
4145        /*
4146         * This will just short-circuit the ordered completion machinery,
4147         * which will make sure the ordered extent gets properly cleaned up.
4148         */
4149        list_for_each_entry(ordered, &root->ordered_extents,
4150                            root_extent_list)
4151                set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4152        spin_unlock(&root->ordered_extent_lock);
4153}
4154
4155static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4156{
4157        struct btrfs_root *root;
4158        struct list_head splice;
4159
4160        INIT_LIST_HEAD(&splice);
4161
4162        spin_lock(&fs_info->ordered_root_lock);
4163        list_splice_init(&fs_info->ordered_roots, &splice);
4164        while (!list_empty(&splice)) {
4165                root = list_first_entry(&splice, struct btrfs_root,
4166                                        ordered_root);
4167                list_move_tail(&root->ordered_root,
4168                               &fs_info->ordered_roots);
4169
4170                spin_unlock(&fs_info->ordered_root_lock);
4171                btrfs_destroy_ordered_extents(root);
4172
4173                cond_resched();
4174                spin_lock(&fs_info->ordered_root_lock);
4175        }
4176        spin_unlock(&fs_info->ordered_root_lock);
4177}
4178
4179static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4180                                      struct btrfs_fs_info *fs_info)
4181{
4182        struct rb_node *node;
4183        struct btrfs_delayed_ref_root *delayed_refs;
4184        struct btrfs_delayed_ref_node *ref;
4185        int ret = 0;
4186
4187        delayed_refs = &trans->delayed_refs;
4188
4189        spin_lock(&delayed_refs->lock);
4190        if (atomic_read(&delayed_refs->num_entries) == 0) {
4191                spin_unlock(&delayed_refs->lock);
4192                btrfs_info(fs_info, "delayed_refs has NO entry");
4193                return ret;
4194        }
4195
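            /*
             * Tear down every delayed ref head. If another task holds the
             * head mutex, take a reference, drop our lock, block on the
             * mutex and retry, so in-flight heads are not freed under
             * their user.
             */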
4196        while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
4197                struct btrfs_delayed_ref_head *head;
4198                struct rb_node *n;
4199                bool pin_bytes = false;
4200
4201                head = rb_entry(node, struct btrfs_delayed_ref_head,
4202                                href_node);
4203                if (!mutex_trylock(&head->mutex)) {
4204                        refcount_inc(&head->refs);
4205                        spin_unlock(&delayed_refs->lock);
4206
4207                        mutex_lock(&head->mutex);
4208                        mutex_unlock(&head->mutex);
4209                        btrfs_put_delayed_ref_head(head);
4210                        spin_lock(&delayed_refs->lock);
4211                        continue;
4212                }
4213                spin_lock(&head->lock);
4214                while ((n = rb_first(&head->ref_tree)) != NULL) {
4215                        ref = rb_entry(n, struct btrfs_delayed_ref_node,
4216                                       ref_node);
4217                        ref->in_tree = 0;
4218                        rb_erase(&ref->ref_node, &head->ref_tree);
4219                        RB_CLEAR_NODE(&ref->ref_node);
4220                        if (!list_empty(&ref->add_list))
4221                                list_del(&ref->add_list);
4222                        atomic_dec(&delayed_refs->num_entries);
4223                        btrfs_put_delayed_ref(ref);
4224                }
4225                if (head->must_insert_reserved)
4226                        pin_bytes = true;
4227                btrfs_free_delayed_extent_op(head->extent_op);
4228                delayed_refs->num_heads--;
4229                if (head->processing == 0)
4230                        delayed_refs->num_heads_ready--;
4231                atomic_dec(&delayed_refs->num_entries);
4232                rb_erase(&head->href_node, &delayed_refs->href_root);
4233                RB_CLEAR_NODE(&head->href_node);
4234                spin_unlock(&head->lock);
4235                spin_unlock(&delayed_refs->lock);
4236                mutex_unlock(&head->mutex);
4237
4238                if (pin_bytes)
4239                        btrfs_pin_extent(fs_info, head->bytenr,
4240                                         head->num_bytes, 1);
4241                btrfs_put_delayed_ref_head(head);
4242                cond_resched();
4243                spin_lock(&delayed_refs->lock);
4244        }
4245
4246        spin_unlock(&delayed_refs->lock);
4247
4248        return ret;
4249}
4250
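            /*
             * Pull every inode off the root's delalloc list and invalidate
             * its dirty pages.  igrab() pins the inode so it cannot be freed
             * while root->delalloc_lock is dropped.
             */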
4251static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4252{
4253        struct btrfs_inode *btrfs_inode;
4254        struct list_head splice;
4255
4256        INIT_LIST_HEAD(&splice);
4257
4258        spin_lock(&root->delalloc_lock);
4259        list_splice_init(&root->delalloc_inodes, &splice);
4260
4261        while (!list_empty(&splice)) {
4262                struct inode *inode = NULL;
4263                btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4264                                               delalloc_inodes);
4265                __btrfs_del_delalloc_inode(root, btrfs_inode);
4266                spin_unlock(&root->delalloc_lock);
4267
4268                /*
4269                 * Make sure we get a live inode and that it won't
4270                 * disappear out from under us while the lock is dropped.
4271                 */
4272                inode = igrab(&btrfs_inode->vfs_inode);
4273                if (inode) {
4274                        invalidate_inode_pages2(inode->i_mapping);
4275                        iput(inode);
4276                }
4277                spin_lock(&root->delalloc_lock);
4278        }
4279        spin_unlock(&root->delalloc_lock);
4280}
4281
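            /*
             * Per-filesystem version of the above: splice off
             * fs_info->delalloc_roots and run the delalloc teardown on each
             * root while holding a root reference.
             */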
4282static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4283{
4284        struct btrfs_root *root;
4285        struct list_head splice;
4286
4287        INIT_LIST_HEAD(&splice);
4288
4289        spin_lock(&fs_info->delalloc_root_lock);
4290        list_splice_init(&fs_info->delalloc_roots, &splice);
4291        while (!list_empty(&splice)) {
4292                root = list_first_entry(&splice, struct btrfs_root,
4293                                         delalloc_root);
4294                root = btrfs_grab_fs_root(root);
4295                BUG_ON(!root);
4296                spin_unlock(&fs_info->delalloc_root_lock);
4297
4298                btrfs_destroy_delalloc_inodes(root);
4299                btrfs_put_fs_root(root);
4300
4301                spin_lock(&fs_info->delalloc_root_lock);
4302        }
4303        spin_unlock(&fs_info->delalloc_root_lock);
4304}
4305
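            /*
             * Strip the given mark from every range in dirty_pages and clear
             * the dirty state of each extent buffer in those ranges, stepping
             * through them a nodesize at a time, so the buffers can be freed.
             */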
4306static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4307                                        struct extent_io_tree *dirty_pages,
4308                                        int mark)
4309{
4310        int ret;
4311        struct extent_buffer *eb;
4312        u64 start = 0;
4313        u64 end;
4314
4315        while (1) {
4316                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4317                                            mark, NULL);
4318                if (ret)
4319                        break;
4320
4321                clear_extent_bits(dirty_pages, start, end, mark);
4322                while (start <= end) {
4323                        eb = find_extent_buffer(fs_info, start);
4324                        start += fs_info->nodesize;
4325                        if (!eb)
4326                                continue;
4327                        wait_on_extent_buffer_writeback(eb);
4328
4329                        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4330                                               &eb->bflags))
4331                                clear_extent_buffer_dirty(eb);
4332                        free_extent_buffer_stale(eb);
4333                }
4334        }
4335
4336        return ret;
4337}
4338
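            /*
             * Unpin everything left over in the freed_extents trees.  The
             * caller passes in the currently active tree; the 'loop' flag
             * then makes one more pass over its counterpart.
             */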
4339static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4340                                       struct extent_io_tree *pinned_extents)
4341{
4342        struct extent_io_tree *unpin;
4343        u64 start;
4344        u64 end;
4345        int ret;
4346        bool loop = true;
4347
4348        unpin = pinned_extents;
4349again:
4350        while (1) {
4351                ret = find_first_extent_bit(unpin, 0, &start, &end,
4352                                            EXTENT_DIRTY, NULL);
4353                if (ret)
4354                        break;
4355
4356                clear_extent_dirty(unpin, start, end);
4357                btrfs_error_unpin_extent_range(fs_info, start, end);
4358                cond_resched();
4359        }
4360
4361        if (loop) {
4362                if (unpin == &fs_info->freed_extents[0])
4363                        unpin = &fs_info->freed_extents[1];
4364                else
4365                        unpin = &fs_info->freed_extents[0];
4366                loop = false;
4367                goto again;
4368        }
4369
4370        return 0;
4371}
4372
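            /*
             * Release the free space cache inode attached to a block group's
             * io_ctl.  Zeroing the generation should keep the half-written
             * cache from ever being trusted; the block group reference held
             * for the writeout is dropped at the end.
             */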
4373static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4374{
4375        struct inode *inode;
4376
4377        inode = cache->io_ctl.inode;
4378        if (inode) {
4379                invalidate_inode_pages2(inode->i_mapping);
4380                BTRFS_I(inode)->generation = 0;
4381                cache->io_ctl.inode = NULL;
4382                iput(inode);
4383        }
4384        btrfs_put_block_group(cache);
4385}
4386
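            /*
             * Empty the transaction's dirty_bgs and io_bgs lists, flagging
             * each block group's disk cache as BTRFS_DC_ERROR so the free
             * space cache for it is treated as invalid from here on.
             */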
4387void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4388                             struct btrfs_fs_info *fs_info)
4389{
4390        struct btrfs_block_group_cache *cache;
4391
4392        spin_lock(&cur_trans->dirty_bgs_lock);
4393        while (!list_empty(&cur_trans->dirty_bgs)) {
4394                cache = list_first_entry(&cur_trans->dirty_bgs,
4395                                         struct btrfs_block_group_cache,
4396                                         dirty_list);
4397
4398                if (!list_empty(&cache->io_list)) {
4399                        spin_unlock(&cur_trans->dirty_bgs_lock);
4400                        list_del_init(&cache->io_list);
4401                        btrfs_cleanup_bg_io(cache);
4402                        spin_lock(&cur_trans->dirty_bgs_lock);
4403                }
4404
4405                list_del_init(&cache->dirty_list);
4406                spin_lock(&cache->lock);
4407                cache->disk_cache_state = BTRFS_DC_ERROR;
4408                spin_unlock(&cache->lock);
4409
4410                spin_unlock(&cur_trans->dirty_bgs_lock);
4411                btrfs_put_block_group(cache);
4412                spin_lock(&cur_trans->dirty_bgs_lock);
4413        }
4414        spin_unlock(&cur_trans->dirty_bgs_lock);
4415
4416        /*
4417         * Refer to the definition of the io_bgs member for details on why
4418         * it is safe to use it without any locking.
4419         */
4420        while (!list_empty(&cur_trans->io_bgs)) {
4421                cache = list_first_entry(&cur_trans->io_bgs,
4422                                         struct btrfs_block_group_cache,
4423                                         io_list);
4424
4425                list_del_init(&cache->io_list);
4426                spin_lock(&cache->lock);
4427                cache->disk_cache_state = BTRFS_DC_ERROR;
4428                spin_unlock(&cache->lock);
4429                btrfs_cleanup_bg_io(cache);
4430        }
4431}
4432
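            /*
             * Tear down everything a single transaction still owns after the
             * filesystem has gone read-only: dirty block groups, delayed
             * refs, delayed inodes, dirty metadata pages and pinned extents.
             * The transaction state is stepped forward between stages so that
             * waiters blocked on the various wait queues get woken up.
             */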
4433void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4434                                   struct btrfs_fs_info *fs_info)
4435{
4436        btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4437        ASSERT(list_empty(&cur_trans->dirty_bgs));
4438        ASSERT(list_empty(&cur_trans->io_bgs));
4439
4440        btrfs_destroy_delayed_refs(cur_trans, fs_info);
4441
4442        cur_trans->state = TRANS_STATE_COMMIT_START;
4443        wake_up(&fs_info->transaction_blocked_wait);
4444
4445        cur_trans->state = TRANS_STATE_UNBLOCKED;
4446        wake_up(&fs_info->transaction_wait);
4447
4448        btrfs_destroy_delayed_inodes(fs_info);
4449        btrfs_assert_delayed_root_empty(fs_info);
4450
4451        btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4452                                     EXTENT_DIRTY);
4453        btrfs_destroy_pinned_extent(fs_info,
4454                                    fs_info->pinned_extents);
4455
4456        cur_trans->state = TRANS_STATE_COMPLETED;
4457        wake_up(&cur_trans->commit_wait);
4458}
4459
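            /*
             * Error-path counterpart of a normal commit: drain
             * fs_info->trans_list, waiting for transactions that already
             * reached COMMIT_START and cleaning up the rest by hand, then
             * destroy the remaining per-fs_info state.
             */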
4460static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4461{
4462        struct btrfs_transaction *t;
4463
4464        mutex_lock(&fs_info->transaction_kthread_mutex);
4465
4466        spin_lock(&fs_info->trans_lock);
4467        while (!list_empty(&fs_info->trans_list)) {
4468                t = list_first_entry(&fs_info->trans_list,
4469                                     struct btrfs_transaction, list);
4470                if (t->state >= TRANS_STATE_COMMIT_START) {
4471                        refcount_inc(&t->use_count);
4472                        spin_unlock(&fs_info->trans_lock);
4473                        btrfs_wait_for_commit(fs_info, t->transid);
4474                        btrfs_put_transaction(t);
4475                        spin_lock(&fs_info->trans_lock);
4476                        continue;
4477                }
4478                if (t == fs_info->running_transaction) {
4479                        t->state = TRANS_STATE_COMMIT_DOING;
4480                        spin_unlock(&fs_info->trans_lock);
4481                        /*
4482                        /*
4483                         * We wait for num_writers to hit 0 because we don't
4484                         * currently hold an open trans handle for this transaction.
4485                         */
4485                        wait_event(t->writer_wait,
4486                                   atomic_read(&t->num_writers) == 0);
4487                } else {
4488                        spin_unlock(&fs_info->trans_lock);
4489                }
4490                btrfs_cleanup_one_transaction(t, fs_info);
4491
4492                spin_lock(&fs_info->trans_lock);
4493                if (t == fs_info->running_transaction)
4494                        fs_info->running_transaction = NULL;
4495                list_del_init(&t->list);
4496                spin_unlock(&fs_info->trans_lock);
4497
4498                btrfs_put_transaction(t);
4499                trace_btrfs_transaction_commit(fs_info->tree_root);
4500                spin_lock(&fs_info->trans_lock);
4501        }
4502        spin_unlock(&fs_info->trans_lock);
4503        btrfs_destroy_all_ordered_extents(fs_info);
4504        btrfs_destroy_delayed_inodes(fs_info);
4505        btrfs_assert_delayed_root_empty(fs_info);
4506        btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4507        btrfs_destroy_all_delalloc_inodes(fs_info);
4508        mutex_unlock(&fs_info->transaction_kthread_mutex);
4509
4510        return 0;
4511}
4512
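            /*
             * Map the extent_io private data (the btree inode) back to its
             * fs_info.
             */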
4513static struct btrfs_fs_info *btree_fs_info(void *private_data)
4514{
4515        struct inode *inode = private_data;
4516        return btrfs_sb(inode->i_sb);
4517}
4518
4519static const struct extent_io_ops btree_extent_io_ops = {
4520        /* mandatory callbacks */
4521        .submit_bio_hook = btree_submit_bio_hook,
4522        .readpage_end_io_hook = btree_readpage_end_io_hook,
4523        /* note we're sharing with inode.c for the merge bio hook */
4524        .merge_bio_hook = btrfs_merge_bio_hook,
4525        .readpage_io_failed_hook = btree_io_failed_hook,
4526        .set_range_writeback = btrfs_set_range_writeback,
4527        .tree_fs_info = btree_fs_info,
4528
4529        /* optional callbacks */
4530};
4531