linux/fs/btrfs/disk-io.c
   1/*
   2 * Copyright (C) 2007 Oracle.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   16 * Boston, MA 02111-1307, USA.
  17 */
  18
  19#include <linux/fs.h>
  20#include <linux/blkdev.h>
  21#include <linux/scatterlist.h>
  22#include <linux/swap.h>
  23#include <linux/radix-tree.h>
  24#include <linux/writeback.h>
  25#include <linux/buffer_head.h>
  26#include <linux/workqueue.h>
  27#include <linux/kthread.h>
  28#include <linux/freezer.h>
  29#include <linux/slab.h>
  30#include <linux/migrate.h>
  31#include <linux/ratelimit.h>
  32#include <linux/uuid.h>
  33#include <linux/semaphore.h>
  34#include <asm/unaligned.h>
  35#include "ctree.h"
  36#include "disk-io.h"
  37#include "hash.h"
  38#include "transaction.h"
  39#include "btrfs_inode.h"
  40#include "volumes.h"
  41#include "print-tree.h"
  42#include "locking.h"
  43#include "tree-log.h"
  44#include "free-space-cache.h"
  45#include "inode-map.h"
  46#include "check-integrity.h"
  47#include "rcu-string.h"
  48#include "dev-replace.h"
  49#include "raid56.h"
  50#include "sysfs.h"
  51#include "qgroup.h"
  52
  53#ifdef CONFIG_X86
  54#include <asm/cpufeature.h>
  55#endif
  56
  57static const struct extent_io_ops btree_extent_io_ops;
  58static void end_workqueue_fn(struct btrfs_work *work);
  59static void free_fs_root(struct btrfs_root *root);
  60static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
  61                                    int read_only);
  62static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
  63static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
  64                                      struct btrfs_root *root);
  65static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
  66static int btrfs_destroy_marked_extents(struct btrfs_root *root,
  67                                        struct extent_io_tree *dirty_pages,
  68                                        int mark);
  69static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
  70                                       struct extent_io_tree *pinned_extents);
  71static int btrfs_cleanup_transaction(struct btrfs_root *root);
  72static void btrfs_error_commit_super(struct btrfs_root *root);
  73
  74/*
  75 * btrfs_end_io_wq structs are used to do processing in task context when an IO
  76 * is complete.  This is used during reads to verify checksums, and it is used
  77 * by writes to insert metadata for new file extents after IO is complete.
  78 */
  79struct btrfs_end_io_wq {
  80        struct bio *bio;
  81        bio_end_io_t *end_io;
  82        void *private;
  83        struct btrfs_fs_info *info;
  84        int error;
  85        enum btrfs_wq_endio_type metadata;
  86        struct list_head list;
  87        struct btrfs_work work;
  88};
  89
  90static struct kmem_cache *btrfs_end_io_wq_cache;
  91
  92int __init btrfs_end_io_wq_init(void)
  93{
  94        btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
  95                                        sizeof(struct btrfs_end_io_wq),
  96                                        0,
  97                                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
  98                                        NULL);
  99        if (!btrfs_end_io_wq_cache)
 100                return -ENOMEM;
 101        return 0;
 102}
 103
 104void btrfs_end_io_wq_exit(void)
 105{
 106        if (btrfs_end_io_wq_cache)
 107                kmem_cache_destroy(btrfs_end_io_wq_cache);
 108}
 109
 110/*
 111 * async submit bios are used to offload expensive checksumming
 112 * onto the worker threads.  They checksum file and metadata bios
 113 * just before they are sent down the IO stack.
 114 */
 115struct async_submit_bio {
 116        struct inode *inode;
 117        struct bio *bio;
 118        struct list_head list;
 119        extent_submit_bio_hook_t *submit_bio_start;
 120        extent_submit_bio_hook_t *submit_bio_done;
 121        int rw;
 122        int mirror_num;
 123        unsigned long bio_flags;
 124        /*
 125         * bio_offset is optional, can be used if the pages in the bio
 126         * can't tell us where in the file the bio should go
 127         */
 128        u64 bio_offset;
 129        struct btrfs_work work;
 130        int error;
 131};
 132
 133/*
 134 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 135 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 136 * the level the eb occupies in the tree.
 137 *
 138 * Different roots are used for different purposes and may nest inside each
 139 * other and they require separate keysets.  As lockdep keys should be
 140 * static, assign keysets according to the purpose of the root as indicated
 141 * by btrfs_root->objectid.  This ensures that all special purpose roots
 142 * have separate keysets.
 143 *
 144 * Lock-nesting across peer nodes is always done with the immediate parent
  145 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 146 * subclass to avoid triggering lockdep warning in such cases.
 147 *
 148 * The key is set by the readpage_end_io_hook after the buffer has passed
 149 * csum validation but before the pages are unlocked.  It is also set by
 150 * btrfs_init_new_buffer on freshly allocated blocks.
 151 *
 152 * We also add a check to make sure the highest level of the tree is the
 153 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 154 * needs update as well.
 155 */
 156#ifdef CONFIG_DEBUG_LOCK_ALLOC
 157# if BTRFS_MAX_LEVEL != 8
  158#  error "fix btrfs_lockdep_keysets when BTRFS_MAX_LEVEL changes"
 159# endif
 160
 161static struct btrfs_lockdep_keyset {
 162        u64                     id;             /* root objectid */
 163        const char              *name_stem;     /* lock name stem */
 164        char                    names[BTRFS_MAX_LEVEL + 1][20];
 165        struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
 166} btrfs_lockdep_keysets[] = {
 167        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
 168        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
 169        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
 170        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
 171        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
 172        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
 173        { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
 174        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
 175        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
 176        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
 177        { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
 178        { .id = 0,                              .name_stem = "tree"     },
 179};
 180
 181void __init btrfs_init_lockdep(void)
 182{
 183        int i, j;
 184
 185        /* initialize lockdep class names */
 186        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
 187                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
 188
 189                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
 190                        snprintf(ks->names[j], sizeof(ks->names[j]),
 191                                 "btrfs-%s-%02d", ks->name_stem, j);
 192        }
 193}
 194
 195void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
 196                                    int level)
 197{
 198        struct btrfs_lockdep_keyset *ks;
 199
 200        BUG_ON(level >= ARRAY_SIZE(ks->keys));
 201
 202        /* find the matching keyset, id 0 is the default entry */
 203        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
 204                if (ks->id == objectid)
 205                        break;
 206
 207        lockdep_set_class_and_name(&eb->lock,
 208                                   &ks->keys[level], ks->names[level]);
 209}
 210
 211#endif
 212
 213/*
 214 * extents on the btree inode are pretty simple, there's one extent
 215 * that covers the entire device
 216 */
 217static struct extent_map *btree_get_extent(struct inode *inode,
 218                struct page *page, size_t pg_offset, u64 start, u64 len,
 219                int create)
 220{
 221        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 222        struct extent_map *em;
 223        int ret;
 224
 225        read_lock(&em_tree->lock);
 226        em = lookup_extent_mapping(em_tree, start, len);
 227        if (em) {
 228                em->bdev =
 229                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 230                read_unlock(&em_tree->lock);
 231                goto out;
 232        }
 233        read_unlock(&em_tree->lock);
 234
 235        em = alloc_extent_map();
 236        if (!em) {
 237                em = ERR_PTR(-ENOMEM);
 238                goto out;
 239        }
 240        em->start = 0;
 241        em->len = (u64)-1;
 242        em->block_len = (u64)-1;
 243        em->block_start = 0;
 244        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 245
 246        write_lock(&em_tree->lock);
 247        ret = add_extent_mapping(em_tree, em, 0);
 248        if (ret == -EEXIST) {
 249                free_extent_map(em);
 250                em = lookup_extent_mapping(em_tree, start, len);
 251                if (!em)
 252                        em = ERR_PTR(-EIO);
 253        } else if (ret) {
 254                free_extent_map(em);
 255                em = ERR_PTR(ret);
 256        }
 257        write_unlock(&em_tree->lock);
 258
 259out:
 260        return em;
 261}
 262
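     /*
      * Metadata and data checksums in btrfs are crc32c.  btrfs_csum_data()
      * feeds a buffer into the running crc, and btrfs_csum_final() inverts
      * the result and stores it as a little-endian u32.
      */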
 263u32 btrfs_csum_data(char *data, u32 seed, size_t len)
 264{
 265        return btrfs_crc32c(seed, data, len);
 266}
 267
 268void btrfs_csum_final(u32 crc, char *result)
 269{
 270        put_unaligned_le32(~crc, result);
 271}
 272
 273/*
 274 * compute the csum for a btree block, and either verify it or write it
 275 * into the csum field of the block.
 276 */
 277static int csum_tree_block(struct btrfs_fs_info *fs_info,
 278                           struct extent_buffer *buf,
 279                           int verify)
 280{
 281        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 282        char *result = NULL;
 283        unsigned long len;
 284        unsigned long cur_len;
 285        unsigned long offset = BTRFS_CSUM_SIZE;
 286        char *kaddr;
 287        unsigned long map_start;
 288        unsigned long map_len;
 289        int err;
 290        u32 crc = ~(u32)0;
 291        unsigned long inline_result;
 292
 293        len = buf->len - offset;
 294        while (len > 0) {
 295                err = map_private_extent_buffer(buf, offset, 32,
 296                                        &kaddr, &map_start, &map_len);
 297                if (err)
 298                        return 1;
 299                cur_len = min(len, map_len - (offset - map_start));
 300                crc = btrfs_csum_data(kaddr + offset - map_start,
 301                                      crc, cur_len);
 302                len -= cur_len;
 303                offset += cur_len;
 304        }
 305        if (csum_size > sizeof(inline_result)) {
 306                result = kzalloc(csum_size, GFP_NOFS);
 307                if (!result)
 308                        return 1;
 309        } else {
 310                result = (char *)&inline_result;
 311        }
 312
 313        btrfs_csum_final(crc, result);
 314
 315        if (verify) {
 316                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
 317                        u32 val;
 318                        u32 found = 0;
 319                        memcpy(&found, result, csum_size);
 320
 321                        read_extent_buffer(buf, &val, 0, csum_size);
 322                        printk_ratelimited(KERN_WARNING
 323                                "BTRFS: %s checksum verify failed on %llu wanted %X found %X "
 324                                "level %d\n",
 325                                fs_info->sb->s_id, buf->start,
 326                                val, found, btrfs_header_level(buf));
 327                        if (result != (char *)&inline_result)
 328                                kfree(result);
 329                        return 1;
 330                }
 331        } else {
 332                write_extent_buffer(buf, result, 0, csum_size);
 333        }
 334        if (result != (char *)&inline_result)
 335                kfree(result);
 336        return 0;
 337}
 338
 339/*
 340 * we can't consider a given block up to date unless the transid of the
 341 * block matches the transid in the parent node's pointer.  This is how we
 342 * detect blocks that either didn't get written at all or got written
 343 * in the wrong place.
 344 */
 345static int verify_parent_transid(struct extent_io_tree *io_tree,
 346                                 struct extent_buffer *eb, u64 parent_transid,
 347                                 int atomic)
 348{
 349        struct extent_state *cached_state = NULL;
 350        int ret;
 351        bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);
 352
 353        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
 354                return 0;
 355
 356        if (atomic)
 357                return -EAGAIN;
 358
 359        if (need_lock) {
 360                btrfs_tree_read_lock(eb);
 361                btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 362        }
 363
 364        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
 365                         0, &cached_state);
 366        if (extent_buffer_uptodate(eb) &&
 367            btrfs_header_generation(eb) == parent_transid) {
 368                ret = 0;
 369                goto out;
 370        }
 371        printk_ratelimited(KERN_ERR
 372            "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n",
 373                        eb->fs_info->sb->s_id, eb->start,
 374                        parent_transid, btrfs_header_generation(eb));
 375        ret = 1;
 376
 377        /*
 378         * Things reading via commit roots that don't have normal protection,
 379         * like send, can have a really old block in cache that may point at a
  380         * block that has been freed and re-allocated.  So don't clear uptodate
 381         * if we find an eb that is under IO (dirty/writeback) because we could
 382         * end up reading in the stale data and then writing it back out and
 383         * making everybody very sad.
 384         */
 385        if (!extent_buffer_under_io(eb))
 386                clear_extent_buffer_uptodate(eb);
 387out:
 388        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
 389                             &cached_state, GFP_NOFS);
 390        if (need_lock)
 391                btrfs_tree_read_unlock_blocking(eb);
 392        return ret;
 393}
 394
 395/*
 396 * Return 0 if the superblock checksum type matches the checksum value of that
 397 * algorithm. Pass the raw disk superblock data.
 398 */
 399static int btrfs_check_super_csum(char *raw_disk_sb)
 400{
 401        struct btrfs_super_block *disk_sb =
 402                (struct btrfs_super_block *)raw_disk_sb;
 403        u16 csum_type = btrfs_super_csum_type(disk_sb);
 404        int ret = 0;
 405
 406        if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
 407                u32 crc = ~(u32)0;
 408                const int csum_size = sizeof(crc);
 409                char result[csum_size];
 410
 411                /*
 412                 * The super_block structure does not span the whole
  413                 * BTRFS_SUPER_INFO_SIZE range; we expect that the unused space
  414                 * is filled with zeros and is included in the checksum.
 415                 */
 416                crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
 417                                crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
 418                btrfs_csum_final(crc, result);
 419
 420                if (memcmp(raw_disk_sb, result, csum_size))
 421                        ret = 1;
 422        }
 423
 424        if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
 425                printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n",
 426                                csum_type);
 427                ret = 1;
 428        }
 429
 430        return ret;
 431}
 432
 433/*
 434 * helper to read a given tree block, doing retries as required when
 435 * the checksums don't match and we have alternate mirrors to try.
 436 */
 437static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 438                                          struct extent_buffer *eb,
 439                                          u64 start, u64 parent_transid)
 440{
 441        struct extent_io_tree *io_tree;
 442        int failed = 0;
 443        int ret;
 444        int num_copies = 0;
 445        int mirror_num = 0;
 446        int failed_mirror = 0;
 447
 448        clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 449        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 450        while (1) {
 451                ret = read_extent_buffer_pages(io_tree, eb, start,
 452                                               WAIT_COMPLETE,
 453                                               btree_get_extent, mirror_num);
 454                if (!ret) {
 455                        if (!verify_parent_transid(io_tree, eb,
 456                                                   parent_transid, 0))
 457                                break;
 458                        else
 459                                ret = -EIO;
 460                }
 461
 462                /*
 463                 * This buffer's crc is fine, but its contents are corrupted, so
 464                 * there is no reason to read the other copies, they won't be
 465                 * any less wrong.
 466                 */
 467                if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
 468                        break;
 469
 470                num_copies = btrfs_num_copies(root->fs_info,
 471                                              eb->start, eb->len);
 472                if (num_copies == 1)
 473                        break;
 474
 475                if (!failed_mirror) {
 476                        failed = 1;
 477                        failed_mirror = eb->read_mirror;
 478                }
 479
 480                mirror_num++;
 481                if (mirror_num == failed_mirror)
 482                        mirror_num++;
 483
 484                if (mirror_num > num_copies)
 485                        break;
 486        }
 487
 488        if (failed && !ret && failed_mirror)
 489                repair_eb_io_failure(root, eb, failed_mirror);
 490
 491        return ret;
 492}
 493
 494/*
 495 * checksum a dirty tree block before IO.  This has extra checks to make sure
 496 * we only fill in the checksum field in the first page of a multi-page block
 497 */
 498
 499static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
 500{
 501        u64 start = page_offset(page);
 502        u64 found_start;
 503        struct extent_buffer *eb;
 504
 505        eb = (struct extent_buffer *)page->private;
 506        if (page != eb->pages[0])
 507                return 0;
 508        found_start = btrfs_header_bytenr(eb);
 509        if (WARN_ON(found_start != start || !PageUptodate(page)))
 510                return 0;
 511        csum_tree_block(fs_info, eb, 0);
 512        return 0;
 513}
 514
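     /*
      * Accept a tree block only if the fsid in its header matches this
      * filesystem or one of the seed filesystems it was created from.
      * Returns 0 on a match and 1 otherwise.
      */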
 515static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
 516                                 struct extent_buffer *eb)
 517{
 518        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 519        u8 fsid[BTRFS_UUID_SIZE];
 520        int ret = 1;
 521
 522        read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
 523        while (fs_devices) {
 524                if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
 525                        ret = 0;
 526                        break;
 527                }
 528                fs_devices = fs_devices->seed;
 529        }
 530        return ret;
 531}
 532
 533#define CORRUPT(reason, eb, root, slot)                         \
  534        btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu, "      \
 535                   "root=%llu, slot=%d", reason,                        \
 536               btrfs_header_bytenr(eb), root->objectid, slot)
 537
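     /*
      * Lightweight sanity checks on a leaf: item 0 must end exactly at the
      * edge of the leaf data area, keys must be in ascending order, and every
      * item's data must line up with its neighbour and stay inside the leaf.
      */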
 538static noinline int check_leaf(struct btrfs_root *root,
 539                               struct extent_buffer *leaf)
 540{
 541        struct btrfs_key key;
 542        struct btrfs_key leaf_key;
 543        u32 nritems = btrfs_header_nritems(leaf);
 544        int slot;
 545
 546        if (nritems == 0)
 547                return 0;
 548
 549        /* Check the 0 item */
 550        if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
 551            BTRFS_LEAF_DATA_SIZE(root)) {
 552                CORRUPT("invalid item offset size pair", leaf, root, 0);
 553                return -EIO;
 554        }
 555
 556        /*
  557         * Check to make sure each item's keys are in the correct order and their
 558         * offsets make sense.  We only have to loop through nritems-1 because
 559         * we check the current slot against the next slot, which verifies the
  560         * next slot's offset+size makes sense and that the current slot's
 561         * offset is correct.
 562         */
 563        for (slot = 0; slot < nritems - 1; slot++) {
 564                btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
 565                btrfs_item_key_to_cpu(leaf, &key, slot + 1);
 566
 567                /* Make sure the keys are in the right order */
 568                if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
 569                        CORRUPT("bad key order", leaf, root, slot);
 570                        return -EIO;
 571                }
 572
 573                /*
  574                 * Make sure the offsets and ends are right; remember that the
 575                 * item data starts at the end of the leaf and grows towards the
 576                 * front.
 577                 */
 578                if (btrfs_item_offset_nr(leaf, slot) !=
 579                        btrfs_item_end_nr(leaf, slot + 1)) {
 580                        CORRUPT("slot offset bad", leaf, root, slot);
 581                        return -EIO;
 582                }
 583
 584                /*
 585                 * Check to make sure that we don't point outside of the leaf,
  586                 * just in case all the items are consistent with each other, but
 587                 * all point outside of the leaf.
 588                 */
 589                if (btrfs_item_end_nr(leaf, slot) >
 590                    BTRFS_LEAF_DATA_SIZE(root)) {
 591                        CORRUPT("slot end outside of leaf", leaf, root, slot);
 592                        return -EIO;
 593                }
 594        }
 595
 596        return 0;
 597}
 598
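     /*
      * Read completion hook for btree pages.  Once the last page of the
      * extent buffer finishes IO, verify the bytenr, fsid, level and
      * checksum, run the leaf checks, and only then mark the buffer uptodate.
      */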
 599static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
 600                                      u64 phy_offset, struct page *page,
 601                                      u64 start, u64 end, int mirror)
 602{
 603        u64 found_start;
 604        int found_level;
 605        struct extent_buffer *eb;
 606        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
 607        int ret = 0;
 608        int reads_done;
 609
 610        if (!page->private)
 611                goto out;
 612
 613        eb = (struct extent_buffer *)page->private;
 614
 615        /* the pending IO might have been the only thing that kept this buffer
  616         * in memory.  Make sure we have a ref for all the other checks
 617         */
 618        extent_buffer_get(eb);
 619
 620        reads_done = atomic_dec_and_test(&eb->io_pages);
 621        if (!reads_done)
 622                goto err;
 623
 624        eb->read_mirror = mirror;
 625        if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
 626                ret = -EIO;
 627                goto err;
 628        }
 629
 630        found_start = btrfs_header_bytenr(eb);
 631        if (found_start != eb->start) {
 632                printk_ratelimited(KERN_ERR "BTRFS (device %s): bad tree block start "
 633                               "%llu %llu\n",
 634                               eb->fs_info->sb->s_id, found_start, eb->start);
 635                ret = -EIO;
 636                goto err;
 637        }
 638        if (check_tree_block_fsid(root->fs_info, eb)) {
 639                printk_ratelimited(KERN_ERR "BTRFS (device %s): bad fsid on block %llu\n",
 640                               eb->fs_info->sb->s_id, eb->start);
 641                ret = -EIO;
 642                goto err;
 643        }
 644        found_level = btrfs_header_level(eb);
 645        if (found_level >= BTRFS_MAX_LEVEL) {
 646                btrfs_err(root->fs_info, "bad tree block level %d",
 647                           (int)btrfs_header_level(eb));
 648                ret = -EIO;
 649                goto err;
 650        }
 651
 652        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
 653                                       eb, found_level);
 654
 655        ret = csum_tree_block(root->fs_info, eb, 1);
 656        if (ret) {
 657                ret = -EIO;
 658                goto err;
 659        }
 660
 661        /*
 662         * If this is a leaf block and it is corrupt, set the corrupt bit so
 663         * that we don't try and read the other copies of this block, just
 664         * return -EIO.
 665         */
 666        if (found_level == 0 && check_leaf(root, eb)) {
 667                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 668                ret = -EIO;
 669        }
 670
 671        if (!ret)
 672                set_extent_buffer_uptodate(eb);
 673err:
 674        if (reads_done &&
 675            test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
 676                btree_readahead_hook(root, eb, eb->start, ret);
 677
 678        if (ret) {
 679                /*
 680                 * our io error hook is going to dec the io pages
 681                 * again, we have to make sure it has something
 682                 * to decrement
 683                 */
 684                atomic_inc(&eb->io_pages);
 685                clear_extent_buffer_uptodate(eb);
 686        }
 687        free_extent_buffer(eb);
 688out:
 689        return ret;
 690}
 691
 692static int btree_io_failed_hook(struct page *page, int failed_mirror)
 693{
 694        struct extent_buffer *eb;
 695        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
 696
 697        eb = (struct extent_buffer *)page->private;
 698        set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
 699        eb->read_mirror = failed_mirror;
 700        atomic_dec(&eb->io_pages);
 701        if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
 702                btree_readahead_hook(root, eb, eb->start, -EIO);
 703        return -EIO;    /* we fixed nothing */
 704}
 705
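     /*
      * bio end_io callback installed by btrfs_bio_wq_end_io().  It picks the
      * endio workqueue matching the bio direction and metadata type and
      * defers the real completion work (end_workqueue_fn) to that queue.
      */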
 706static void end_workqueue_bio(struct bio *bio)
 707{
 708        struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
 709        struct btrfs_fs_info *fs_info;
 710        struct btrfs_workqueue *wq;
 711        btrfs_work_func_t func;
 712
 713        fs_info = end_io_wq->info;
 714        end_io_wq->error = bio->bi_error;
 715
 716        if (bio->bi_rw & REQ_WRITE) {
 717                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
 718                        wq = fs_info->endio_meta_write_workers;
 719                        func = btrfs_endio_meta_write_helper;
 720                } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
 721                        wq = fs_info->endio_freespace_worker;
 722                        func = btrfs_freespace_write_helper;
 723                } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
 724                        wq = fs_info->endio_raid56_workers;
 725                        func = btrfs_endio_raid56_helper;
 726                } else {
 727                        wq = fs_info->endio_write_workers;
 728                        func = btrfs_endio_write_helper;
 729                }
 730        } else {
 731                if (unlikely(end_io_wq->metadata ==
 732                             BTRFS_WQ_ENDIO_DIO_REPAIR)) {
 733                        wq = fs_info->endio_repair_workers;
 734                        func = btrfs_endio_repair_helper;
 735                } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
 736                        wq = fs_info->endio_raid56_workers;
 737                        func = btrfs_endio_raid56_helper;
 738                } else if (end_io_wq->metadata) {
 739                        wq = fs_info->endio_meta_workers;
 740                        func = btrfs_endio_meta_helper;
 741                } else {
 742                        wq = fs_info->endio_workers;
 743                        func = btrfs_endio_helper;
 744                }
 745        }
 746
 747        btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
 748        btrfs_queue_work(wq, &end_io_wq->work);
 749}
 750
 751int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 752                        enum btrfs_wq_endio_type metadata)
 753{
 754        struct btrfs_end_io_wq *end_io_wq;
 755
 756        end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
 757        if (!end_io_wq)
 758                return -ENOMEM;
 759
 760        end_io_wq->private = bio->bi_private;
 761        end_io_wq->end_io = bio->bi_end_io;
 762        end_io_wq->info = info;
 763        end_io_wq->error = 0;
 764        end_io_wq->bio = bio;
 765        end_io_wq->metadata = metadata;
 766
 767        bio->bi_private = end_io_wq;
 768        bio->bi_end_io = end_workqueue_bio;
 769        return 0;
 770}
 771
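     /*
      * Rough upper bound on the number of async bio submissions in flight,
      * scaled by the smaller of the thread pool size and the number of open
      * devices.
      */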
 772unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 773{
 774        unsigned long limit = min_t(unsigned long,
 775                                    info->thread_pool_size,
 776                                    info->fs_devices->open_devices);
 777        return 256 * limit;
 778}
 779
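     /*
      * Worker-side halves of btrfs_wq_submit_bio(): the start hook does the
      * checksumming, the done hook maps and submits the bio (or completes it
      * with an error), and the free hook releases the async_submit_bio.
      */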
 780static void run_one_async_start(struct btrfs_work *work)
 781{
 782        struct async_submit_bio *async;
 783        int ret;
 784
 785        async = container_of(work, struct  async_submit_bio, work);
 786        ret = async->submit_bio_start(async->inode, async->rw, async->bio,
 787                                      async->mirror_num, async->bio_flags,
 788                                      async->bio_offset);
 789        if (ret)
 790                async->error = ret;
 791}
 792
 793static void run_one_async_done(struct btrfs_work *work)
 794{
 795        struct btrfs_fs_info *fs_info;
 796        struct async_submit_bio *async;
 797        int limit;
 798
 799        async = container_of(work, struct  async_submit_bio, work);
 800        fs_info = BTRFS_I(async->inode)->root->fs_info;
 801
 802        limit = btrfs_async_submit_limit(fs_info);
 803        limit = limit * 2 / 3;
 804
 805        if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
 806            waitqueue_active(&fs_info->async_submit_wait))
 807                wake_up(&fs_info->async_submit_wait);
 808
  809        /* If an error occurred we just want to clean up the bio and move on */
 810        if (async->error) {
 811                async->bio->bi_error = async->error;
 812                bio_endio(async->bio);
 813                return;
 814        }
 815
 816        async->submit_bio_done(async->inode, async->rw, async->bio,
 817                               async->mirror_num, async->bio_flags,
 818                               async->bio_offset);
 819}
 820
 821static void run_one_async_free(struct btrfs_work *work)
 822{
 823        struct async_submit_bio *async;
 824
 825        async = container_of(work, struct  async_submit_bio, work);
 826        kfree(async);
 827}
 828
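     /*
      * Queue a bio for async submission; the submit_bio_start and
      * submit_bio_done hooks run from the worker threads via the
      * run_one_async_* helpers above.  REQ_SYNC bios get high priority.
      */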
 829int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 830                        int rw, struct bio *bio, int mirror_num,
 831                        unsigned long bio_flags,
 832                        u64 bio_offset,
 833                        extent_submit_bio_hook_t *submit_bio_start,
 834                        extent_submit_bio_hook_t *submit_bio_done)
 835{
 836        struct async_submit_bio *async;
 837
 838        async = kmalloc(sizeof(*async), GFP_NOFS);
 839        if (!async)
 840                return -ENOMEM;
 841
 842        async->inode = inode;
 843        async->rw = rw;
 844        async->bio = bio;
 845        async->mirror_num = mirror_num;
 846        async->submit_bio_start = submit_bio_start;
 847        async->submit_bio_done = submit_bio_done;
 848
 849        btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
 850                        run_one_async_done, run_one_async_free);
 851
 852        async->bio_flags = bio_flags;
 853        async->bio_offset = bio_offset;
 854
 855        async->error = 0;
 856
 857        atomic_inc(&fs_info->nr_async_submits);
 858
 859        if (rw & REQ_SYNC)
 860                btrfs_set_work_high_priority(&async->work);
 861
 862        btrfs_queue_work(fs_info->workers, &async->work);
 863
 864        while (atomic_read(&fs_info->async_submit_draining) &&
 865              atomic_read(&fs_info->nr_async_submits)) {
 866                wait_event(fs_info->async_submit_wait,
 867                           (atomic_read(&fs_info->nr_async_submits) == 0));
 868        }
 869
 870        return 0;
 871}
 872
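     /*
      * Checksum every dirty metadata page in the bio before it goes to disk.
      */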
 873static int btree_csum_one_bio(struct bio *bio)
 874{
 875        struct bio_vec *bvec;
 876        struct btrfs_root *root;
 877        int i, ret = 0;
 878
 879        bio_for_each_segment_all(bvec, bio, i) {
 880                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
 881                ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
 882                if (ret)
 883                        break;
 884        }
 885
 886        return ret;
 887}
 888
 889static int __btree_submit_bio_start(struct inode *inode, int rw,
 890                                    struct bio *bio, int mirror_num,
 891                                    unsigned long bio_flags,
 892                                    u64 bio_offset)
 893{
 894        /*
 895         * when we're called for a write, we're already in the async
  896         * submission context.  Just checksum here; btrfs_map_bio runs in the done hook
 897         */
 898        return btree_csum_one_bio(bio);
 899}
 900
 901static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
 902                                 int mirror_num, unsigned long bio_flags,
 903                                 u64 bio_offset)
 904{
 905        int ret;
 906
 907        /*
 908         * when we're called for a write, we're already in the async
 909         * submission context.  Just jump into btrfs_map_bio
 910         */
 911        ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
 912        if (ret) {
 913                bio->bi_error = ret;
 914                bio_endio(bio);
 915        }
 916        return ret;
 917}
 918
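     /*
      * Decide whether a metadata write has its checksum computed inline or is
      * handed off to the async workers: tree log bios stay inline, and on x86
      * with hardware crc32c (SSE4.2) the checksum is cheap enough to do here.
      */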
 919static int check_async_write(struct inode *inode, unsigned long bio_flags)
 920{
 921        if (bio_flags & EXTENT_BIO_TREE_LOG)
 922                return 0;
 923#ifdef CONFIG_X86
 924        if (cpu_has_xmm4_2)
 925                return 0;
 926#endif
 927        return 1;
 928}
 929
 930static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 931                                 int mirror_num, unsigned long bio_flags,
 932                                 u64 bio_offset)
 933{
 934        int async = check_async_write(inode, bio_flags);
 935        int ret;
 936
 937        if (!(rw & REQ_WRITE)) {
 938                /*
 939                 * called for a read, do the setup so that checksum validation
 940                 * can happen in the async kernel threads
 941                 */
 942                ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
 943                                          bio, BTRFS_WQ_ENDIO_METADATA);
 944                if (ret)
 945                        goto out_w_error;
 946                ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
 947                                    mirror_num, 0);
 948        } else if (!async) {
 949                ret = btree_csum_one_bio(bio);
 950                if (ret)
 951                        goto out_w_error;
 952                ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
 953                                    mirror_num, 0);
 954        } else {
 955                /*
 956                 * kthread helpers are used to submit writes so that
 957                 * checksumming can happen in parallel across all CPUs
 958                 */
 959                ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
 960                                          inode, rw, bio, mirror_num, 0,
 961                                          bio_offset,
 962                                          __btree_submit_bio_start,
 963                                          __btree_submit_bio_done);
 964        }
 965
 966        if (ret)
 967                goto out_w_error;
 968        return 0;
 969
 970out_w_error:
 971        bio->bi_error = ret;
 972        bio_endio(bio);
 973        return ret;
 974}
 975
 976#ifdef CONFIG_MIGRATION
 977static int btree_migratepage(struct address_space *mapping,
 978                        struct page *newpage, struct page *page,
 979                        enum migrate_mode mode)
 980{
 981        /*
 982         * we can't safely write a btree page from here,
 983         * we haven't done the locking hook
 984         */
 985        if (PageDirty(page))
 986                return -EAGAIN;
 987        /*
 988         * Buffers may be managed in a filesystem specific way.
 989         * We must have no buffers or drop them.
 990         */
 991        if (page_has_private(page) &&
 992            !try_to_release_page(page, GFP_KERNEL))
 993                return -EAGAIN;
 994        return migrate_page(mapping, newpage, page, mode);
 995}
 996#endif
 997
 998
 999static int btree_writepages(struct address_space *mapping,
1000                            struct writeback_control *wbc)
1001{
1002        struct btrfs_fs_info *fs_info;
1003        int ret;
1004
1005        if (wbc->sync_mode == WB_SYNC_NONE) {
1006
1007                if (wbc->for_kupdate)
1008                        return 0;
1009
1010                fs_info = BTRFS_I(mapping->host)->root->fs_info;
1011                /* this is a bit racy, but that's ok */
1012                ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
1013                                             BTRFS_DIRTY_METADATA_THRESH);
1014                if (ret < 0)
1015                        return 0;
1016        }
1017        return btree_write_cache_pages(mapping, wbc);
1018}
1019
1020static int btree_readpage(struct file *file, struct page *page)
1021{
1022        struct extent_io_tree *tree;
1023        tree = &BTRFS_I(page->mapping->host)->io_tree;
1024        return extent_read_full_page(tree, page, btree_get_extent, 0);
1025}
1026
1027static int btree_releasepage(struct page *page, gfp_t gfp_flags)
1028{
1029        if (PageWriteback(page) || PageDirty(page))
1030                return 0;
1031
1032        return try_release_extent_buffer(page);
1033}
1034
1035static void btree_invalidatepage(struct page *page, unsigned int offset,
1036                                 unsigned int length)
1037{
1038        struct extent_io_tree *tree;
1039        tree = &BTRFS_I(page->mapping->host)->io_tree;
1040        extent_invalidatepage(tree, page, offset);
1041        btree_releasepage(page, GFP_NOFS);
1042        if (PagePrivate(page)) {
1043                btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
1044                           "page private not zero on page %llu",
1045                           (unsigned long long)page_offset(page));
1046                ClearPagePrivate(page);
1047                set_page_private(page, 0);
1048                page_cache_release(page);
1049        }
1050}
1051
1052static int btree_set_page_dirty(struct page *page)
1053{
1054#ifdef DEBUG
1055        struct extent_buffer *eb;
1056
1057        BUG_ON(!PagePrivate(page));
1058        eb = (struct extent_buffer *)page->private;
1059        BUG_ON(!eb);
1060        BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
1061        BUG_ON(!atomic_read(&eb->refs));
1062        btrfs_assert_tree_locked(eb);
1063#endif
1064        return __set_page_dirty_nobuffers(page);
1065}
1066
1067static const struct address_space_operations btree_aops = {
1068        .readpage       = btree_readpage,
1069        .writepages     = btree_writepages,
1070        .releasepage    = btree_releasepage,
1071        .invalidatepage = btree_invalidatepage,
1072#ifdef CONFIG_MIGRATION
1073        .migratepage    = btree_migratepage,
1074#endif
1075        .set_page_dirty = btree_set_page_dirty,
1076};
1077
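     /*
      * Kick off readahead of a single tree block.  The read is not waited on
      * and errors are ignored; the buffer reference is dropped right away.
      */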
1078void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
1079{
1080        struct extent_buffer *buf = NULL;
1081        struct inode *btree_inode = root->fs_info->btree_inode;
1082
1083        buf = btrfs_find_create_tree_block(root, bytenr);
1084        if (!buf)
1085                return;
1086        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1087                                 buf, 0, WAIT_NONE, btree_get_extent, 0);
1088        free_extent_buffer(buf);
1089}
1090
1091int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
1092                         int mirror_num, struct extent_buffer **eb)
1093{
1094        struct extent_buffer *buf = NULL;
1095        struct inode *btree_inode = root->fs_info->btree_inode;
1096        struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1097        int ret;
1098
1099        buf = btrfs_find_create_tree_block(root, bytenr);
1100        if (!buf)
1101                return 0;
1102
1103        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1104
1105        ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
1106                                       btree_get_extent, mirror_num);
1107        if (ret) {
1108                free_extent_buffer(buf);
1109                return ret;
1110        }
1111
1112        if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1113                free_extent_buffer(buf);
1114                return -EIO;
1115        } else if (extent_buffer_uptodate(buf)) {
1116                *eb = buf;
1117        } else {
1118                free_extent_buffer(buf);
1119        }
1120        return 0;
1121}
1122
1123struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
1124                                            u64 bytenr)
1125{
1126        return find_extent_buffer(fs_info, bytenr);
1127}
1128
1129struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1130                                                 u64 bytenr)
1131{
1132        if (btrfs_test_is_dummy_root(root))
1133                return alloc_test_extent_buffer(root->fs_info, bytenr);
1134        return alloc_extent_buffer(root->fs_info, bytenr);
1135}
1136
1137
1138int btrfs_write_tree_block(struct extent_buffer *buf)
1139{
1140        return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1141                                        buf->start + buf->len - 1);
1142}
1143
1144int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1145{
1146        return filemap_fdatawait_range(buf->pages[0]->mapping,
1147                                       buf->start, buf->start + buf->len - 1);
1148}
1149
1150struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1151                                      u64 parent_transid)
1152{
1153        struct extent_buffer *buf = NULL;
1154        int ret;
1155
1156        buf = btrfs_find_create_tree_block(root, bytenr);
1157        if (!buf)
1158                return ERR_PTR(-ENOMEM);
1159
1160        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1161        if (ret) {
1162                free_extent_buffer(buf);
1163                return ERR_PTR(ret);
1164        }
1165        return buf;
1166
1167}
1168
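     /*
      * If the buffer was dirtied in the currently running transaction, undo
      * the dirty metadata accounting and clear its dirty bit so it will not
      * be written out.
      */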
1169void clean_tree_block(struct btrfs_trans_handle *trans,
1170                      struct btrfs_fs_info *fs_info,
1171                      struct extent_buffer *buf)
1172{
1173        if (btrfs_header_generation(buf) ==
1174            fs_info->running_transaction->transid) {
1175                btrfs_assert_tree_locked(buf);
1176
1177                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1178                        __percpu_counter_add(&fs_info->dirty_metadata_bytes,
1179                                             -buf->len,
1180                                             fs_info->dirty_metadata_batch);
1181                        /* ugh, clear_extent_buffer_dirty needs to lock the page */
1182                        btrfs_set_lock_blocking(buf);
1183                        clear_extent_buffer_dirty(buf);
1184                }
1185        }
1186}
1187
1188static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
1189{
1190        struct btrfs_subvolume_writers *writers;
1191        int ret;
1192
1193        writers = kmalloc(sizeof(*writers), GFP_NOFS);
1194        if (!writers)
1195                return ERR_PTR(-ENOMEM);
1196
1197        ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
1198        if (ret < 0) {
1199                kfree(writers);
1200                return ERR_PTR(ret);
1201        }
1202
1203        init_waitqueue_head(&writers->wait);
1204        return writers;
1205}
1206
1207static void
1208btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
1209{
1210        percpu_counter_destroy(&writers->counter);
1211        kfree(writers);
1212}
1213
1214static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
1215                         struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1216                         u64 objectid)
1217{
1218        root->node = NULL;
1219        root->commit_root = NULL;
1220        root->sectorsize = sectorsize;
1221        root->nodesize = nodesize;
1222        root->stripesize = stripesize;
1223        root->state = 0;
1224        root->orphan_cleanup_state = 0;
1225
1226        root->objectid = objectid;
1227        root->last_trans = 0;
1228        root->highest_objectid = 0;
1229        root->nr_delalloc_inodes = 0;
1230        root->nr_ordered_extents = 0;
1231        root->name = NULL;
1232        root->inode_tree = RB_ROOT;
1233        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1234        root->block_rsv = NULL;
1235        root->orphan_block_rsv = NULL;
1236
1237        INIT_LIST_HEAD(&root->dirty_list);
1238        INIT_LIST_HEAD(&root->root_list);
1239        INIT_LIST_HEAD(&root->delalloc_inodes);
1240        INIT_LIST_HEAD(&root->delalloc_root);
1241        INIT_LIST_HEAD(&root->ordered_extents);
1242        INIT_LIST_HEAD(&root->ordered_root);
1243        INIT_LIST_HEAD(&root->logged_list[0]);
1244        INIT_LIST_HEAD(&root->logged_list[1]);
1245        spin_lock_init(&root->orphan_lock);
1246        spin_lock_init(&root->inode_lock);
1247        spin_lock_init(&root->delalloc_lock);
1248        spin_lock_init(&root->ordered_extent_lock);
1249        spin_lock_init(&root->accounting_lock);
1250        spin_lock_init(&root->log_extents_lock[0]);
1251        spin_lock_init(&root->log_extents_lock[1]);
1252        mutex_init(&root->objectid_mutex);
1253        mutex_init(&root->log_mutex);
1254        mutex_init(&root->ordered_extent_mutex);
1255        mutex_init(&root->delalloc_mutex);
1256        init_waitqueue_head(&root->log_writer_wait);
1257        init_waitqueue_head(&root->log_commit_wait[0]);
1258        init_waitqueue_head(&root->log_commit_wait[1]);
1259        INIT_LIST_HEAD(&root->log_ctxs[0]);
1260        INIT_LIST_HEAD(&root->log_ctxs[1]);
1261        atomic_set(&root->log_commit[0], 0);
1262        atomic_set(&root->log_commit[1], 0);
1263        atomic_set(&root->log_writers, 0);
1264        atomic_set(&root->log_batch, 0);
1265        atomic_set(&root->orphan_inodes, 0);
1266        atomic_set(&root->refs, 1);
1267        atomic_set(&root->will_be_snapshoted, 0);
1268        root->log_transid = 0;
1269        root->log_transid_committed = -1;
1270        root->last_log_commit = 0;
1271        if (fs_info)
1272                extent_io_tree_init(&root->dirty_log_pages,
1273                                     fs_info->btree_inode->i_mapping);
1274
1275        memset(&root->root_key, 0, sizeof(root->root_key));
1276        memset(&root->root_item, 0, sizeof(root->root_item));
1277        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1278        if (fs_info)
1279                root->defrag_trans_start = fs_info->generation;
1280        else
1281                root->defrag_trans_start = 0;
1282        root->root_key.objectid = objectid;
1283        root->anon_dev = 0;
1284
1285        spin_lock_init(&root->root_item_lock);
1286}
1287
1288static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
1289{
1290        struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
1291        if (root)
1292                root->fs_info = fs_info;
1293        return root;
1294}
1295
1296#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1297/* Should only be used by the testing infrastructure */
1298struct btrfs_root *btrfs_alloc_dummy_root(void)
1299{
1300        struct btrfs_root *root;
1301
1302        root = btrfs_alloc_root(NULL);
1303        if (!root)
1304                return ERR_PTR(-ENOMEM);
1305        __setup_root(4096, 4096, 4096, root, NULL, 1);
1306        set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
1307        root->alloc_bytenr = 0;
1308
1309        return root;
1310}
1311#endif
1312
1313struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1314                                     struct btrfs_fs_info *fs_info,
1315                                     u64 objectid)
1316{
1317        struct extent_buffer *leaf;
1318        struct btrfs_root *tree_root = fs_info->tree_root;
1319        struct btrfs_root *root;
1320        struct btrfs_key key;
1321        int ret = 0;
1322        uuid_le uuid;
1323
1324        root = btrfs_alloc_root(fs_info);
1325        if (!root)
1326                return ERR_PTR(-ENOMEM);
1327
1328        __setup_root(tree_root->nodesize, tree_root->sectorsize,
1329                tree_root->stripesize, root, fs_info, objectid);
1330        root->root_key.objectid = objectid;
1331        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1332        root->root_key.offset = 0;
1333
1334        leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
1335        if (IS_ERR(leaf)) {
1336                ret = PTR_ERR(leaf);
1337                leaf = NULL;
1338                goto fail;
1339        }
1340
1341        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1342        btrfs_set_header_bytenr(leaf, leaf->start);
1343        btrfs_set_header_generation(leaf, trans->transid);
1344        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1345        btrfs_set_header_owner(leaf, objectid);
1346        root->node = leaf;
1347
1348        write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
1349                            BTRFS_FSID_SIZE);
1350        write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
1351                            btrfs_header_chunk_tree_uuid(leaf),
1352                            BTRFS_UUID_SIZE);
1353        btrfs_mark_buffer_dirty(leaf);
1354
1355        root->commit_root = btrfs_root_node(root);
1356        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
1357
1358        root->root_item.flags = 0;
1359        root->root_item.byte_limit = 0;
1360        btrfs_set_root_bytenr(&root->root_item, leaf->start);
1361        btrfs_set_root_generation(&root->root_item, trans->transid);
1362        btrfs_set_root_level(&root->root_item, 0);
1363        btrfs_set_root_refs(&root->root_item, 1);
1364        btrfs_set_root_used(&root->root_item, leaf->len);
1365        btrfs_set_root_last_snapshot(&root->root_item, 0);
1366        btrfs_set_root_dirid(&root->root_item, 0);
1367        uuid_le_gen(&uuid);
1368        memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
1369        root->root_item.drop_level = 0;
1370
1371        key.objectid = objectid;
1372        key.type = BTRFS_ROOT_ITEM_KEY;
1373        key.offset = 0;
1374        ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1375        if (ret)
1376                goto fail;
1377
1378        btrfs_tree_unlock(leaf);
1379
1380        return root;
1381
1382fail:
1383        if (leaf) {
1384                btrfs_tree_unlock(leaf);
1385                free_extent_buffer(root->commit_root);
1386                free_extent_buffer(leaf);
1387        }
1388        kfree(root);
1389
1390        return ERR_PTR(ret);
1391}
1392
1393static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1394                                         struct btrfs_fs_info *fs_info)
1395{
1396        struct btrfs_root *root;
1397        struct btrfs_root *tree_root = fs_info->tree_root;
1398        struct extent_buffer *leaf;
1399
1400        root = btrfs_alloc_root(fs_info);
1401        if (!root)
1402                return ERR_PTR(-ENOMEM);
1403
1404        __setup_root(tree_root->nodesize, tree_root->sectorsize,
1405                     tree_root->stripesize, root, fs_info,
1406                     BTRFS_TREE_LOG_OBJECTID);
1407
1408        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1409        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1410        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1411
1412        /*
1413         * DON'T set REF_COWS for log trees
1414         *
1415         * log trees do not get reference counted because they go away
1416         * before a real commit is actually done.  They do store pointers
1417         * to file data extents, and those reference counts still get
1418         * updated (along with back refs to the log tree).
1419         */
1420
1421        leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
1422                        NULL, 0, 0, 0);
1423        if (IS_ERR(leaf)) {
1424                kfree(root);
1425                return ERR_CAST(leaf);
1426        }
1427
1428        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1429        btrfs_set_header_bytenr(leaf, leaf->start);
1430        btrfs_set_header_generation(leaf, trans->transid);
1431        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1432        btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1433        root->node = leaf;
1434
1435        write_extent_buffer(root->node, root->fs_info->fsid,
1436                            btrfs_header_fsid(), BTRFS_FSID_SIZE);
1437        btrfs_mark_buffer_dirty(root->node);
1438        btrfs_tree_unlock(root->node);
1439        return root;
1440}
1441
1442int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1443                             struct btrfs_fs_info *fs_info)
1444{
1445        struct btrfs_root *log_root;
1446
1447        log_root = alloc_log_tree(trans, fs_info);
1448        if (IS_ERR(log_root))
1449                return PTR_ERR(log_root);
1450        WARN_ON(fs_info->log_root_tree);
1451        fs_info->log_root_tree = log_root;
1452        return 0;
1453}
1454
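/*
 * create a log tree for one subvolume root and hook it up as
 * root->log_root, filling in the root item's inode fields and the
 * per-root log transaction state
 */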
1455int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1456                       struct btrfs_root *root)
1457{
1458        struct btrfs_root *log_root;
1459        struct btrfs_inode_item *inode_item;
1460
1461        log_root = alloc_log_tree(trans, root->fs_info);
1462        if (IS_ERR(log_root))
1463                return PTR_ERR(log_root);
1464
1465        log_root->last_trans = trans->transid;
1466        log_root->root_key.offset = root->root_key.objectid;
1467
1468        inode_item = &log_root->root_item.inode;
1469        btrfs_set_stack_inode_generation(inode_item, 1);
1470        btrfs_set_stack_inode_size(inode_item, 3);
1471        btrfs_set_stack_inode_nlink(inode_item, 1);
1472        btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
1473        btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1474
1475        btrfs_set_root_node(&log_root->root_item, log_root->node);
1476
1477        WARN_ON(root->log_root);
1478        root->log_root = log_root;
1479        root->log_transid = 0;
1480        root->log_transid_committed = -1;
1481        root->last_log_commit = 0;
1482        return 0;
1483}
1484
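/*
 * read a root item from the tree of tree roots and then read the tree
 * block it points to, returning a newly allocated in-memory root or an
 * ERR_PTR on failure
 */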
1485static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1486                                               struct btrfs_key *key)
1487{
1488        struct btrfs_root *root;
1489        struct btrfs_fs_info *fs_info = tree_root->fs_info;
1490        struct btrfs_path *path;
1491        u64 generation;
1492        int ret;
1493
1494        path = btrfs_alloc_path();
1495        if (!path)
1496                return ERR_PTR(-ENOMEM);
1497
1498        root = btrfs_alloc_root(fs_info);
1499        if (!root) {
1500                ret = -ENOMEM;
1501                goto alloc_fail;
1502        }
1503
1504        __setup_root(tree_root->nodesize, tree_root->sectorsize,
1505                tree_root->stripesize, root, fs_info, key->objectid);
1506
1507        ret = btrfs_find_root(tree_root, key, path,
1508                              &root->root_item, &root->root_key);
1509        if (ret) {
1510                if (ret > 0)
1511                        ret = -ENOENT;
1512                goto find_fail;
1513        }
1514
1515        generation = btrfs_root_generation(&root->root_item);
1516        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1517                                     generation);
1518        if (IS_ERR(root->node)) {
1519                ret = PTR_ERR(root->node);
1520                goto find_fail;
1521        } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1522                ret = -EIO;
1523                free_extent_buffer(root->node);
1524                goto find_fail;
1525        }
1526        root->commit_root = btrfs_root_node(root);
1527out:
1528        btrfs_free_path(path);
1529        return root;
1530
1531find_fail:
1532        kfree(root);
1533alloc_fail:
1534        root = ERR_PTR(ret);
1535        goto out;
1536}
1537
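/*
 * like btrfs_read_tree_root, but also flags non-log roots as reference
 * counted (REF_COWS) and sanity checks the root item
 */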
1538struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
1539                                      struct btrfs_key *location)
1540{
1541        struct btrfs_root *root;
1542
1543        root = btrfs_read_tree_root(tree_root, location);
1544        if (IS_ERR(root))
1545                return root;
1546
1547        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1548                set_bit(BTRFS_ROOT_REF_COWS, &root->state);
1549                btrfs_check_and_init_root_item(&root->root_item);
1550        }
1551
1552        return root;
1553}
1554
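/*
 * set up the runtime state of a subvolume root: the free inode caches,
 * the subvolume writer tracking and an anonymous bdev for the subvolume
 */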
1555int btrfs_init_fs_root(struct btrfs_root *root)
1556{
1557        int ret;
1558        struct btrfs_subvolume_writers *writers;
1559
1560        root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1561        root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1562                                        GFP_NOFS);
1563        if (!root->free_ino_pinned || !root->free_ino_ctl) {
1564                ret = -ENOMEM;
1565                goto fail;
1566        }
1567
1568        writers = btrfs_alloc_subvolume_writers();
1569        if (IS_ERR(writers)) {
1570                ret = PTR_ERR(writers);
1571                goto fail;
1572        }
1573        root->subv_writers = writers;
1574
1575        btrfs_init_free_ino_ctl(root);
1576        spin_lock_init(&root->ino_cache_lock);
1577        init_waitqueue_head(&root->ino_cache_wait);
1578
1579        ret = get_anon_bdev(&root->anon_dev);
1580        if (ret)
1581                goto free_writers;
1582        return 0;
1583
1584free_writers:
1585        btrfs_free_subvolume_writers(root->subv_writers);
1586fail:
1587        kfree(root->free_ino_ctl);
1588        kfree(root->free_ino_pinned);
1589        return ret;
1590}
1591
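/*
 * look up a cached root by objectid in the fs_roots_radix tree,
 * returning NULL if it is not cached
 */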
1592static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1593                                               u64 root_id)
1594{
1595        struct btrfs_root *root;
1596
1597        spin_lock(&fs_info->fs_roots_radix_lock);
1598        root = radix_tree_lookup(&fs_info->fs_roots_radix,
1599                                 (unsigned long)root_id);
1600        spin_unlock(&fs_info->fs_roots_radix_lock);
1601        return root;
1602}
1603
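/*
 * add a root to the fs_roots_radix cache, keyed by its objectid.
 * Returns -EEXIST if somebody else cached it first.
 */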
1604int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1605                         struct btrfs_root *root)
1606{
1607        int ret;
1608
1609        ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1610        if (ret)
1611                return ret;
1612
1613        spin_lock(&fs_info->fs_roots_radix_lock);
1614        ret = radix_tree_insert(&fs_info->fs_roots_radix,
1615                                (unsigned long)root->root_key.objectid,
1616                                root);
1617        if (ret == 0)
1618                set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1619        spin_unlock(&fs_info->fs_roots_radix_lock);
1620        radix_tree_preload_end();
1621
1622        return ret;
1623}
1624
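/*
 * main entry point for looking up roots: well known roots are returned
 * straight from fs_info, everything else is looked up in the radix
 * cache and, on a miss, read from disk, initialized, checked for an
 * orphan item and inserted into the cache
 */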
1625struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1626                                     struct btrfs_key *location,
1627                                     bool check_ref)
1628{
1629        struct btrfs_root *root;
1630        struct btrfs_path *path;
1631        struct btrfs_key key;
1632        int ret;
1633
1634        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1635                return fs_info->tree_root;
1636        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1637                return fs_info->extent_root;
1638        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1639                return fs_info->chunk_root;
1640        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1641                return fs_info->dev_root;
1642        if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1643                return fs_info->csum_root;
1644        if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1645                return fs_info->quota_root ? fs_info->quota_root :
1646                                             ERR_PTR(-ENOENT);
1647        if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
1648                return fs_info->uuid_root ? fs_info->uuid_root :
1649                                            ERR_PTR(-ENOENT);
1650again:
1651        root = btrfs_lookup_fs_root(fs_info, location->objectid);
1652        if (root) {
1653                if (check_ref && btrfs_root_refs(&root->root_item) == 0)
1654                        return ERR_PTR(-ENOENT);
1655                return root;
1656        }
1657
1658        root = btrfs_read_fs_root(fs_info->tree_root, location);
1659        if (IS_ERR(root))
1660                return root;
1661
1662        if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1663                ret = -ENOENT;
1664                goto fail;
1665        }
1666
1667        ret = btrfs_init_fs_root(root);
1668        if (ret)
1669                goto fail;
1670
1671        path = btrfs_alloc_path();
1672        if (!path) {
1673                ret = -ENOMEM;
1674                goto fail;
1675        }
1676        key.objectid = BTRFS_ORPHAN_OBJECTID;
1677        key.type = BTRFS_ORPHAN_ITEM_KEY;
1678        key.offset = location->objectid;
1679
1680        ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1681        btrfs_free_path(path);
1682        if (ret < 0)
1683                goto fail;
1684        if (ret == 0)
1685                set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1686
1687        ret = btrfs_insert_fs_root(fs_info, root);
1688        if (ret) {
1689                if (ret == -EEXIST) {
1690                        free_fs_root(root);
1691                        goto again;
1692                }
1693                goto fail;
1694        }
1695        return root;
1696fail:
1697        free_fs_root(root);
1698        return ERR_PTR(ret);
1699}
1700
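/*
 * bdi congestion callback: report congestion if any of the devices
 * backing this filesystem is congested
 */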
1701static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1702{
1703        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1704        int ret = 0;
1705        struct btrfs_device *device;
1706        struct backing_dev_info *bdi;
1707
1708        rcu_read_lock();
1709        list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1710                if (!device->bdev)
1711                        continue;
1712                bdi = blk_get_backing_dev_info(device->bdev);
1713                if (bdi_congested(bdi, bdi_bits)) {
1714                        ret = 1;
1715                        break;
1716                }
1717        }
1718        rcu_read_unlock();
1719        return ret;
1720}
1721
1722static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1723{
1724        int err;
1725
1726        err = bdi_setup_and_register(bdi, "btrfs");
1727        if (err)
1728                return err;
1729
1730        bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
1731        bdi->congested_fn       = btrfs_congested_fn;
1732        bdi->congested_data     = info;
1733        bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
1734        return 0;
1735}
1736
1737/*
1738 * called by the workqueue helpers to finally call the bio end_io
1739 * functions.  This is where read checksum verification actually happens
1740 */
1741static void end_workqueue_fn(struct btrfs_work *work)
1742{
1743        struct bio *bio;
1744        struct btrfs_end_io_wq *end_io_wq;
1745
1746        end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
1747        bio = end_io_wq->bio;
1748
1749        bio->bi_error = end_io_wq->error;
1750        bio->bi_private = end_io_wq->private;
1751        bio->bi_end_io = end_io_wq->end_io;
1752        kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
1753        bio_endio(bio);
1754}
1755
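/*
 * the cleaner kthread: runs delayed iputs, cleans one dead snapshot per
 * iteration, kicks off inode defrag and deletes unused block groups.
 * On exit it commits any transaction such work may have started.
 */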
1756static int cleaner_kthread(void *arg)
1757{
1758        struct btrfs_root *root = arg;
1759        int again;
1760        struct btrfs_trans_handle *trans;
1761
1762        do {
1763                again = 0;
1764
1765                /* Make the cleaner go to sleep early. */
1766                if (btrfs_need_cleaner_sleep(root))
1767                        goto sleep;
1768
1769                if (!mutex_trylock(&root->fs_info->cleaner_mutex))
1770                        goto sleep;
1771
1772                /*
1773                 * The fs status may have changed between the check above and
1774                 * the trylock, so check it again with the mutex held.
1775                 */
1776                if (btrfs_need_cleaner_sleep(root)) {
1777                        mutex_unlock(&root->fs_info->cleaner_mutex);
1778                        goto sleep;
1779                }
1780
1781                btrfs_run_delayed_iputs(root);
1782                again = btrfs_clean_one_deleted_snapshot(root);
1783                mutex_unlock(&root->fs_info->cleaner_mutex);
1784
1785                /*
1786                 * The defragger has dealt with the R/O remount and umount,
1787                 * so we needn't do anything special here.
1788                 */
1789                btrfs_run_defrag_inodes(root->fs_info);
1790
1791                /*
1792                 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1793                 * with relocation (btrfs_relocate_chunk) and relocation
1794                 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1795                 * after acquiring fs_info->delete_unused_bgs_mutex. So we
1796                 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1797                 * unused block groups.
1798                 */
1799                btrfs_delete_unused_bgs(root->fs_info);
1800sleep:
1801                if (!try_to_freeze() && !again) {
1802                        set_current_state(TASK_INTERRUPTIBLE);
1803                        if (!kthread_should_stop())
1804                                schedule();
1805                        __set_current_state(TASK_RUNNING);
1806                }
1807        } while (!kthread_should_stop());
1808
1809        /*
1810         * Transaction kthread is stopped before us and wakes us up.
1811         * However we might have started a new transaction and COWed some
1812         * tree blocks when deleting unused block groups for example. So
1813         * make sure we commit the transaction we started to have a clean
1814         * shutdown when evicting the btree inode - if it has dirty pages
1815         * when we do the final iput() on it, eviction will trigger a
1816         * writeback for it which will fail with null pointer dereferences
1817         * since work queues and other resources were already released and
1818         * destroyed by the time the iput/eviction/writeback is made.
1819         */
1820        trans = btrfs_attach_transaction(root);
1821        if (IS_ERR(trans)) {
1822                if (PTR_ERR(trans) != -ENOENT)
1823                        btrfs_err(root->fs_info,
1824                                  "cleaner transaction attach returned %ld",
1825                                  PTR_ERR(trans));
1826        } else {
1827                int ret;
1828
1829                ret = btrfs_commit_transaction(trans, root);
1830                if (ret)
1831                        btrfs_err(root->fs_info,
1832                                  "cleaner open transaction commit returned %d",
1833                                  ret);
1834        }
1835
1836        return 0;
1837}
1838
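/*
 * the transaction kthread: periodically commits the running transaction
 * once it is old enough (commit_interval) or blocked, waking up the
 * cleaner after every pass
 */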
1839static int transaction_kthread(void *arg)
1840{
1841        struct btrfs_root *root = arg;
1842        struct btrfs_trans_handle *trans;
1843        struct btrfs_transaction *cur;
1844        u64 transid;
1845        unsigned long now;
1846        unsigned long delay;
1847        bool cannot_commit;
1848
1849        do {
1850                cannot_commit = false;
1851                delay = HZ * root->fs_info->commit_interval;
1852                mutex_lock(&root->fs_info->transaction_kthread_mutex);
1853
1854                spin_lock(&root->fs_info->trans_lock);
1855                cur = root->fs_info->running_transaction;
1856                if (!cur) {
1857                        spin_unlock(&root->fs_info->trans_lock);
1858                        goto sleep;
1859                }
1860
1861                now = get_seconds();
1862                if (cur->state < TRANS_STATE_BLOCKED &&
1863                    (now < cur->start_time ||
1864                     now - cur->start_time < root->fs_info->commit_interval)) {
1865                        spin_unlock(&root->fs_info->trans_lock);
1866                        delay = HZ * 5;
1867                        goto sleep;
1868                }
1869                transid = cur->transid;
1870                spin_unlock(&root->fs_info->trans_lock);
1871
1872                /* If the file system is aborted, this will always fail. */
1873                trans = btrfs_attach_transaction(root);
1874                if (IS_ERR(trans)) {
1875                        if (PTR_ERR(trans) != -ENOENT)
1876                                cannot_commit = true;
1877                        goto sleep;
1878                }
1879                if (transid == trans->transid) {
1880                        btrfs_commit_transaction(trans, root);
1881                } else {
1882                        btrfs_end_transaction(trans, root);
1883                }
1884sleep:
1885                wake_up_process(root->fs_info->cleaner_kthread);
1886                mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1887
1888                if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1889                                      &root->fs_info->fs_state)))
1890                        btrfs_cleanup_transaction(root);
1891                if (!try_to_freeze()) {
1892                        set_current_state(TASK_INTERRUPTIBLE);
1893                        if (!kthread_should_stop() &&
1894                            (!btrfs_transaction_blocked(root->fs_info) ||
1895                             cannot_commit))
1896                                schedule_timeout(delay);
1897                        __set_current_state(TASK_RUNNING);
1898                }
1899        } while (!kthread_should_stop());
1900        return 0;
1901}
1902
1903/*
1904 * this will find the highest generation in the array of
1905 * root backups.  The index of the newest entry is returned,
1906 * or -1 if we can't find anything.
1907 *
1908 * We check to make sure the array is valid by comparing the
1909 * generation of the latest root in the array with the generation
1910 * in the super block.  If they don't match we pitch it.
1911 */
1912static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1913{
1914        u64 cur;
1915        int newest_index = -1;
1916        struct btrfs_root_backup *root_backup;
1917        int i;
1918
1919        for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1920                root_backup = info->super_copy->super_roots + i;
1921                cur = btrfs_backup_tree_root_gen(root_backup);
1922                if (cur == newest_gen)
1923                        newest_index = i;
1924        }
1925
1926        /* check to see if we actually wrapped around */
1927        if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1928                root_backup = info->super_copy->super_roots;
1929                cur = btrfs_backup_tree_root_gen(root_backup);
1930                if (cur == newest_gen)
1931                        newest_index = 0;
1932        }
1933        return newest_index;
1934}
1935
1936
1937/*
1938 * find the oldest backup so we know where to store new entries
1939 * in the backup array.  This will set the backup_root_index
1940 * field in the fs_info struct
1941 */
1942static void find_oldest_super_backup(struct btrfs_fs_info *info,
1943                                     u64 newest_gen)
1944{
1945        int newest_index = -1;
1946
1947        newest_index = find_newest_super_backup(info, newest_gen);
1948        /* if there was garbage in there, just move along */
1949        if (newest_index == -1) {
1950                info->backup_root_index = 0;
1951        } else {
1952                info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1953        }
1954}
1955
1956/*
1957 * copy all the root pointers into the super backup array.
1958 * this will bump the backup pointer by one when it is
1959 * done
1960 */
1961static void backup_super_roots(struct btrfs_fs_info *info)
1962{
1963        int next_backup;
1964        struct btrfs_root_backup *root_backup;
1965        int last_backup;
1966
1967        next_backup = info->backup_root_index;
1968        last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1969                BTRFS_NUM_BACKUP_ROOTS;
1970
1971        /*
1972         * just overwrite the last backup if we're at the same generation;
1973         * this happens only at umount
1974         */
1975        root_backup = info->super_for_commit->super_roots + last_backup;
1976        if (btrfs_backup_tree_root_gen(root_backup) ==
1977            btrfs_header_generation(info->tree_root->node))
1978                next_backup = last_backup;
1979
1980        root_backup = info->super_for_commit->super_roots + next_backup;
1981
1982        /*
1983         * make sure all of our padding and empty slots get zero filled
1984         * regardless of which ones we use today
1985         */
1986        memset(root_backup, 0, sizeof(*root_backup));
1987
1988        info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1989
1990        btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1991        btrfs_set_backup_tree_root_gen(root_backup,
1992                               btrfs_header_generation(info->tree_root->node));
1993
1994        btrfs_set_backup_tree_root_level(root_backup,
1995                               btrfs_header_level(info->tree_root->node));
1996
1997        btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1998        btrfs_set_backup_chunk_root_gen(root_backup,
1999                               btrfs_header_generation(info->chunk_root->node));
2000        btrfs_set_backup_chunk_root_level(root_backup,
2001                               btrfs_header_level(info->chunk_root->node));
2002
2003        btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
2004        btrfs_set_backup_extent_root_gen(root_backup,
2005                               btrfs_header_generation(info->extent_root->node));
2006        btrfs_set_backup_extent_root_level(root_backup,
2007                               btrfs_header_level(info->extent_root->node));
2008
2009        /*
2010         * we might commit during log recovery, which happens before we set
2011         * the fs_root.  Make sure it is valid before we fill it in.
2012         */
2013        if (info->fs_root && info->fs_root->node) {
2014                btrfs_set_backup_fs_root(root_backup,
2015                                         info->fs_root->node->start);
2016                btrfs_set_backup_fs_root_gen(root_backup,
2017                               btrfs_header_generation(info->fs_root->node));
2018                btrfs_set_backup_fs_root_level(root_backup,
2019                               btrfs_header_level(info->fs_root->node));
2020        }
2021
2022        btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
2023        btrfs_set_backup_dev_root_gen(root_backup,
2024                               btrfs_header_generation(info->dev_root->node));
2025        btrfs_set_backup_dev_root_level(root_backup,
2026                                       btrfs_header_level(info->dev_root->node));
2027
2028        btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
2029        btrfs_set_backup_csum_root_gen(root_backup,
2030                               btrfs_header_generation(info->csum_root->node));
2031        btrfs_set_backup_csum_root_level(root_backup,
2032                               btrfs_header_level(info->csum_root->node));
2033
2034        btrfs_set_backup_total_bytes(root_backup,
2035                             btrfs_super_total_bytes(info->super_copy));
2036        btrfs_set_backup_bytes_used(root_backup,
2037                             btrfs_super_bytes_used(info->super_copy));
2038        btrfs_set_backup_num_devices(root_backup,
2039                             btrfs_super_num_devices(info->super_copy));
2040
2041        /*
2042         * if we don't copy this out to the super_copy, it won't get remembered
2043         * for the next commit
2044         */
2045        memcpy(&info->super_copy->super_roots,
2046               &info->super_for_commit->super_roots,
2047               sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
2048}
2049
2050/*
2051 * this copies info out of the root backup array and back into
2052 * the in-memory super block.  It is meant to help iterate through
2053 * the array, so you send it the number of backups you've already
2054 * tried and the last backup index you used.
2055 *
2056 * this returns -1 when it has tried all the backups
2057 */
2058static noinline int next_root_backup(struct btrfs_fs_info *info,
2059                                     struct btrfs_super_block *super,
2060                                     int *num_backups_tried, int *backup_index)
2061{
2062        struct btrfs_root_backup *root_backup;
2063        int newest = *backup_index;
2064
2065        if (*num_backups_tried == 0) {
2066                u64 gen = btrfs_super_generation(super);
2067
2068                newest = find_newest_super_backup(info, gen);
2069                if (newest == -1)
2070                        return -1;
2071
2072                *backup_index = newest;
2073                *num_backups_tried = 1;
2074        } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
2075                /* we've tried all the backups, all done */
2076                return -1;
2077        } else {
2078                /* jump to the next oldest backup */
2079                newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
2080                        BTRFS_NUM_BACKUP_ROOTS;
2081                *backup_index = newest;
2082                *num_backups_tried += 1;
2083        }
2084        root_backup = super->super_roots + newest;
2085
2086        btrfs_set_super_generation(super,
2087                                   btrfs_backup_tree_root_gen(root_backup));
2088        btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
2089        btrfs_set_super_root_level(super,
2090                                   btrfs_backup_tree_root_level(root_backup));
2091        btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
2092
2093        /*
2094         * fixme: the total bytes and num_devices need to match, otherwise
2095         * we should require a fsck
2096         */
2097        btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2098        btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2099        return 0;
2100}
2101
2102/* helper to clean up workers */
2103static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2104{
2105        btrfs_destroy_workqueue(fs_info->fixup_workers);
2106        btrfs_destroy_workqueue(fs_info->delalloc_workers);
2107        btrfs_destroy_workqueue(fs_info->workers);
2108        btrfs_destroy_workqueue(fs_info->endio_workers);
2109        btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2110        btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2111        btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2112        btrfs_destroy_workqueue(fs_info->rmw_workers);
2113        btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2114        btrfs_destroy_workqueue(fs_info->endio_write_workers);
2115        btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2116        btrfs_destroy_workqueue(fs_info->submit_workers);
2117        btrfs_destroy_workqueue(fs_info->delayed_workers);
2118        btrfs_destroy_workqueue(fs_info->caching_workers);
2119        btrfs_destroy_workqueue(fs_info->readahead_workers);
2120        btrfs_destroy_workqueue(fs_info->flush_workers);
2121        btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2122        btrfs_destroy_workqueue(fs_info->extent_workers);
2123}
2124
2125static void free_root_extent_buffers(struct btrfs_root *root)
2126{
2127        if (root) {
2128                free_extent_buffer(root->node);
2129                free_extent_buffer(root->commit_root);
2130                root->node = NULL;
2131                root->commit_root = NULL;
2132        }
2133}
2134
2135/* helper to clean up tree roots */
2136static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2137{
2138        free_root_extent_buffers(info->tree_root);
2139
2140        free_root_extent_buffers(info->dev_root);
2141        free_root_extent_buffers(info->extent_root);
2142        free_root_extent_buffers(info->csum_root);
2143        free_root_extent_buffers(info->quota_root);
2144        free_root_extent_buffers(info->uuid_root);
2145        if (chunk_root)
2146                free_root_extent_buffers(info->chunk_root);
2147}
2148
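/*
 * free all in-memory subvolume roots: first everything on the dead
 * roots list, then whatever is still cached in the radix tree.  In the
 * error case also drop the log root tree and the pinned extents.
 */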
2149void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2150{
2151        int ret;
2152        struct btrfs_root *gang[8];
2153        int i;
2154
2155        while (!list_empty(&fs_info->dead_roots)) {
2156                gang[0] = list_entry(fs_info->dead_roots.next,
2157                                     struct btrfs_root, root_list);
2158                list_del(&gang[0]->root_list);
2159
2160                if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2161                        btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2162                } else {
2163                        free_extent_buffer(gang[0]->node);
2164                        free_extent_buffer(gang[0]->commit_root);
2165                        btrfs_put_fs_root(gang[0]);
2166                }
2167        }
2168
2169        while (1) {
2170                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2171                                             (void **)gang, 0,
2172                                             ARRAY_SIZE(gang));
2173                if (!ret)
2174                        break;
2175                for (i = 0; i < ret; i++)
2176                        btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2177        }
2178
2179        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2180                btrfs_free_log_root_tree(NULL, fs_info);
2181                btrfs_destroy_pinned_extent(fs_info->tree_root,
2182                                            fs_info->pinned_extents);
2183        }
2184}
2185
2186static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2187{
2188        mutex_init(&fs_info->scrub_lock);
2189        atomic_set(&fs_info->scrubs_running, 0);
2190        atomic_set(&fs_info->scrub_pause_req, 0);
2191        atomic_set(&fs_info->scrubs_paused, 0);
2192        atomic_set(&fs_info->scrub_cancel_req, 0);
2193        init_waitqueue_head(&fs_info->scrub_pause_wait);
2194        fs_info->scrub_workers_refcnt = 0;
2195}
2196
2197static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2198{
2199        spin_lock_init(&fs_info->balance_lock);
2200        mutex_init(&fs_info->balance_mutex);
2201        atomic_set(&fs_info->balance_running, 0);
2202        atomic_set(&fs_info->balance_pause_req, 0);
2203        atomic_set(&fs_info->balance_cancel_req, 0);
2204        fs_info->balance_ctl = NULL;
2205        init_waitqueue_head(&fs_info->balance_wait_q);
2206}
2207
2208static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
2209                                   struct btrfs_root *tree_root)
2210{
2211        fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2212        set_nlink(fs_info->btree_inode, 1);
2213        /*
2214         * we set the i_size on the btree inode to the max possible offset.
2215         * the real end of the address space is determined by all of
2216         * the devices in the system
2217         */
2218        fs_info->btree_inode->i_size = OFFSET_MAX;
2219        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2220
2221        RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2222        extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2223                             fs_info->btree_inode->i_mapping);
2224        BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2225        extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2226
2227        BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2228
2229        BTRFS_I(fs_info->btree_inode)->root = tree_root;
2230        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2231               sizeof(struct btrfs_key));
2232        set_bit(BTRFS_INODE_DUMMY,
2233                &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2234        btrfs_insert_inode_hash(fs_info->btree_inode);
2235}
2236
2237static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2238{
2239        fs_info->dev_replace.lock_owner = 0;
2240        atomic_set(&fs_info->dev_replace.nesting_level, 0);
2241        mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2242        mutex_init(&fs_info->dev_replace.lock_management_lock);
2243        mutex_init(&fs_info->dev_replace.lock);
2244        init_waitqueue_head(&fs_info->replace_wait);
2245}
2246
2247static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2248{
2249        spin_lock_init(&fs_info->qgroup_lock);
2250        mutex_init(&fs_info->qgroup_ioctl_lock);
2251        fs_info->qgroup_tree = RB_ROOT;
2252        fs_info->qgroup_op_tree = RB_ROOT;
2253        INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2254        fs_info->qgroup_seq = 1;
2255        fs_info->quota_enabled = 0;
2256        fs_info->pending_quota_state = 0;
2257        fs_info->qgroup_ulist = NULL;
2258        mutex_init(&fs_info->qgroup_rescan_lock);
2259}
2260
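/*
 * allocate all of the per-fs workqueues used for delalloc, bio
 * submission, endio handling and friends.  Returns -ENOMEM if any of
 * them could not be allocated.
 */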
2261static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2262                struct btrfs_fs_devices *fs_devices)
2263{
2264        int max_active = fs_info->thread_pool_size;
2265        unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2266
2267        fs_info->workers =
2268                btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
2269                                      max_active, 16);
2270
2271        fs_info->delalloc_workers =
2272                btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
2273
2274        fs_info->flush_workers =
2275                btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
2276
2277        fs_info->caching_workers =
2278                btrfs_alloc_workqueue("cache", flags, max_active, 0);
2279
2280        /*
2281         * a higher idle thresh on the submit workers makes it much more
2282         * likely that bios will be sent down in a sane order to the
2283         * devices
2284         */
2285        fs_info->submit_workers =
2286                btrfs_alloc_workqueue("submit", flags,
2287                                      min_t(u64, fs_devices->num_devices,
2288                                            max_active), 64);
2289
2290        fs_info->fixup_workers =
2291                btrfs_alloc_workqueue("fixup", flags, 1, 0);
2292
2293        /*
2294         * endios are largely parallel and should have a very
2295         * low idle thresh
2296         */
2297        fs_info->endio_workers =
2298                btrfs_alloc_workqueue("endio", flags, max_active, 4);
2299        fs_info->endio_meta_workers =
2300                btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
2301        fs_info->endio_meta_write_workers =
2302                btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
2303        fs_info->endio_raid56_workers =
2304                btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
2305        fs_info->endio_repair_workers =
2306                btrfs_alloc_workqueue("endio-repair", flags, 1, 0);
2307        fs_info->rmw_workers =
2308                btrfs_alloc_workqueue("rmw", flags, max_active, 2);
2309        fs_info->endio_write_workers =
2310                btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
2311        fs_info->endio_freespace_worker =
2312                btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
2313        fs_info->delayed_workers =
2314                btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
2315        fs_info->readahead_workers =
2316                btrfs_alloc_workqueue("readahead", flags, max_active, 2);
2317        fs_info->qgroup_rescan_workers =
2318                btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
2319        fs_info->extent_workers =
2320                btrfs_alloc_workqueue("extent-refs", flags,
2321                                      min_t(u64, fs_devices->num_devices,
2322                                            max_active), 8);
2323
2324        if (!(fs_info->workers && fs_info->delalloc_workers &&
2325              fs_info->submit_workers && fs_info->flush_workers &&
2326              fs_info->endio_workers && fs_info->endio_meta_workers &&
2327              fs_info->endio_meta_write_workers &&
2328              fs_info->endio_repair_workers &&
2329              fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2330              fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2331              fs_info->caching_workers && fs_info->readahead_workers &&
2332              fs_info->fixup_workers && fs_info->delayed_workers &&
2333              fs_info->extent_workers &&
2334              fs_info->qgroup_rescan_workers)) {
2335                return -ENOMEM;
2336        }
2337
2338        return 0;
2339}
2340
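/*
 * read the log root tree pointed to by the super block and replay the
 * logs.  On a read-only mount the result is committed right after
 * replay.
 */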
2341static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2342                            struct btrfs_fs_devices *fs_devices)
2343{
2344        int ret;
2345        struct btrfs_root *tree_root = fs_info->tree_root;
2346        struct btrfs_root *log_tree_root;
2347        struct btrfs_super_block *disk_super = fs_info->super_copy;
2348        u64 bytenr = btrfs_super_log_root(disk_super);
2349
2350        if (fs_devices->rw_devices == 0) {
2351                printk(KERN_WARNING "BTRFS: log replay required "
2352                       "on RO media\n");
2353                return -EIO;
2354        }
2355
2356        log_tree_root = btrfs_alloc_root(fs_info);
2357        if (!log_tree_root)
2358                return -ENOMEM;
2359
2360        __setup_root(tree_root->nodesize, tree_root->sectorsize,
2361                        tree_root->stripesize, log_tree_root, fs_info,
2362                        BTRFS_TREE_LOG_OBJECTID);
2363
2364        log_tree_root->node = read_tree_block(tree_root, bytenr,
2365                        fs_info->generation + 1);
2366        if (IS_ERR(log_tree_root->node)) {
2367                printk(KERN_ERR "BTRFS: failed to read log tree\n");
2368                ret = PTR_ERR(log_tree_root->node);
2369                kfree(log_tree_root);
2370                return ret;
2371        } else if (!extent_buffer_uptodate(log_tree_root->node)) {
2372                printk(KERN_ERR "BTRFS: failed to read log tree\n");
2373                free_extent_buffer(log_tree_root->node);
2374                kfree(log_tree_root);
2375                return -EIO;
2376        }
2377        /* returns with log_tree_root freed on success */
2378        ret = btrfs_recover_log_trees(log_tree_root);
2379        if (ret) {
2380                btrfs_error(tree_root->fs_info, ret,
2381                            "Failed to recover log tree");
2382                free_extent_buffer(log_tree_root->node);
2383                kfree(log_tree_root);
2384                return ret;
2385        }
2386
2387        if (fs_info->sb->s_flags & MS_RDONLY) {
2388                ret = btrfs_commit_super(tree_root);
2389                if (ret)
2390                        return ret;
2391        }
2392
2393        return 0;
2394}
2395
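/*
 * read the extent, device, csum and (if present) quota and uuid roots
 * from the tree of tree roots and hang them off fs_info
 */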
2396static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
2397                            struct btrfs_root *tree_root)
2398{
2399        struct btrfs_root *root;
2400        struct btrfs_key location;
2401        int ret;
2402
2403        location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2404        location.type = BTRFS_ROOT_ITEM_KEY;
2405        location.offset = 0;
2406
2407        root = btrfs_read_tree_root(tree_root, &location);
2408        if (IS_ERR(root))
2409                return PTR_ERR(root);
2410        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2411        fs_info->extent_root = root;
2412
2413        location.objectid = BTRFS_DEV_TREE_OBJECTID;
2414        root = btrfs_read_tree_root(tree_root, &location);
2415        if (IS_ERR(root))
2416                return PTR_ERR(root);
2417        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2418        fs_info->dev_root = root;
2419        btrfs_init_devices_late(fs_info);
2420
2421        location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2422        root = btrfs_read_tree_root(tree_root, &location);
2423        if (IS_ERR(root))
2424                return PTR_ERR(root);
2425        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2426        fs_info->csum_root = root;
2427
2428        location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2429        root = btrfs_read_tree_root(tree_root, &location);
2430        if (!IS_ERR(root)) {
2431                set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2432                fs_info->quota_enabled = 1;
2433                fs_info->pending_quota_state = 1;
2434                fs_info->quota_root = root;
2435        }
2436
2437        location.objectid = BTRFS_UUID_TREE_OBJECTID;
2438        root = btrfs_read_tree_root(tree_root, &location);
2439        if (IS_ERR(root)) {
2440                ret = PTR_ERR(root);
2441                if (ret != -ENOENT)
2442                        return ret;
2443        } else {
2444                set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2445                fs_info->uuid_root = root;
2446        }
2447
2448        return 0;
2449}
2450
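/*
 * open_ctree does the bulk of the mount work: read and validate the
 * super block, set up fs_info state and the workqueues, read the chunk
 * tree and the tree of tree roots (retrying with backup roots when a
 * read fails), and then bring the rest of the filesystem online.
 */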
2451int open_ctree(struct super_block *sb,
2452               struct btrfs_fs_devices *fs_devices,
2453               char *options)
2454{
2455        u32 sectorsize;
2456        u32 nodesize;
2457        u32 stripesize;
2458        u64 generation;
2459        u64 features;
2460        struct btrfs_key location;
2461        struct buffer_head *bh;
2462        struct btrfs_super_block *disk_super;
2463        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2464        struct btrfs_root *tree_root;
2465        struct btrfs_root *chunk_root;
2466        int ret;
2467        int err = -EINVAL;
2468        int num_backups_tried = 0;
2469        int backup_index = 0;
2470        int max_active;
2471
2472        tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
2473        chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
2474        if (!tree_root || !chunk_root) {
2475                err = -ENOMEM;
2476                goto fail;
2477        }
2478
2479        ret = init_srcu_struct(&fs_info->subvol_srcu);
2480        if (ret) {
2481                err = ret;
2482                goto fail;
2483        }
2484
2485        ret = setup_bdi(fs_info, &fs_info->bdi);
2486        if (ret) {
2487                err = ret;
2488                goto fail_srcu;
2489        }
2490
2491        ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2492        if (ret) {
2493                err = ret;
2494                goto fail_bdi;
2495        }
2496        fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
2497                                        (1 + ilog2(nr_cpu_ids));
2498
2499        ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2500        if (ret) {
2501                err = ret;
2502                goto fail_dirty_metadata_bytes;
2503        }
2504
2505        ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
2506        if (ret) {
2507                err = ret;
2508                goto fail_delalloc_bytes;
2509        }
2510
2511        fs_info->btree_inode = new_inode(sb);
2512        if (!fs_info->btree_inode) {
2513                err = -ENOMEM;
2514                goto fail_bio_counter;
2515        }
2516
2517        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2518
2519        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2520        INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2521        INIT_LIST_HEAD(&fs_info->trans_list);
2522        INIT_LIST_HEAD(&fs_info->dead_roots);
2523        INIT_LIST_HEAD(&fs_info->delayed_iputs);
2524        INIT_LIST_HEAD(&fs_info->delalloc_roots);
2525        INIT_LIST_HEAD(&fs_info->caching_block_groups);
2526        spin_lock_init(&fs_info->delalloc_root_lock);
2527        spin_lock_init(&fs_info->trans_lock);
2528        spin_lock_init(&fs_info->fs_roots_radix_lock);
2529        spin_lock_init(&fs_info->delayed_iput_lock);
2530        spin_lock_init(&fs_info->defrag_inodes_lock);
2531        spin_lock_init(&fs_info->free_chunk_lock);
2532        spin_lock_init(&fs_info->tree_mod_seq_lock);
2533        spin_lock_init(&fs_info->super_lock);
2534        spin_lock_init(&fs_info->qgroup_op_lock);
2535        spin_lock_init(&fs_info->buffer_lock);
2536        spin_lock_init(&fs_info->unused_bgs_lock);
2537        rwlock_init(&fs_info->tree_mod_log_lock);
2538        mutex_init(&fs_info->unused_bg_unpin_mutex);
2539        mutex_init(&fs_info->delete_unused_bgs_mutex);
2540        mutex_init(&fs_info->reloc_mutex);
2541        mutex_init(&fs_info->delalloc_root_mutex);
2542        seqlock_init(&fs_info->profiles_lock);
2543        init_rwsem(&fs_info->delayed_iput_sem);
2544
2545        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2546        INIT_LIST_HEAD(&fs_info->space_info);
2547        INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2548        INIT_LIST_HEAD(&fs_info->unused_bgs);
2549        btrfs_mapping_init(&fs_info->mapping_tree);
2550        btrfs_init_block_rsv(&fs_info->global_block_rsv,
2551                             BTRFS_BLOCK_RSV_GLOBAL);
2552        btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2553                             BTRFS_BLOCK_RSV_DELALLOC);
2554        btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2555        btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2556        btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2557        btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2558                             BTRFS_BLOCK_RSV_DELOPS);
2559        atomic_set(&fs_info->nr_async_submits, 0);
2560        atomic_set(&fs_info->async_delalloc_pages, 0);
2561        atomic_set(&fs_info->async_submit_draining, 0);
2562        atomic_set(&fs_info->nr_async_bios, 0);
2563        atomic_set(&fs_info->defrag_running, 0);
2564        atomic_set(&fs_info->qgroup_op_seq, 0);
2565        atomic64_set(&fs_info->tree_mod_seq, 0);
2566        fs_info->sb = sb;
2567        fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2568        fs_info->metadata_ratio = 0;
2569        fs_info->defrag_inodes = RB_ROOT;
2570        fs_info->free_chunk_space = 0;
2571        fs_info->tree_mod_log = RB_ROOT;
2572        fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2573        fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2574        /* readahead state */
2575        INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2576        spin_lock_init(&fs_info->reada_lock);
2577
2578        fs_info->thread_pool_size = min_t(unsigned long,
2579                                          num_online_cpus() + 2, 8);
2580
2581        INIT_LIST_HEAD(&fs_info->ordered_roots);
2582        spin_lock_init(&fs_info->ordered_root_lock);
2583        fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2584                                        GFP_NOFS);
2585        if (!fs_info->delayed_root) {
2586                err = -ENOMEM;
2587                goto fail_iput;
2588        }
2589        btrfs_init_delayed_root(fs_info->delayed_root);
2590
2591        btrfs_init_scrub(fs_info);
2592#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2593        fs_info->check_integrity_print_mask = 0;
2594#endif
2595        btrfs_init_balance(fs_info);
2596        btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2597
2598        sb->s_blocksize = 4096;
2599        sb->s_blocksize_bits = blksize_bits(4096);
2600        sb->s_bdi = &fs_info->bdi;
2601
2602        btrfs_init_btree_inode(fs_info, tree_root);
2603
2604        spin_lock_init(&fs_info->block_group_cache_lock);
2605        fs_info->block_group_cache_tree = RB_ROOT;
2606        fs_info->first_logical_byte = (u64)-1;
2607
2608        extent_io_tree_init(&fs_info->freed_extents[0],
2609                             fs_info->btree_inode->i_mapping);
2610        extent_io_tree_init(&fs_info->freed_extents[1],
2611                             fs_info->btree_inode->i_mapping);
2612        fs_info->pinned_extents = &fs_info->freed_extents[0];
2613        fs_info->do_barriers = 1;
2614
2615
2616        mutex_init(&fs_info->ordered_operations_mutex);
2617        mutex_init(&fs_info->tree_log_mutex);
2618        mutex_init(&fs_info->chunk_mutex);
2619        mutex_init(&fs_info->transaction_kthread_mutex);
2620        mutex_init(&fs_info->cleaner_mutex);
2621        mutex_init(&fs_info->volume_mutex);
2622        mutex_init(&fs_info->ro_block_group_mutex);
2623        init_rwsem(&fs_info->commit_root_sem);
2624        init_rwsem(&fs_info->cleanup_work_sem);
2625        init_rwsem(&fs_info->subvol_sem);
2626        sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2627
2628        btrfs_init_dev_replace_locks(fs_info);
2629        btrfs_init_qgroup(fs_info);
2630
2631        btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2632        btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2633
2634        init_waitqueue_head(&fs_info->transaction_throttle);
2635        init_waitqueue_head(&fs_info->transaction_wait);
2636        init_waitqueue_head(&fs_info->transaction_blocked_wait);
2637        init_waitqueue_head(&fs_info->async_submit_wait);
2638
2639        INIT_LIST_HEAD(&fs_info->pinned_chunks);
2640
2641        ret = btrfs_alloc_stripe_hash_table(fs_info);
2642        if (ret) {
2643                err = ret;
2644                goto fail_alloc;
2645        }
2646
2647        __setup_root(4096, 4096, 4096, tree_root,
2648                     fs_info, BTRFS_ROOT_TREE_OBJECTID);
2649
2650        invalidate_bdev(fs_devices->latest_bdev);
2651
2652        /*
2653         * Read super block and check the signature bytes only
2654         */
2655        bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2656        if (!bh) {
2657                err = -EINVAL;
2658                goto fail_alloc;
2659        }
2660
2661        /*
2662         * We want to check the superblock checksum; the type is stored inside.
2663         * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2664         */
2665        if (btrfs_check_super_csum(bh->b_data)) {
2666                printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
2667                err = -EINVAL;
2668                goto fail_alloc;
2669        }
2670
2671        /*
2672         * super_copy is zeroed at allocation time and we never touch the
2673         * following bytes up to INFO_SIZE; the checksum is calculated from
2674         * the whole block of INFO_SIZE
2675         */
2676        memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2677        memcpy(fs_info->super_for_commit, fs_info->super_copy,
2678               sizeof(*fs_info->super_for_commit));
2679        brelse(bh);
2680
2681        memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2682
2683        ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2684        if (ret) {
2685                printk(KERN_ERR "BTRFS: superblock contains fatal errors\n");
2686                err = -EINVAL;
2687                goto fail_alloc;
2688        }
2689
2690        disk_super = fs_info->super_copy;
2691        if (!btrfs_super_root(disk_super))
2692                goto fail_alloc;
2693
2694        /* check the FS state to see whether the FS is broken */
2695        if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2696                set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2697
2698        /*
2699         * run through our array of backup supers and set up
2700         * our ring pointer to the oldest one
2701         */
2702        generation = btrfs_super_generation(disk_super);
2703        find_oldest_super_backup(fs_info, generation);
2704
2705        /*
2706         * In the long term, we'll store the compression type in the super
2707         * block, and it'll be used for per-file compression control.
2708         */
2709        fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2710
2711        ret = btrfs_parse_options(tree_root, options);
2712        if (ret) {
2713                err = ret;
2714                goto fail_alloc;
2715        }
2716
2717        features = btrfs_super_incompat_flags(disk_super) &
2718                ~BTRFS_FEATURE_INCOMPAT_SUPP;
2719        if (features) {
2720                printk(KERN_ERR "BTRFS: couldn't mount because of "
2721                       "unsupported optional features (%Lx).\n",
2722                       features);
2723                err = -EINVAL;
2724                goto fail_alloc;
2725        }
2726
2727        /*
2728         * Leafsize and nodesize were always equal; this is only a sanity check.
2729         */
2730        if (le32_to_cpu(disk_super->__unused_leafsize) !=
2731            btrfs_super_nodesize(disk_super)) {
2732                printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2733                       "blocksizes don't match.  node %d leaf %d\n",
2734                       btrfs_super_nodesize(disk_super),
2735                       le32_to_cpu(disk_super->__unused_leafsize));
2736                err = -EINVAL;
2737                goto fail_alloc;
2738        }
2739        if (btrfs_super_nodesize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2740                printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2741                       "blocksize (%d) was too large\n",
2742                       btrfs_super_nodesize(disk_super));
2743                err = -EINVAL;
2744                goto fail_alloc;
2745        }
2746
2747        features = btrfs_super_incompat_flags(disk_super);
2748        features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2749        if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2750                features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2751
2752        if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2753                printk(KERN_INFO "BTRFS: has skinny extents\n");
2754
2755        /*
2756         * flag our filesystem as having big metadata blocks if
2757         * they are bigger than the page size
2758         */
2759        if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) {
2760                if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2761                        printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
2762                features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2763        }
2764
2765        nodesize = btrfs_super_nodesize(disk_super);
2766        sectorsize = btrfs_super_sectorsize(disk_super);
2767        stripesize = btrfs_super_stripesize(disk_super);
2768        fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2769        fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2770
2771        /*
2772         * mixed block groups end up with duplicate but slightly offset
2773         * extent buffers for the same range.  This leads to corruption.
2774         */
2775        if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2776            (sectorsize != nodesize)) {
2777                printk(KERN_ERR "BTRFS: unequal leaf/node/sector sizes "
2778                                "are not allowed for mixed block groups on %s\n",
2779                                sb->s_id);
2780                goto fail_alloc;
2781        }
2782
2783        /*
2784         * We needn't take the lock because there is no other task that will
2785         * update the flag.
2786         */
2787        btrfs_set_super_incompat_flags(disk_super, features);
2788
2789        features = btrfs_super_compat_ro_flags(disk_super) &
2790                ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2791        if (!(sb->s_flags & MS_RDONLY) && features) {
2792                printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2793                       "unsupported option features (%Lx).\n",
2794                       features);
2795                err = -EINVAL;
2796                goto fail_alloc;
2797        }
2798
2799        max_active = fs_info->thread_pool_size;
2800
2801        ret = btrfs_init_workqueues(fs_info, fs_devices);
2802        if (ret) {
2803                err = ret;
2804                goto fail_sb_buffer;
2805        }
2806
2807        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2808        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2809                                    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2810
2811        tree_root->nodesize = nodesize;
2812        tree_root->sectorsize = sectorsize;
2813        tree_root->stripesize = stripesize;
2814
2815        sb->s_blocksize = sectorsize;
2816        sb->s_blocksize_bits = blksize_bits(sectorsize);
2817
2818        if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
2819                printk(KERN_ERR "BTRFS: valid FS not found on %s\n", sb->s_id);
2820                goto fail_sb_buffer;
2821        }
2822
2823        if (sectorsize != PAGE_SIZE) {
2824                printk(KERN_ERR "BTRFS: incompatible sector size (%lu) "
2825                       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2826                goto fail_sb_buffer;
2827        }
2828
2829        mutex_lock(&fs_info->chunk_mutex);
2830        ret = btrfs_read_sys_array(tree_root);
2831        mutex_unlock(&fs_info->chunk_mutex);
2832        if (ret) {
2833                printk(KERN_ERR "BTRFS: failed to read the system "
2834                       "array on %s\n", sb->s_id);
2835                goto fail_sb_buffer;
2836        }
2837
2838        generation = btrfs_super_chunk_root_generation(disk_super);
2839
2840        __setup_root(nodesize, sectorsize, stripesize, chunk_root,
2841                     fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2842
2843        chunk_root->node = read_tree_block(chunk_root,
2844                                           btrfs_super_chunk_root(disk_super),
2845                                           generation);
2846        if (IS_ERR(chunk_root->node) ||
2847            !extent_buffer_uptodate(chunk_root->node)) {
2848                printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
2849                       sb->s_id);
2850                if (!IS_ERR(chunk_root->node))
2851                        free_extent_buffer(chunk_root->node);
2852                chunk_root->node = NULL;
2853                goto fail_tree_roots;
2854        }
2855        btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2856        chunk_root->commit_root = btrfs_root_node(chunk_root);
2857
2858        read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2859           btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2860
2861        ret = btrfs_read_chunk_tree(chunk_root);
2862        if (ret) {
2863                printk(KERN_ERR "BTRFS: failed to read chunk tree on %s\n",
2864                       sb->s_id);
2865                goto fail_tree_roots;
2866        }
2867
2868        /*
2869         * keep the device that is marked as the target device for the
2870         * dev_replace procedure
2871         */
2872        btrfs_close_extra_devices(fs_devices, 0);
2873
2874        if (!fs_devices->latest_bdev) {
2875                printk(KERN_ERR "BTRFS: failed to read devices on %s\n",
2876                       sb->s_id);
2877                goto fail_tree_roots;
2878        }
2879
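        /*
         * Read the tree root.  If that fails and the RECOVERY mount option
         * is set, recovery_tree_root below switches to the next superblock
         * backup root and jumps back here to retry.
         */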
2880retry_root_backup:
2881        generation = btrfs_super_generation(disk_super);
2882
2883        tree_root->node = read_tree_block(tree_root,
2884                                          btrfs_super_root(disk_super),
2885                                          generation);
2886        if (IS_ERR(tree_root->node) ||
2887            !extent_buffer_uptodate(tree_root->node)) {
2888                printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
2889                       sb->s_id);
2890                if (!IS_ERR(tree_root->node))
2891                        free_extent_buffer(tree_root->node);
2892                tree_root->node = NULL;
2893                goto recovery_tree_root;
2894        }
2895
2896        btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2897        tree_root->commit_root = btrfs_root_node(tree_root);
2898        btrfs_set_root_refs(&tree_root->root_item, 1);
2899
2900        ret = btrfs_read_roots(fs_info, tree_root);
2901        if (ret)
2902                goto recovery_tree_root;
2903
2904        fs_info->generation = generation;
2905        fs_info->last_trans_committed = generation;
2906
2907        ret = btrfs_recover_balance(fs_info);
2908        if (ret) {
2909                printk(KERN_ERR "BTRFS: failed to recover balance\n");
2910                goto fail_block_groups;
2911        }
2912
2913        ret = btrfs_init_dev_stats(fs_info);
2914        if (ret) {
2915                printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n",
2916                       ret);
2917                goto fail_block_groups;
2918        }
2919
2920        ret = btrfs_init_dev_replace(fs_info);
2921        if (ret) {
2922                pr_err("BTRFS: failed to init dev_replace: %d\n", ret);
2923                goto fail_block_groups;
2924        }
2925
2926        btrfs_close_extra_devices(fs_devices, 1);
2927
2928        ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
2929        if (ret) {
2930                pr_err("BTRFS: failed to init sysfs fsid interface: %d\n", ret);
2931                goto fail_block_groups;
2932        }
2933
2934        ret = btrfs_sysfs_add_device(fs_devices);
2935        if (ret) {
2936                pr_err("BTRFS: failed to init sysfs device interface: %d\n", ret);
2937                goto fail_fsdev_sysfs;
2938        }
2939
2940        ret = btrfs_sysfs_add_one(fs_info);
2941        if (ret) {
2942                pr_err("BTRFS: failed to init sysfs interface: %d\n", ret);
2943                goto fail_fsdev_sysfs;
2944        }
2945
2946        ret = btrfs_init_space_info(fs_info);
2947        if (ret) {
2948                printk(KERN_ERR "BTRFS: Failed to initialize space info: %d\n", ret);
2949                goto fail_sysfs;
2950        }
2951
2952        ret = btrfs_read_block_groups(fs_info->extent_root);
2953        if (ret) {
2954                printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret);
2955                goto fail_sysfs;
2956        }
2957        fs_info->num_tolerated_disk_barrier_failures =
2958                btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2959        if (fs_info->fs_devices->missing_devices >
2960             fs_info->num_tolerated_disk_barrier_failures &&
2961            !(sb->s_flags & MS_RDONLY)) {
2962                pr_warn("BTRFS: missing devices(%llu) exceeds the limit(%d), writeable mount is not allowed\n",
2963                        fs_info->fs_devices->missing_devices,
2964                        fs_info->num_tolerated_disk_barrier_failures);
2965                goto fail_sysfs;
2966        }
2967
2968        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2969                                               "btrfs-cleaner");
2970        if (IS_ERR(fs_info->cleaner_kthread))
2971                goto fail_sysfs;
2972
2973        fs_info->transaction_kthread = kthread_run(transaction_kthread,
2974                                                   tree_root,
2975                                                   "btrfs-transaction");
2976        if (IS_ERR(fs_info->transaction_kthread))
2977                goto fail_cleaner;
2978
2979        if (!btrfs_test_opt(tree_root, SSD) &&
2980            !btrfs_test_opt(tree_root, NOSSD) &&
2981            !fs_info->fs_devices->rotating) {
2982                printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD "
2983                       "mode\n");
2984                btrfs_set_opt(fs_info->mount_opt, SSD);
2985        }
2986
2987        /*
2988         * Mount does not set all options immediately; we can do it now and do
2989         * not have to wait for a transaction commit
2990         */
2991        btrfs_apply_pending_changes(fs_info);
2992
2993#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2994        if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2995                ret = btrfsic_mount(tree_root, fs_devices,
2996                                    btrfs_test_opt(tree_root,
2997                                        CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2998                                    1 : 0,
2999                                    fs_info->check_integrity_print_mask);
3000                if (ret)
3001                        printk(KERN_WARNING "BTRFS: failed to initialize"
3002                               " integrity check module %s\n", sb->s_id);
3003        }
3004#endif
3005        ret = btrfs_read_qgroup_config(fs_info);
3006        if (ret)
3007                goto fail_trans_kthread;
3008
3009        /* do not make disk changes in broken FS */
3010        if (btrfs_super_log_root(disk_super) != 0) {
3011                ret = btrfs_replay_log(fs_info, fs_devices);
3012                if (ret) {
3013                        err = ret;
3014                        goto fail_qgroup;
3015                }
3016        }
3017
3018        ret = btrfs_find_orphan_roots(tree_root);
3019        if (ret)
3020                goto fail_qgroup;
3021
3022        if (!(sb->s_flags & MS_RDONLY)) {
3023                ret = btrfs_cleanup_fs_roots(fs_info);
3024                if (ret)
3025                        goto fail_qgroup;
3026
3027                mutex_lock(&fs_info->cleaner_mutex);
3028                ret = btrfs_recover_relocation(tree_root);
3029                mutex_unlock(&fs_info->cleaner_mutex);
3030                if (ret < 0) {
3031                        printk(KERN_WARNING
3032                               "BTRFS: failed to recover relocation\n");
3033                        err = -EINVAL;
3034                        goto fail_qgroup;
3035                }
3036        }
3037
3038        location.objectid = BTRFS_FS_TREE_OBJECTID;
3039        location.type = BTRFS_ROOT_ITEM_KEY;
3040        location.offset = 0;
3041
3042        fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3043        if (IS_ERR(fs_info->fs_root)) {
3044                err = PTR_ERR(fs_info->fs_root);
3045                goto fail_qgroup;
3046        }
3047
3048        if (sb->s_flags & MS_RDONLY)
3049                return 0;
3050
3051        down_read(&fs_info->cleanup_work_sem);
3052        if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3053            (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3054                up_read(&fs_info->cleanup_work_sem);
3055                close_ctree(tree_root);
3056                return ret;
3057        }
3058        up_read(&fs_info->cleanup_work_sem);
3059
3060        ret = btrfs_resume_balance_async(fs_info);
3061        if (ret) {
3062                printk(KERN_WARNING "BTRFS: failed to resume balance\n");
3063                close_ctree(tree_root);
3064                return ret;
3065        }
3066
3067        ret = btrfs_resume_dev_replace_async(fs_info);
3068        if (ret) {
3069                pr_warn("BTRFS: failed to resume dev_replace\n");
3070                close_ctree(tree_root);
3071                return ret;
3072        }
3073
3074        btrfs_qgroup_rescan_resume(fs_info);
3075
3076        if (!fs_info->uuid_root) {
3077                pr_info("BTRFS: creating UUID tree\n");
3078                ret = btrfs_create_uuid_tree(fs_info);
3079                if (ret) {
3080                        pr_warn("BTRFS: failed to create the UUID tree %d\n",
3081                                ret);
3082                        close_ctree(tree_root);
3083                        return ret;
3084                }
3085        } else if (btrfs_test_opt(tree_root, RESCAN_UUID_TREE) ||
3086                   fs_info->generation !=
3087                                btrfs_super_uuid_tree_generation(disk_super)) {
3088                pr_info("BTRFS: checking UUID tree\n");
3089                ret = btrfs_check_uuid_tree(fs_info);
3090                if (ret) {
3091                        pr_warn("BTRFS: failed to check the UUID tree %d\n",
3092                                ret);
3093                        close_ctree(tree_root);
3094                        return ret;
3095                }
3096        } else {
3097                fs_info->update_uuid_tree_gen = 1;
3098        }
3099
3100        fs_info->open = 1;
3101
3102        return 0;
3103
3104fail_qgroup:
3105        btrfs_free_qgroup_config(fs_info);
3106fail_trans_kthread:
3107        kthread_stop(fs_info->transaction_kthread);
3108        btrfs_cleanup_transaction(fs_info->tree_root);
3109        btrfs_free_fs_roots(fs_info);
3110fail_cleaner:
3111        kthread_stop(fs_info->cleaner_kthread);
3112
3113        /*
3114         * make sure we're done with the btree inode before we stop our
3115         * kthreads
3116         */
3117        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3118
3119fail_sysfs:
3120        btrfs_sysfs_remove_one(fs_info);
3121
3122fail_fsdev_sysfs:
3123        btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3124
3125fail_block_groups:
3126        btrfs_put_block_group_cache(fs_info);
3127        btrfs_free_block_groups(fs_info);
3128
3129fail_tree_roots:
3130        free_root_pointers(fs_info, 1);
3131        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3132
3133fail_sb_buffer:
3134        btrfs_stop_all_workers(fs_info);
3135fail_alloc:
3136fail_iput:
3137        btrfs_mapping_tree_free(&fs_info->mapping_tree);
3138
3139        iput(fs_info->btree_inode);
3140fail_bio_counter:
3141        percpu_counter_destroy(&fs_info->bio_counter);
3142fail_delalloc_bytes:
3143        percpu_counter_destroy(&fs_info->delalloc_bytes);
3144fail_dirty_metadata_bytes:
3145        percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3146fail_bdi:
3147        bdi_destroy(&fs_info->bdi);
3148fail_srcu:
3149        cleanup_srcu_struct(&fs_info->subvol_srcu);
3150fail:
3151        btrfs_free_stripe_hash_table(fs_info);
3152        btrfs_close_devices(fs_info->fs_devices);
3153        return err;
3154
3155recovery_tree_root:
3156        if (!btrfs_test_opt(tree_root, RECOVERY))
3157                goto fail_tree_roots;
3158
3159        free_root_pointers(fs_info, 0);
3160
3161        /* don't use the log in recovery mode, it won't be valid */
3162        btrfs_set_super_log_root(disk_super, 0);
3163
3164        /* we can't trust the free space cache either */
3165        btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3166
3167        ret = next_root_backup(fs_info, fs_info->super_copy,
3168                               &num_backups_tried, &backup_index);
3169        if (ret == -1)
3170                goto fail_block_groups;
3171        goto retry_root_backup;
3172}
3173
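/*
 * end_io handler for the superblock buffer heads written by
 * write_dev_supers().  On failure the write error is counted in the
 * per-device statistics instead of the generic buffer error flags.
 */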
3174static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3175{
3176        if (uptodate) {
3177                set_buffer_uptodate(bh);
3178        } else {
3179                struct btrfs_device *device = (struct btrfs_device *)
3180                        bh->b_private;
3181
3182                printk_ratelimited_in_rcu(KERN_WARNING "BTRFS: lost page write due to "
3183                                          "I/O error on %s\n",
3184                                          rcu_str_deref(device->name));
3185                /* note, we don't set_buffer_write_io_error because we have
3186                 * our own ways of dealing with the IO errors
3187                 */
3188                clear_buffer_uptodate(bh);
3189                btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3190        }
3191        unlock_buffer(bh);
3192        put_bh(bh);
3193}
3194
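/*
 * Read the superblock of a device and return the buffer head of the copy
 * with the highest generation, or NULL if no valid copy was found.
 * Currently only the first mirror is scanned; see the comment below.
 */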
3195struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3196{
3197        struct buffer_head *bh;
3198        struct buffer_head *latest = NULL;
3199        struct btrfs_super_block *super;
3200        int i;
3201        u64 transid = 0;
3202        u64 bytenr;
3203
3204        /* we would like to check all the supers, but that would make
3205         * a btrfs mount succeed after a mkfs from a different FS.
3206         * So, we need to add a special mount option to scan for
3207         * later supers, using BTRFS_SUPER_MIRROR_MAX instead
3208         */
3209        for (i = 0; i < 1; i++) {
3210                bytenr = btrfs_sb_offset(i);
3211                if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3212                                        i_size_read(bdev->bd_inode))
3213                        break;
3214                bh = __bread(bdev, bytenr / 4096,
3215                                        BTRFS_SUPER_INFO_SIZE);
3216                if (!bh)
3217                        continue;
3218
3219                super = (struct btrfs_super_block *)bh->b_data;
3220                if (btrfs_super_bytenr(super) != bytenr ||
3221                    btrfs_super_magic(super) != BTRFS_MAGIC) {
3222                        brelse(bh);
3223                        continue;
3224                }
3225
3226                if (!latest || btrfs_super_generation(super) > transid) {
3227                        brelse(latest);
3228                        latest = bh;
3229                        transid = btrfs_super_generation(super);
3230                } else {
3231                        brelse(bh);
3232                }
3233        }
3234        return latest;
3235}
3236
3237/*
3238 * this should be called twice, once with wait == 0 and
3239 * once with wait == 1.  When wait == 0 is done, all the buffer heads
3240 * we write are pinned.
3241 *
3242 * They are released when wait == 1 is done.
3243 * max_mirrors must be the same for both runs, and it indicates how
3244 * many supers on this one device should be written.
3245 *
3246 * max_mirrors == 0 means to write them all.
3247 */
3248static int write_dev_supers(struct btrfs_device *device,
3249                            struct btrfs_super_block *sb,
3250                            int do_barriers, int wait, int max_mirrors)
3251{
3252        struct buffer_head *bh;
3253        int i;
3254        int ret;
3255        int errors = 0;
3256        u32 crc;
3257        u64 bytenr;
3258
3259        if (max_mirrors == 0)
3260                max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3261
3262        for (i = 0; i < max_mirrors; i++) {
3263                bytenr = btrfs_sb_offset(i);
3264                if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3265                    device->commit_total_bytes)
3266                        break;
3267
3268                if (wait) {
3269                        bh = __find_get_block(device->bdev, bytenr / 4096,
3270                                              BTRFS_SUPER_INFO_SIZE);
3271                        if (!bh) {
3272                                errors++;
3273                                continue;
3274                        }
3275                        wait_on_buffer(bh);
3276                        if (!buffer_uptodate(bh))
3277                                errors++;
3278
3279                        /* drop our reference */
3280                        brelse(bh);
3281
3282                        /* drop the reference from the wait == 0 run */
3283                        brelse(bh);
3284                        continue;
3285                } else {
3286                        btrfs_set_super_bytenr(sb, bytenr);
3287
3288                        crc = ~(u32)0;
3289                        crc = btrfs_csum_data((char *)sb +
3290                                              BTRFS_CSUM_SIZE, crc,
3291                                              BTRFS_SUPER_INFO_SIZE -
3292                                              BTRFS_CSUM_SIZE);
3293                        btrfs_csum_final(crc, sb->csum);
3294
3295                        /*
3296                         * one reference for us, and we leave it for the
3297                         * caller
3298                         */
3299                        bh = __getblk(device->bdev, bytenr / 4096,
3300                                      BTRFS_SUPER_INFO_SIZE);
3301                        if (!bh) {
3302                                printk(KERN_ERR "BTRFS: couldn't get super "
3303                                       "buffer head for bytenr %Lu\n", bytenr);
3304                                errors++;
3305                                continue;
3306                        }
3307
3308                        memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3309
3310                        /* one reference for submit_bh */
3311                        get_bh(bh);
3312
3313                        set_buffer_uptodate(bh);
3314                        lock_buffer(bh);
3315                        bh->b_end_io = btrfs_end_buffer_write_sync;
3316                        bh->b_private = device;
3317                }
3318
3319                /*
3320                 * we FUA the first super.  The others we allow
3321                 * to go down lazily.
3322                 */
3323                if (i == 0)
3324                        ret = btrfsic_submit_bh(WRITE_FUA, bh);
3325                else
3326                        ret = btrfsic_submit_bh(WRITE_SYNC, bh);
3327                if (ret)
3328                        errors++;
3329        }
3330        return errors < i ? 0 : -1;
3331}
3332
3333/*
3334 * endio for the write_dev_flush, this will wake anyone waiting
3335 * for the barrier when it is done
3336 */
3337static void btrfs_end_empty_barrier(struct bio *bio)
3338{
3339        if (bio->bi_private)
3340                complete(bio->bi_private);
3341        bio_put(bio);
3342}
3343
3344/*
3345 * trigger flushes for one of the devices.  If you pass wait == 0, the flushes
3346 * are sent down.  With wait == 1, it waits for the previous flush to finish.
3347 *
3348 * any device where the flush fails with EOPNOTSUPP is flagged as not-barrier
3349 * capable
3350 */
3351static int write_dev_flush(struct btrfs_device *device, int wait)
3352{
3353        struct bio *bio;
3354        int ret = 0;
3355
3356        if (device->nobarriers)
3357                return 0;
3358
3359        if (wait) {
3360                bio = device->flush_bio;
3361                if (!bio)
3362                        return 0;
3363
3364                wait_for_completion(&device->flush_wait);
3365
3366                if (bio->bi_error) {
3367                        ret = bio->bi_error;
3368                        btrfs_dev_stat_inc_and_print(device,
3369                                BTRFS_DEV_STAT_FLUSH_ERRS);
3370                }
3371
3372                /* drop the reference from the wait == 0 run */
3373                bio_put(bio);
3374                device->flush_bio = NULL;
3375
3376                return ret;
3377        }
3378
3379        /*
3380         * one reference for us, and we leave it for the
3381         * caller
3382         */
3383        device->flush_bio = NULL;
3384        bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3385        if (!bio)
3386                return -ENOMEM;
3387
3388        bio->bi_end_io = btrfs_end_empty_barrier;
3389        bio->bi_bdev = device->bdev;
3390        init_completion(&device->flush_wait);
3391        bio->bi_private = &device->flush_wait;
3392        device->flush_bio = bio;
3393
3394        bio_get(bio);
3395        btrfsic_submit_bio(WRITE_FLUSH, bio);
3396
3397        return 0;
3398}
3399
3400/*
3401 * send an empty flush down to each device in parallel,
3402 * then wait for them
3403 */
3404static int barrier_all_devices(struct btrfs_fs_info *info)
3405{
3406        struct list_head *head;
3407        struct btrfs_device *dev;
3408        int errors_send = 0;
3409        int errors_wait = 0;
3410        int ret;
3411
3412        /* send down all the barriers */
3413        head = &info->fs_devices->devices;
3414        list_for_each_entry_rcu(dev, head, dev_list) {
3415                if (dev->missing)
3416                        continue;
3417                if (!dev->bdev) {
3418                        errors_send++;
3419                        continue;
3420                }
3421                if (!dev->in_fs_metadata || !dev->writeable)
3422                        continue;
3423
3424                ret = write_dev_flush(dev, 0);
3425                if (ret)
3426                        errors_send++;
3427        }
3428
3429        /* wait for all the barriers */
3430        list_for_each_entry_rcu(dev, head, dev_list) {
3431                if (dev->missing)
3432                        continue;
3433                if (!dev->bdev) {
3434                        errors_wait++;
3435                        continue;
3436                }
3437                if (!dev->in_fs_metadata || !dev->writeable)
3438                        continue;
3439
3440                ret = write_dev_flush(dev, 1);
3441                if (ret)
3442                        errors_wait++;
3443        }
3444        if (errors_send > info->num_tolerated_disk_barrier_failures ||
3445            errors_wait > info->num_tolerated_disk_barrier_failures)
3446                return -EIO;
3447        return 0;
3448}
3449
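/*
 * Return the number of device failures a block group with the given
 * profile flags can tolerate: 0 for single/DUP/RAID0, 1 for
 * RAID1/RAID5/RAID10 and 2 for RAID6.
 */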
3450int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3451{
3452        if ((flags & (BTRFS_BLOCK_GROUP_DUP |
3453                      BTRFS_BLOCK_GROUP_RAID0 |
3454                      BTRFS_AVAIL_ALLOC_BIT_SINGLE)) ||
3455            ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0))
3456                return 0;
3457
3458        if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3459                     BTRFS_BLOCK_GROUP_RAID5 |
3460                     BTRFS_BLOCK_GROUP_RAID10))
3461                return 1;
3462
3463        if (flags & BTRFS_BLOCK_GROUP_RAID6)
3464                return 2;
3465
3466        pr_warn("BTRFS: unknown raid type: %llu\n", flags);
3467        return 0;
3468}
3469
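/*
 * Walk every space_info and every allocated block group profile and take
 * the minimum tolerance, giving the number of device barrier failures the
 * filesystem as a whole can tolerate.
 */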
3470int btrfs_calc_num_tolerated_disk_barrier_failures(
3471        struct btrfs_fs_info *fs_info)
3472{
3473        struct btrfs_ioctl_space_info space;
3474        struct btrfs_space_info *sinfo;
3475        u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3476                       BTRFS_BLOCK_GROUP_SYSTEM,
3477                       BTRFS_BLOCK_GROUP_METADATA,
3478                       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3479        int i;
3480        int c;
3481        int num_tolerated_disk_barrier_failures =
3482                (int)fs_info->fs_devices->num_devices;
3483
3484        for (i = 0; i < ARRAY_SIZE(types); i++) {
3485                struct btrfs_space_info *tmp;
3486
3487                sinfo = NULL;
3488                rcu_read_lock();
3489                list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3490                        if (tmp->flags == types[i]) {
3491                                sinfo = tmp;
3492                                break;
3493                        }
3494                }
3495                rcu_read_unlock();
3496
3497                if (!sinfo)
3498                        continue;
3499
3500                down_read(&sinfo->groups_sem);
3501                for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3502                        u64 flags;
3503
3504                        if (list_empty(&sinfo->block_groups[c]))
3505                                continue;
3506
3507                        btrfs_get_block_group_info(&sinfo->block_groups[c],
3508                                                   &space);
3509                        if (space.total_bytes == 0 || space.used_bytes == 0)
3510                                continue;
3511                        flags = space.flags;
3512
3513                        num_tolerated_disk_barrier_failures = min(
3514                                num_tolerated_disk_barrier_failures,
3515                                btrfs_get_num_tolerated_disk_barrier_failures(
3516                                        flags));
3517                }
3518                up_read(&sinfo->groups_sem);
3519        }
3520
3521        return num_tolerated_disk_barrier_failures;
3522}
3523
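/*
 * Write the super blocks to all writeable devices.  Barriers are sent
 * first (unless disabled with the NOBARRIER mount option), then the
 * supers are submitted in one pass and waited for in a second pass.
 */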
3524static int write_all_supers(struct btrfs_root *root, int max_mirrors)
3525{
3526        struct list_head *head;
3527        struct btrfs_device *dev;
3528        struct btrfs_super_block *sb;
3529        struct btrfs_dev_item *dev_item;
3530        int ret;
3531        int do_barriers;
3532        int max_errors;
3533        int total_errors = 0;
3534        u64 flags;
3535
3536        do_barriers = !btrfs_test_opt(root, NOBARRIER);
3537        backup_super_roots(root->fs_info);
3538
3539        sb = root->fs_info->super_for_commit;
3540        dev_item = &sb->dev_item;
3541
3542        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3543        head = &root->fs_info->fs_devices->devices;
3544        max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
3545
3546        if (do_barriers) {
3547                ret = barrier_all_devices(root->fs_info);
3548                if (ret) {
3549                        mutex_unlock(
3550                                &root->fs_info->fs_devices->device_list_mutex);
3551                        btrfs_error(root->fs_info, ret,
3552                                    "errors while submitting device barriers.");
3553                        return ret;
3554                }
3555        }
3556
3557        list_for_each_entry_rcu(dev, head, dev_list) {
3558                if (!dev->bdev) {
3559                        total_errors++;
3560                        continue;
3561                }
3562                if (!dev->in_fs_metadata || !dev->writeable)
3563                        continue;
3564
3565                btrfs_set_stack_device_generation(dev_item, 0);
3566                btrfs_set_stack_device_type(dev_item, dev->type);
3567                btrfs_set_stack_device_id(dev_item, dev->devid);
3568                btrfs_set_stack_device_total_bytes(dev_item,
3569                                                   dev->commit_total_bytes);
3570                btrfs_set_stack_device_bytes_used(dev_item,
3571                                                  dev->commit_bytes_used);
3572                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3573                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3574                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3575                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3576                memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3577
3578                flags = btrfs_super_flags(sb);
3579                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3580
3581                ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3582                if (ret)
3583                        total_errors++;
3584        }
3585        if (total_errors > max_errors) {
3586                btrfs_err(root->fs_info, "%d errors while writing supers",
3587                       total_errors);
3588                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3589
3590                /* FUA is masked off if unsupported and can't be the reason */
3591                btrfs_error(root->fs_info, -EIO,
3592                            "%d errors while writing supers", total_errors);
3593                return -EIO;
3594        }
3595
3596        total_errors = 0;
3597        list_for_each_entry_rcu(dev, head, dev_list) {
3598                if (!dev->bdev)
3599                        continue;
3600                if (!dev->in_fs_metadata || !dev->writeable)
3601                        continue;
3602
3603                ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3604                if (ret)
3605                        total_errors++;
3606        }
3607        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3608        if (total_errors > max_errors) {
3609                btrfs_error(root->fs_info, -EIO,
3610                            "%d errors while writing supers", total_errors);
3611                return -EIO;
3612        }
3613        return 0;
3614}
3615
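/* Write all super blocks for the filesystem; the transaction handle is unused here. */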
3616int write_ctree_super(struct btrfs_trans_handle *trans,
3617                      struct btrfs_root *root, int max_mirrors)
3618{
3619        return write_all_supers(root, max_mirrors);
3620}
3621
3622/* Drop a fs root from the radix tree and free it. */
3623void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3624                                  struct btrfs_root *root)
3625{
3626        spin_lock(&fs_info->fs_roots_radix_lock);
3627        radix_tree_delete(&fs_info->fs_roots_radix,
3628                          (unsigned long)root->root_key.objectid);
3629        spin_unlock(&fs_info->fs_roots_radix_lock);
3630
3631        if (btrfs_root_refs(&root->root_item) == 0)
3632                synchronize_srcu(&fs_info->subvol_srcu);
3633
3634        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3635                btrfs_free_log(NULL, root);
3636
3637        if (root->free_ino_pinned)
3638                __btrfs_remove_free_space_cache(root->free_ino_pinned);
3639        if (root->free_ino_ctl)
3640                __btrfs_remove_free_space_cache(root->free_ino_ctl);
3641        free_fs_root(root);
3642}
3643
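/* Free all the resources held by an fs root and drop our reference to it. */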
3644static void free_fs_root(struct btrfs_root *root)
3645{
3646        iput(root->ino_cache_inode);
3647        WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3648        btrfs_free_block_rsv(root, root->orphan_block_rsv);
3649        root->orphan_block_rsv = NULL;
3650        if (root->anon_dev)
3651                free_anon_bdev(root->anon_dev);
3652        if (root->subv_writers)
3653                btrfs_free_subvolume_writers(root->subv_writers);
3654        free_extent_buffer(root->node);
3655        free_extent_buffer(root->commit_root);
3656        kfree(root->free_ino_ctl);
3657        kfree(root->free_ino_pinned);
3658        kfree(root->name);
3659        btrfs_put_fs_root(root);
3660}
3661
3662void btrfs_free_fs_root(struct btrfs_root *root)
3663{
3664        free_fs_root(root);
3665}
3666
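/*
 * Run orphan cleanup on every fs root currently in the radix tree,
 * skipping roots that are already on their way to deletion.
 */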
3667int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3668{
3669        u64 root_objectid = 0;
3670        struct btrfs_root *gang[8];
3671        int i = 0;
3672        int err = 0;
3673        unsigned int ret = 0;
3674        int index;
3675
3676        while (1) {
3677                index = srcu_read_lock(&fs_info->subvol_srcu);
3678                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3679                                             (void **)gang, root_objectid,
3680                                             ARRAY_SIZE(gang));
3681                if (!ret) {
3682                        srcu_read_unlock(&fs_info->subvol_srcu, index);
3683                        break;
3684                }
3685                root_objectid = gang[ret - 1]->root_key.objectid + 1;
3686
3687                for (i = 0; i < ret; i++) {
3688                        /* Avoid grabbing roots in dead_roots */
3689                        if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3690                                gang[i] = NULL;
3691                                continue;
3692                        }
3693                        /* grab all the search results for later use */
3694                        gang[i] = btrfs_grab_fs_root(gang[i]);
3695                }
3696                srcu_read_unlock(&fs_info->subvol_srcu, index);
3697
3698                for (i = 0; i < ret; i++) {
3699                        if (!gang[i])
3700                                continue;
3701                        root_objectid = gang[i]->root_key.objectid;
3702                        err = btrfs_orphan_cleanup(gang[i]);
3703                        if (err)
3704                                break;
3705                        btrfs_put_fs_root(gang[i]);
3706                }
3707                root_objectid++;
3708        }
3709
3710        /* release the roots that were not cleaned up due to an error */
3711        for (; i < ret; i++) {
3712                if (gang[i])
3713                        btrfs_put_fs_root(gang[i]);
3714        }
3715        return err;
3716}
3717
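/*
 * Flush delayed iputs, wait for any in-flight cleanup work and commit
 * the current transaction.  Used when the filesystem is shut down cleanly.
 */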
3718int btrfs_commit_super(struct btrfs_root *root)
3719{
3720        struct btrfs_trans_handle *trans;
3721
3722        mutex_lock(&root->fs_info->cleaner_mutex);
3723        btrfs_run_delayed_iputs(root);
3724        mutex_unlock(&root->fs_info->cleaner_mutex);
3725        wake_up_process(root->fs_info->cleaner_kthread);
3726
3727        /* wait until ongoing cleanup work is done */
3728        down_write(&root->fs_info->cleanup_work_sem);
3729        up_write(&root->fs_info->cleanup_work_sem);
3730
3731        trans = btrfs_join_transaction(root);
3732        if (IS_ERR(trans))
3733                return PTR_ERR(trans);
3734        return btrfs_commit_transaction(trans, root);
3735}
3736
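/*
 * Tear down a mounted filesystem: stop the background threads and
 * workers, commit (or error out) the last transaction and release all
 * roots, block groups, devices and per-fs state.
 */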
3737void close_ctree(struct btrfs_root *root)
3738{
3739        struct btrfs_fs_info *fs_info = root->fs_info;
3740        int ret;
3741
3742        fs_info->closing = 1;
3743        smp_mb();
3744
3745        /* wait for the uuid_scan task to finish */
3746        down(&fs_info->uuid_tree_rescan_sem);
3747        /* avoid complaints from lockdep et al., set sem back to initial state */
3748        up(&fs_info->uuid_tree_rescan_sem);
3749
3750        /* pause restriper - we want to resume on mount */
3751        btrfs_pause_balance(fs_info);
3752
3753        btrfs_dev_replace_suspend_for_unmount(fs_info);
3754
3755        btrfs_scrub_cancel(fs_info);
3756
3757        /* wait for any defraggers to finish */
3758        wait_event(fs_info->transaction_wait,
3759                   (atomic_read(&fs_info->defrag_running) == 0));
3760
3761        /* clear out the rbtree of defraggable inodes */
3762        btrfs_cleanup_defrag_inodes(fs_info);
3763
3764        cancel_work_sync(&fs_info->async_reclaim_work);
3765
3766        if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3767                /*
3768                 * If the cleaner thread is stopped and there are
3769                 * block groups queued for removal, the deletion will be
3770                 * skipped when we quit the cleaner thread.
3771                 */
3772                btrfs_delete_unused_bgs(root->fs_info);
3773
3774                ret = btrfs_commit_super(root);
3775                if (ret)
3776                        btrfs_err(fs_info, "commit super ret %d", ret);
3777        }
3778
3779        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3780                btrfs_error_commit_super(root);
3781
3782        kthread_stop(fs_info->transaction_kthread);
3783        kthread_stop(fs_info->cleaner_kthread);
3784
3785        fs_info->closing = 2;
3786        smp_mb();
3787
3788        btrfs_free_qgroup_config(fs_info);
3789
3790        if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3791                btrfs_info(fs_info, "at unmount delalloc count %lld",
3792                       percpu_counter_sum(&fs_info->delalloc_bytes));
3793        }
3794
3795        btrfs_sysfs_remove_one(fs_info);
3796        btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3797
3798        btrfs_free_fs_roots(fs_info);
3799
3800        btrfs_put_block_group_cache(fs_info);
3801
3802        btrfs_free_block_groups(fs_info);
3803
3804        /*
3805         * we must make sure there are no read requests submitted
3806         * after we have stopped all the workers.
3807         */
3808        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3809        btrfs_stop_all_workers(fs_info);
3810
3811        fs_info->open = 0;
3812        free_root_pointers(fs_info, 1);
3813
3814        iput(fs_info->btree_inode);
3815
3816#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3817        if (btrfs_test_opt(root, CHECK_INTEGRITY))
3818                btrfsic_unmount(root, fs_info->fs_devices);
3819#endif
3820
3821        btrfs_close_devices(fs_info->fs_devices);
3822        btrfs_mapping_tree_free(&fs_info->mapping_tree);
3823
3824        percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3825        percpu_counter_destroy(&fs_info->delalloc_bytes);
3826        percpu_counter_destroy(&fs_info->bio_counter);
3827        bdi_destroy(&fs_info->bdi);
3828        cleanup_srcu_struct(&fs_info->subvol_srcu);
3829
3830        btrfs_free_stripe_hash_table(fs_info);
3831
3832        __btrfs_free_block_rsv(root->orphan_block_rsv);
3833        root->orphan_block_rsv = NULL;
3834
3835        lock_chunks(root);
3836        while (!list_empty(&fs_info->pinned_chunks)) {
3837                struct extent_map *em;
3838
3839                em = list_first_entry(&fs_info->pinned_chunks,
3840                                      struct extent_map, list);
3841                list_del_init(&em->list);
3842                free_extent_map(em);
3843        }
3844        unlock_chunks(root);
3845}
3846
3847int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3848                          int atomic)
3849{
3850        int ret;
3851        struct inode *btree_inode = buf->pages[0]->mapping->host;
3852
3853        ret = extent_buffer_uptodate(buf);
3854        if (!ret)
3855                return ret;
3856
3857        ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3858                                    parent_transid, atomic);
3859        if (ret == -EAGAIN)
3860                return ret;
3861        return !ret;
3862}
3863
3864int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3865{
3866        return set_extent_buffer_uptodate(buf);
3867}
3868
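/*
 * Mark a metadata extent buffer dirty and account its size in
 * dirty_metadata_bytes.  Warns if the buffer's generation does not match
 * the currently running transaction.
 */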
3869void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3870{
3871        struct btrfs_root *root;
3872        u64 transid = btrfs_header_generation(buf);
3873        int was_dirty;
3874
3875#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3876        /*
3877         * This is a fast path so only do this check if we have sanity tests
3878         * enabled.  Normal people shouldn't be marking dummy buffers as dirty
3879         * outside of the sanity tests.
3880         */
3881        if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
3882                return;
3883#endif
3884        root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3885        btrfs_assert_tree_locked(buf);
3886        if (transid != root->fs_info->generation)
3887                WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
3888                       "found %llu running %llu\n",
3889                        buf->start, transid, root->fs_info->generation);
3890        was_dirty = set_extent_buffer_dirty(buf);
3891        if (!was_dirty)
3892                __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
3893                                     buf->len,
3894                                     root->fs_info->dirty_metadata_batch);
3895#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3896        if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
3897                btrfs_print_leaf(root, buf);
3898                ASSERT(0);
3899        }
3900#endif
3901}
3902
3903static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3904                                        int flush_delayed)
3905{
3906        /*
3907         * looks as though older kernels can get into trouble with
3908         * this code; they end up stuck in balance_dirty_pages forever
3909         */
3910        int ret;
3911
3912        if (current->flags & PF_MEMALLOC)
3913                return;
3914
3915        if (flush_delayed)
3916                btrfs_balance_delayed_items(root);
3917
3918        ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
3919                                     BTRFS_DIRTY_METADATA_THRESH);
3920        if (ret > 0) {
3921                balance_dirty_pages_ratelimited(
3922                                   root->fs_info->btree_inode->i_mapping);
3923        }
3924        return;
3925}
3926
3927void btrfs_btree_balance_dirty(struct btrfs_root *root)
3928{
3929        __btrfs_btree_balance_dirty(root, 1);
3930}
3931
3932void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
3933{
3934        __btrfs_btree_balance_dirty(root, 0);
3935}
3936
3937int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3938{
3939        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3940        return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3941}
3942
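/*
 * Sanity checks of the superblock done at mount time.  Hard violations
 * return -EINVAL; values that are merely suspicious only produce a
 * warning, since more exact checks are done once the trees are read.
 */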
3943static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3944                              int read_only)
3945{
3946        struct btrfs_super_block *sb = fs_info->super_copy;
3947        int ret = 0;
3948
3949        if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
3950                printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
3951                                btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
3952                ret = -EINVAL;
3953        }
3954        if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
3955                printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
3956                                btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
3957                ret = -EINVAL;
3958        }
3959        if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
3960                printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
3961                                btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
3962                ret = -EINVAL;
3963        }
3964
3965        /*
3966         * This is the common minimum.  We can't yet trust the nodesize and
3967         * sectorsize items; they'll be verified later.  Issue just a warning.
3968         */
3969        if (!IS_ALIGNED(btrfs_super_root(sb), 4096))
3970                printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
3971                                btrfs_super_root(sb));
3972        if (!IS_ALIGNED(btrfs_super_chunk_root(sb), 4096))
3973                printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
3974                                btrfs_super_chunk_root(sb));
3975        if (!IS_ALIGNED(btrfs_super_log_root(sb), 4096))
3976                printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
3977                                btrfs_super_log_root(sb));
3978
3979        /*
3980         * Check the lower bound, the alignment and other constraints are
3981         * checked later.
3982         */
3983        if (btrfs_super_nodesize(sb) < 4096) {
3984                printk(KERN_ERR "BTRFS: nodesize too small: %u < 4096\n",
3985                                btrfs_super_nodesize(sb));
3986                ret = -EINVAL;
3987        }
3988        if (btrfs_super_sectorsize(sb) < 4096) {
3989                printk(KERN_ERR "BTRFS: sectorsize too small: %u < 4096\n",
3990                                btrfs_super_sectorsize(sb));
3991                ret = -EINVAL;
3992        }
3993
3994        if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
3995                printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
3996                                fs_info->fsid, sb->dev_item.fsid);
3997                ret = -EINVAL;
3998        }
3999
4000        /*
4001         * Hint to catch really bogus numbers, bitflips and the like; more
4002         * exact checks are done later
4003         */
4004        if (btrfs_super_num_devices(sb) > (1UL << 31))
4005                printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
4006                                btrfs_super_num_devices(sb));
4007        if (btrfs_super_num_devices(sb) == 0) {
4008                printk(KERN_ERR "BTRFS: number of devices is 0\n");
4009                ret = -EINVAL;
4010        }
4011
4012        if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
4013                printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
4014                                btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
4015                ret = -EINVAL;
4016        }
4017
4018        /*
4019         * Catch obvious sys_chunk_array corruptions: it must hold at least
4020         * one key and one chunk
4021         */
4022        if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4023                printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
4024                                btrfs_super_sys_array_size(sb),
4025                                BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
4026                ret = -EINVAL;
4027        }
4028        if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
4029                        + sizeof(struct btrfs_chunk)) {
4030                printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n",
4031                                btrfs_super_sys_array_size(sb),
4032                                sizeof(struct btrfs_disk_key)
4033                                + sizeof(struct btrfs_chunk));
4034                ret = -EINVAL;
4035        }
4036
4037        /*
4038         * The generation is a global counter; we'll trust it more than the
4039         * others, but it's still possible that it is the one that's wrong.
4040         */
4041        if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
4042                printk(KERN_WARNING
4043                        "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
4044                        btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb));
4045        if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
4046            && btrfs_super_cache_generation(sb) != (u64)-1)
4047                printk(KERN_WARNING
4048                        "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
4049                        btrfs_super_generation(sb), btrfs_super_cache_generation(sb));
4050
4051        return ret;
4052}
4053
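/*
 * Unmount path for a filesystem that hit a fatal error: run the pending
 * delayed iputs, wait for cleanup work and then tear down the current
 * transaction without committing it.
 */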
4054static void btrfs_error_commit_super(struct btrfs_root *root)
4055{
4056        mutex_lock(&root->fs_info->cleaner_mutex);
4057        btrfs_run_delayed_iputs(root);
4058        mutex_unlock(&root->fs_info->cleaner_mutex);
4059
4060        down_write(&root->fs_info->cleanup_work_sem);
4061        up_write(&root->fs_info->cleanup_work_sem);
4062
4063        /* cleanup FS via transaction */
4064        btrfs_cleanup_transaction(root);
4065}
4066
4067static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4068{
4069        struct btrfs_ordered_extent *ordered;
4070
4071        spin_lock(&root->ordered_extent_lock);
4072        /*
4073         * This will just short-circuit the ordered completion stuff, which will
4074         * make sure the ordered extent gets properly cleaned up.
4075         */
4076        list_for_each_entry(ordered, &root->ordered_extents,
4077                            root_extent_list)
4078                set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4079        spin_unlock(&root->ordered_extent_lock);
4080}
4081
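/* Mark the ordered extents of every ordered root as failed (abort path). */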
4082static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4083{
4084        struct btrfs_root *root;
4085        struct list_head splice;
4086
4087        INIT_LIST_HEAD(&splice);
4088
4089        spin_lock(&fs_info->ordered_root_lock);
4090        list_splice_init(&fs_info->ordered_roots, &splice);
4091        while (!list_empty(&splice)) {
4092                root = list_first_entry(&splice, struct btrfs_root,
4093                                        ordered_root);
4094                list_move_tail(&root->ordered_root,
4095                               &fs_info->ordered_roots);
4096
4097                spin_unlock(&fs_info->ordered_root_lock);
4098                btrfs_destroy_ordered_extents(root);
4099
4100                cond_resched();
4101                spin_lock(&fs_info->ordered_root_lock);
4102        }
4103        spin_unlock(&fs_info->ordered_root_lock);
4104}
4105
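/*
 * Drop every delayed ref still queued on an aborted transaction.  Extents
 * whose insertion was already reserved are pinned so their space is not
 * leaked.
 */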
4106static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4107                                      struct btrfs_root *root)
4108{
4109        struct rb_node *node;
4110        struct btrfs_delayed_ref_root *delayed_refs;
4111        struct btrfs_delayed_ref_node *ref;
4112        int ret = 0;
4113
4114        delayed_refs = &trans->delayed_refs;
4115
4116        spin_lock(&delayed_refs->lock);
4117        if (atomic_read(&delayed_refs->num_entries) == 0) {
4118                spin_unlock(&delayed_refs->lock);
4119                btrfs_info(root->fs_info, "delayed_refs has NO entry");
4120                return ret;
4121        }
4122
4123        while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
4124                struct btrfs_delayed_ref_head *head;
4125                struct btrfs_delayed_ref_node *tmp;
4126                bool pin_bytes = false;
4127
4128                head = rb_entry(node, struct btrfs_delayed_ref_head,
4129                                href_node);
4130                if (!mutex_trylock(&head->mutex)) {
4131                        atomic_inc(&head->node.refs);
4132                        spin_unlock(&delayed_refs->lock);
4133
4134                        mutex_lock(&head->mutex);
4135                        mutex_unlock(&head->mutex);
4136                        btrfs_put_delayed_ref(&head->node);
4137                        spin_lock(&delayed_refs->lock);
4138                        continue;
4139                }
4140                spin_lock(&head->lock);
4141                list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
4142                                                 list) {
4143                        ref->in_tree = 0;
4144                        list_del(&ref->list);
4145                        atomic_dec(&delayed_refs->num_entries);
4146                        btrfs_put_delayed_ref(ref);
4147                }
4148                if (head->must_insert_reserved)
4149                        pin_bytes = true;
4150                btrfs_free_delayed_extent_op(head->extent_op);
4151                delayed_refs->num_heads--;
4152                if (head->processing == 0)
4153                        delayed_refs->num_heads_ready--;
4154                atomic_dec(&delayed_refs->num_entries);
4155                head->node.in_tree = 0;
4156                rb_erase(&head->href_node, &delayed_refs->href_root);
4157                spin_unlock(&head->lock);
4158                spin_unlock(&delayed_refs->lock);
4159                mutex_unlock(&head->mutex);
4160
4161                if (pin_bytes)
4162                        btrfs_pin_extent(root, head->node.bytenr,
4163                                         head->node.num_bytes, 1);
4164                btrfs_put_delayed_ref(&head->node);
4165                cond_resched();
4166                spin_lock(&delayed_refs->lock);
4167        }
4168
4169        spin_unlock(&delayed_refs->lock);
4170
4171        return ret;
4172}
4173
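/*
 * Take every inode off the root's delalloc list and invalidate the
 * root's inodes; used on the transaction abort path.
 */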
4174static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4175{
4176        struct btrfs_inode *btrfs_inode;
4177        struct list_head splice;
4178
4179        INIT_LIST_HEAD(&splice);
4180
4181        spin_lock(&root->delalloc_lock);
4182        list_splice_init(&root->delalloc_inodes, &splice);
4183
4184        while (!list_empty(&splice)) {
4185                btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4186                                               delalloc_inodes);
4187
4188                list_del_init(&btrfs_inode->delalloc_inodes);
4189                clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
4190                          &btrfs_inode->runtime_flags);
4191                spin_unlock(&root->delalloc_lock);
4192
4193                btrfs_invalidate_inodes(btrfs_inode->root);
4194
4195                spin_lock(&root->delalloc_lock);
4196        }
4197
4198        spin_unlock(&root->delalloc_lock);
4199}
4200
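/* Run btrfs_destroy_delalloc_inodes() on every root that has delalloc inodes. */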
4201static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4202{
4203        struct btrfs_root *root;
4204        struct list_head splice;
4205
4206        INIT_LIST_HEAD(&splice);
4207
4208        spin_lock(&fs_info->delalloc_root_lock);
4209        list_splice_init(&fs_info->delalloc_roots, &splice);
4210        while (!list_empty(&splice)) {
4211                root = list_first_entry(&splice, struct btrfs_root,
4212                                         delalloc_root);
4213                list_del_init(&root->delalloc_root);
4214                root = btrfs_grab_fs_root(root);
4215                BUG_ON(!root);
4216                spin_unlock(&fs_info->delalloc_root_lock);
4217
4218                btrfs_destroy_delalloc_inodes(root);
4219                btrfs_put_fs_root(root);
4220
4221                spin_lock(&fs_info->delalloc_root_lock);
4222        }
4223        spin_unlock(&fs_info->delalloc_root_lock);
4224}
4225
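/*
 * Clear the given mark from the dirty_pages tree and drop the dirty bit
 * from the extent buffers covered by it (transaction abort path).
 */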
4226static int btrfs_destroy_marked_extents(struct btrfs_root *root,
4227                                        struct extent_io_tree *dirty_pages,
4228                                        int mark)
4229{
4230        int ret;
4231        struct extent_buffer *eb;
4232        u64 start = 0;
4233        u64 end;
4234
4235        while (1) {
4236                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4237                                            mark, NULL);
4238                if (ret)
4239                        break;
4240
4241                clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
4242                while (start <= end) {
4243                        eb = btrfs_find_tree_block(root->fs_info, start);
4244                        start += root->nodesize;
4245                        if (!eb)
4246                                continue;
4247                        wait_on_extent_buffer_writeback(eb);
4248
4249                        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4250                                               &eb->bflags))
4251                                clear_extent_buffer_dirty(eb);
4252                        free_extent_buffer_stale(eb);
4253                }
4254        }
4255
4256        return ret;
4257}
4258
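/*
 * Error-path unpin: clear every EXTENT_DIRTY range in the pinned extent
 * tree and hand it to btrfs_error_unpin_extent_range(), then do the same
 * for the other freed_extents tree.
 */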
4259static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
4260                                       struct extent_io_tree *pinned_extents)
4261{
4262        struct extent_io_tree *unpin;
4263        u64 start;
4264        u64 end;
4265        int ret;
4266        bool loop = true;
4267
4268        unpin = pinned_extents;
4269again:
4270        while (1) {
4271                ret = find_first_extent_bit(unpin, 0, &start, &end,
4272                                            EXTENT_DIRTY, NULL);
4273                if (ret)
4274                        break;
4275
4276                clear_extent_dirty(unpin, start, end, GFP_NOFS);
4277                btrfs_error_unpin_extent_range(root, start, end);
4278                cond_resched();
4279        }
4280
4281        if (loop) {
4282                if (unpin == &root->fs_info->freed_extents[0])
4283                        unpin = &root->fs_info->freed_extents[1];
4284                else
4285                        unpin = &root->fs_info->freed_extents[0];
4286                loop = false;
4287                goto again;
4288        }
4289
4290        return 0;
4291}
4292
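/*
 * Drop the references the transaction still holds on ordered extents
 * left on its pending_ordered list.
 */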
4293static void btrfs_free_pending_ordered(struct btrfs_transaction *cur_trans,
4294                                       struct btrfs_fs_info *fs_info)
4295{
4296        struct btrfs_ordered_extent *ordered;
4297
4298        spin_lock(&fs_info->trans_lock);
4299        while (!list_empty(&cur_trans->pending_ordered)) {
4300                ordered = list_first_entry(&cur_trans->pending_ordered,
4301                                           struct btrfs_ordered_extent,
4302                                           trans_list);
4303                list_del_init(&ordered->trans_list);
4304                spin_unlock(&fs_info->trans_lock);
4305
4306                btrfs_put_ordered_extent(ordered);
4307                spin_lock(&fs_info->trans_lock);
4308        }
4309        spin_unlock(&fs_info->trans_lock);
4310}
4311
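/*
 * Push an aborted transaction through its remaining states so that any
 * waiters are woken, then discard its delayed refs, pending ordered
 * extents, delayed inodes, dirty tree blocks and pinned extents.
 */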
4312void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4313                                   struct btrfs_root *root)
4314{
4315        btrfs_destroy_delayed_refs(cur_trans, root);
4316
4317        cur_trans->state = TRANS_STATE_COMMIT_START;
4318        wake_up(&root->fs_info->transaction_blocked_wait);
4319
4320        cur_trans->state = TRANS_STATE_UNBLOCKED;
4321        wake_up(&root->fs_info->transaction_wait);
4322
4323        btrfs_free_pending_ordered(cur_trans, root->fs_info);
4324        btrfs_destroy_delayed_inodes(root);
4325        btrfs_assert_delayed_root_empty(root);
4326
4327        btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
4328                                     EXTENT_DIRTY);
4329        btrfs_destroy_pinned_extent(root,
4330                                    root->fs_info->pinned_extents);
4331
4332        cur_trans->state = TRANS_STATE_COMPLETED;
4333        wake_up(&cur_trans->commit_wait);
4334
4335        /*
4336        memset(cur_trans, 0, sizeof(*cur_trans));
4337        kmem_cache_free(btrfs_transaction_cachep, cur_trans);
4338        */
4339}
4340
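/*
 * Clean up every transaction still on fs_info->trans_list when a normal
 * commit is no longer possible: wait for transactions that already
 * started committing, tear down the rest, and throw away the ordered
 * extents, delayed inodes, pinned extents and delalloc inodes left
 * behind.
 */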
4341static int btrfs_cleanup_transaction(struct btrfs_root *root)
4342{
4343        struct btrfs_transaction *t;
4344
4345        mutex_lock(&root->fs_info->transaction_kthread_mutex);
4346
4347        spin_lock(&root->fs_info->trans_lock);
4348        while (!list_empty(&root->fs_info->trans_list)) {
4349                t = list_first_entry(&root->fs_info->trans_list,
4350                                     struct btrfs_transaction, list);
4351                if (t->state >= TRANS_STATE_COMMIT_START) {
4352                        atomic_inc(&t->use_count);
4353                        spin_unlock(&root->fs_info->trans_lock);
4354                        btrfs_wait_for_commit(root, t->transid);
4355                        btrfs_put_transaction(t);
4356                        spin_lock(&root->fs_info->trans_lock);
4357                        continue;
4358                }
4359                if (t == root->fs_info->running_transaction) {
4360                        t->state = TRANS_STATE_COMMIT_DOING;
4361                        spin_unlock(&root->fs_info->trans_lock);
4362                        /*
4363                         * We wait for 0 num_writers since we don't hold a trans
4364                         * handle open currently for this transaction.
4365                         */
4366                        wait_event(t->writer_wait,
4367                                   atomic_read(&t->num_writers) == 0);
4368                } else {
4369                        spin_unlock(&root->fs_info->trans_lock);
4370                }
4371                btrfs_cleanup_one_transaction(t, root);
4372
4373                spin_lock(&root->fs_info->trans_lock);
4374                if (t == root->fs_info->running_transaction)
4375                        root->fs_info->running_transaction = NULL;
4376                list_del_init(&t->list);
4377                spin_unlock(&root->fs_info->trans_lock);
4378
4379                btrfs_put_transaction(t);
4380                trace_btrfs_transaction_commit(root);
4381                spin_lock(&root->fs_info->trans_lock);
4382        }
4383        spin_unlock(&root->fs_info->trans_lock);
4384        btrfs_destroy_all_ordered_extents(root->fs_info);
4385        btrfs_destroy_delayed_inodes(root);
4386        btrfs_assert_delayed_root_empty(root);
4387        btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
4388        btrfs_destroy_all_delalloc_inodes(root->fs_info);
4389        mutex_unlock(&root->fs_info->transaction_kthread_mutex);
4390
4391        return 0;
4392}
4393
4394static const struct extent_io_ops btree_extent_io_ops = {
4395        .readpage_end_io_hook = btree_readpage_end_io_hook,
4396        .readpage_io_failed_hook = btree_io_failed_hook,
4397        .submit_bio_hook = btree_submit_bio_hook,
4398        /* note we're sharing with inode.c for the merge bio hook */
4399        .merge_bio_hook = btrfs_merge_bio_hook,
4400};
4401