linux/fs/btrfs/disk-io.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                                    int read_only);
static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
                                             struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_root *root);
static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                        struct extent_io_tree *dirty_pages,
                                        int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                                       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);
static void btrfs_error_commit_super(struct btrfs_root *root);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        int error;
        int metadata;
        struct list_head list;
        struct btrfs_work work;
};
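
/*
 * Illustrative sketch (not upstream code): the lifecycle of one of
 * these wrappers.  btrfs_bio_wq_end_io() below saves the bio's real
 * completion hooks in the wrapper and redirects the bio to
 * end_workqueue_bio(), which runs at interrupt time and only queues
 * work; end_workqueue_fn() later restores the saved hooks and runs
 * the real completion in task context:
 *
 *      btrfs_bio_wq_end_io(info, bio, metadata);  // hook the bio
 *      submit_bio(rw, bio);
 *      // IO done -> end_workqueue_bio() queues &end_io_wq->work
 *      // worker  -> end_workqueue_fn() -> original bio->bi_end_io()
 */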

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        struct inode *inode;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_start;
        extent_submit_bio_hook_t *submit_bio_done;
        int rw;
        int mirror_num;
        unsigned long bio_flags;
        /*
         * bio_offset is optional, can be used if the pages in the bio
         * can't tell us where in the file the bio should go
         */
        u64 bio_offset;
        struct btrfs_work work;
        int error;
};

/*
 * Lockdep class keys for extent_buffer->locks in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error "BTRFS_MAX_LEVEL changed, update the lockdep keysets below"
# endif

static struct btrfs_lockdep_keyset {
        u64                     id;             /* root objectid */
        const char              *name_stem;     /* lock name stem */
        char                    names[BTRFS_MAX_LEVEL + 1][20];
        struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
        { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
        { .id = 0,                              .name_stem = "tree"     },
};

void __init btrfs_init_lockdep(void)
{
        int i, j;

        /* initialize lockdep class names */
        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
                        snprintf(ks->names[j], sizeof(ks->names[j]),
                                 "btrfs-%s-%02d", ks->name_stem, j);
        }
}
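
/*
 * For illustration only: after btrfs_init_lockdep() runs, each keyset
 * holds one pre-formatted lock name per level, e.g. for the extent
 * tree keyset:
 *
 *      ks->names[0] == "btrfs-extent-00"       (leaf level)
 *      ks->names[3] == "btrfs-extent-03"       (three levels up)
 *
 * btrfs_set_buffer_lockdep_class() below installs keys[level] and
 * names[level], so lockdep reports identify both the tree and the
 * level of a contended eb lock.
 */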

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
                                    int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock,
                                   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
                struct page *page, size_t pg_offset, u64 start, u64 len,
                int create)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        int ret;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev =
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
                read_unlock(&em_tree->lock);
                goto out;
        }
        read_unlock(&em_tree->lock);

        em = alloc_extent_map();
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_len = (u64)-1;
        em->block_start = 0;
        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em, 0);
        if (ret == -EEXIST) {
                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em)
                        em = ERR_PTR(-EIO);
        } else if (ret) {
                free_extent_map(em);
                em = ERR_PTR(ret);
        }
        write_unlock(&em_tree->lock);

out:
        return em;
}
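
/*
 * Illustrative note: because the btree inode carries that single
 * [0, (u64)-1) mapping with block_start == 0, every lookup returns
 * the identity mapping, e.g.
 *
 *      em = btree_get_extent(btree_inode, NULL, 0, bytenr, len, 0);
 *      // em->start == 0, em->block_start == 0, em->len == (u64)-1
 *
 * so metadata IO translates 1:1 onto latest_bdev with no per-extent
 * bookkeeping.
 */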

u32 btrfs_csum_data(char *data, u32 seed, size_t len)
{
        return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
        put_unaligned_le32(~crc, result);
}
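
/*
 * Worked example (sketch, not upstream code): checksumming a metadata
 * block whose first BTRFS_CSUM_SIZE bytes hold the csum itself.  The
 * seed starts as all ones and the final value is inverted and stored
 * little-endian at offset 0:
 *
 *      u32 crc = ~(u32)0;
 *      crc = btrfs_csum_data(block + BTRFS_CSUM_SIZE, crc,
 *                            blocksize - BTRFS_CSUM_SIZE);
 *      btrfs_csum_final(crc, block);   // stores ~crc as le32
 *
 * csum_tree_block() below does exactly this, mapping the extent
 * buffer's pages a chunk at a time.
 */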

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
{
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        char *result = NULL;
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;
        unsigned long inline_result;

        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                        &kaddr, &map_start, &map_len);
                if (err)
                        return 1;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
        }
        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size, GFP_NOFS);
                if (!result)
                        return 1;
        } else {
                result = (char *)&inline_result;
        }

        btrfs_csum_final(crc, result);

        if (verify) {
                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, csum_size);

                        read_extent_buffer(buf, &val, 0, csum_size);
                        printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
                                       "failed on %llu wanted %X found %X "
                                       "level %d\n",
                                       root->fs_info->sb->s_id,
                                       (unsigned long long)buf->start, val, found,
                                       btrfs_header_level(buf));
                        if (result != (char *)&inline_result)
                                kfree(result);
                        return 1;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
        }
        if (result != (char *)&inline_result)
                kfree(result);
        return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid,
                                 int atomic)
{
        struct extent_state *cached_state = NULL;
        int ret;

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        if (atomic)
                return -EAGAIN;

        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         0, &cached_state);
        if (extent_buffer_uptodate(eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        printk_ratelimited(KERN_ERR "parent transid verify failed on %llu "
                       "wanted %llu found %llu\n",
                       (unsigned long long)eb->start,
                       (unsigned long long)parent_transid,
                       (unsigned long long)btrfs_header_generation(eb));
        ret = 1;
        clear_extent_buffer_uptodate(eb);
out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state, GFP_NOFS);
        return ret;
}
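
/*
 * Example of what this catches (numbers are hypothetical): the parent
 * records the child's generation in its block pointer.  If the child's
 * write was dropped on the way to disk, a later read returns the old
 * block, whose csum is perfectly valid:
 *
 *      parent pointer:  bytenr X, generation 1500
 *      block at X:      btrfs_header_generation() == 1387
 *
 * Only this transid check (followed by a retry on another mirror)
 * can detect such a lost or misplaced write.
 */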

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(char *raw_disk_sb)
{
        struct btrfs_super_block *disk_sb =
                (struct btrfs_super_block *)raw_disk_sb;
        u16 csum_type = btrfs_super_csum_type(disk_sb);
        int ret = 0;

        if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
                u32 crc = ~(u32)0;
                const int csum_size = sizeof(crc);
                char result[csum_size];

                /*
                 * The super_block structure does not span the whole
                 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
                 * is filled with zeros and is included in the checksum.
                 */
                crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
                                crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
                btrfs_csum_final(crc, result);

                if (memcmp(raw_disk_sb, result, csum_size))
                        ret = 1;

                if (ret && btrfs_super_generation(disk_sb) < 10) {
                        printk(KERN_WARNING "btrfs: super block crcs don't match, older mkfs detected\n");
                        ret = 0;
                }
        }

        if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
                printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
                                csum_type);
                ret = 1;
        }

        return ret;
}
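
/*
 * Byte layout this verifies (crc32c case, BTRFS_SUPER_INFO_SIZE == 4K):
 *
 *      [0 .. 32)    stored csum (only the first 4 bytes used by crc32c)
 *      [32 .. 4K)   superblock fields plus zero padding
 *
 * The crc is computed over everything after the csum field, so the
 * padding past the end of struct btrfs_super_block must really be
 * zero, as the comment above notes.
 */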

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
                                          u64 start, u64 parent_transid)
{
        struct extent_io_tree *io_tree;
        int failed = 0;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;
        int failed_mirror = 0;

        clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start,
                                               WAIT_COMPLETE,
                                               btree_get_extent, mirror_num);
                if (!ret) {
                        if (!verify_parent_transid(io_tree, eb,
                                                   parent_transid, 0))
                                break;
                        else
                                ret = -EIO;
                }

                /*
                 * This buffer's crc is fine, but its contents are corrupted, so
                 * there is no reason to read the other copies, they won't be
                 * any less wrong.
                 */
                if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
                        break;

                num_copies = btrfs_num_copies(root->fs_info,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        break;

                if (!failed_mirror) {
                        failed = 1;
                        failed_mirror = eb->read_mirror;
                }

                mirror_num++;
                if (mirror_num == failed_mirror)
                        mirror_num++;

                if (mirror_num > num_copies)
                        break;
        }

        if (failed && !ret && failed_mirror)
                repair_eb_io_failure(root, eb, failed_mirror);

        return ret;
}
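
/*
 * Retry order by example (hypothetical three-copy layout, first read
 * served by mirror 2):
 *
 *      pass 1: mirror_num == 0, the chunk layer picks a mirror; it
 *              fails and eb->read_mirror records 2 as the bad mirror
 *      pass 2: mirror_num == 1
 *      pass 3: mirror_num == 2, bumped to 3 to skip the failed mirror
 *
 * After a later pass succeeds, repair_eb_io_failure() rewrites the
 * good copy over the mirror that failed first.
 */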

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
        struct extent_io_tree *tree;
        u64 start = page_offset(page);
        u64 found_start;
        struct extent_buffer *eb;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        eb = (struct extent_buffer *)page->private;
        if (page != eb->pages[0])
                return 0;
        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                WARN_ON(1);
                return 0;
        }
        if (!PageUptodate(page)) {
                WARN_ON(1);
                return 0;
        }
        csum_tree_block(root, eb, 0);
        return 0;
}

static int check_tree_block_fsid(struct btrfs_root *root,
                                 struct extent_buffer *eb)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        u8 fsid[BTRFS_UUID_SIZE];
        int ret = 1;

        read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
                           BTRFS_FSID_SIZE);
        while (fs_devices) {
                if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        break;
                }
                fs_devices = fs_devices->seed;
        }
        return ret;
}

#define CORRUPT(reason, eb, root, slot)                         \
        printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu, " \
               "root=%llu, slot=%d\n", reason,                  \
               (unsigned long long)btrfs_header_bytenr(eb),     \
               (unsigned long long)root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
                               struct extent_buffer *leaf)
{
        struct btrfs_key key;
        struct btrfs_key leaf_key;
        u32 nritems = btrfs_header_nritems(leaf);
        int slot;

        if (nritems == 0)
                return 0;

        /* Check the 0 item */
        if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
            BTRFS_LEAF_DATA_SIZE(root)) {
                CORRUPT("invalid item offset size pair", leaf, root, 0);
                return -EIO;
        }

        /*
         * Check to make sure each item's keys are in the correct order and
         * their offsets make sense.  We only have to loop through nritems-1
         * because we check the current slot against the next slot, which
         * verifies the next slot's offset+size makes sense and that the
         * current slot's offset is correct.
         */
        for (slot = 0; slot < nritems - 1; slot++) {
                btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
                btrfs_item_key_to_cpu(leaf, &key, slot + 1);

                /* Make sure the keys are in the right order */
                if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
                        CORRUPT("bad key order", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Make sure the offset and ends are right, remember that the
                 * item data starts at the end of the leaf and grows towards the
                 * front.
                 */
                if (btrfs_item_offset_nr(leaf, slot) !=
                        btrfs_item_end_nr(leaf, slot + 1)) {
                        CORRUPT("slot offset bad", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Check to make sure that we don't point outside of the leaf,
                 * just in case all the items are consistent with each other
                 * but all point outside of the leaf.
                 */
                if (btrfs_item_end_nr(leaf, slot) >
                    BTRFS_LEAF_DATA_SIZE(root)) {
                        CORRUPT("slot end outside of leaf", leaf, root, slot);
                        return -EIO;
                }
        }

        return 0;
}
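
/*
 * Leaf geometry the checks above rely on, by example: item headers
 * grow forward from the leaf header while item data grows backward
 * from the end of the leaf, so with two items of sizes s0 and s1
 * (DATA_SIZE == BTRFS_LEAF_DATA_SIZE(root)):
 *
 *      item 0 data: [DATA_SIZE - s0, DATA_SIZE)
 *      item 1 data: [DATA_SIZE - s0 - s1, DATA_SIZE - s0)
 *
 * Hence item 0 must end exactly at DATA_SIZE, and each slot's offset
 * must equal the end (offset + size) of the following slot.
 */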

static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state, int mirror)
{
        struct extent_io_tree *tree;
        u64 found_start;
        int found_level;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        int ret = 0;
        int reads_done;

        if (!page->private)
                goto out;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        eb = (struct extent_buffer *)page->private;

        /* the pending IO might have been the only thing that kept this buffer
         * in memory.  Make sure we have a ref for all the other checks
         */
        extent_buffer_get(eb);

        reads_done = atomic_dec_and_test(&eb->io_pages);
        if (!reads_done)
                goto err;

        eb->read_mirror = mirror;
        if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
                ret = -EIO;
                goto err;
        }

        found_start = btrfs_header_bytenr(eb);
        if (found_start != eb->start) {
                printk_ratelimited(KERN_INFO "btrfs bad tree block start "
                               "%llu %llu\n",
                               (unsigned long long)found_start,
                               (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(root, eb)) {
                printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
                               (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);
        if (found_level >= BTRFS_MAX_LEVEL) {
                btrfs_info(root->fs_info, "bad tree block level %d",
                           (int)btrfs_header_level(eb));
                ret = -EIO;
                goto err;
        }

        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
                                       eb, found_level);

        ret = csum_tree_block(root, eb, 1);
        if (ret) {
                ret = -EIO;
                goto err;
        }

        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
        if (found_level == 0 && check_leaf(root, eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }

        if (!ret)
                set_extent_buffer_uptodate(eb);
err:
        if (reads_done &&
            test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(root, eb, eb->start, ret);

        if (ret) {
                /*
                 * our io error hook is going to dec the io pages
                 * again, we have to make sure it has something
                 * to decrement
                 */
                atomic_inc(&eb->io_pages);
                clear_extent_buffer_uptodate(eb);
        }
        free_extent_buffer(eb);
out:
        return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

        eb = (struct extent_buffer *)page->private;
        set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
        eb->read_mirror = failed_mirror;
        atomic_dec(&eb->io_pages);
        if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(root, eb, eb->start, -EIO);
        return -EIO;    /* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio, int err)
{
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;

        fs_info = end_io_wq->info;
        end_io_wq->error = err;
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;

        if (bio->bi_rw & REQ_WRITE) {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
                                           &end_io_wq->work);
                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
                        btrfs_queue_worker(&fs_info->endio_freespace_worker,
                                           &end_io_wq->work);
                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
                        btrfs_queue_worker(&fs_info->endio_raid56_workers,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_write_workers,
                                           &end_io_wq->work);
        } else {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
                        btrfs_queue_worker(&fs_info->endio_raid56_workers,
                                           &end_io_wq->work);
                else if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_workers,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_workers,
                                           &end_io_wq->work);
        }
}

/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 * 3 - raid parity work
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
{
        struct end_io_wq *end_io_wq;

        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
        if (!end_io_wq)
                return -ENOMEM;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->error = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}
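
/*
 * Typical use (sketch), matching btree_submit_bio_hook() below: hook
 * a metadata read so csum verification runs in a helper thread rather
 * than in interrupt context:
 *
 *      ret = btrfs_bio_wq_end_io(fs_info, bio, 1);     // 1 == metadata
 *      if (ret)
 *              return ret;
 *      ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
 */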

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
        unsigned long limit = min_t(unsigned long,
                                    info->workers.max_workers,
                                    info->fs_devices->open_devices);
        return 256 * limit;
}
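
/*
 * Worked example (hypothetical numbers): with 8 worker threads and 4
 * open devices the limit is min(8, 4) * 256 = 1024 async bios in
 * flight; run_one_async_done() below wakes throttled submitters once
 * the count drops under 2/3 of that, i.e. 682.
 */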

static void run_one_async_start(struct btrfs_work *work)
{
        struct async_submit_bio *async;
        int ret;

        async = container_of(work, struct async_submit_bio, work);
        ret = async->submit_bio_start(async->inode, async->rw, async->bio,
                                      async->mirror_num, async->bio_flags,
                                      async->bio_offset);
        if (ret)
                async->error = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;
        int limit;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;

        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

        if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
            waitqueue_active(&fs_info->async_submit_wait))
                wake_up(&fs_info->async_submit_wait);

        /* If an error occurred we just want to clean up the bio and move on */
        if (async->error) {
                bio_endio(async->bio, async->error);
                return;
        }

        async->submit_bio_done(async->inode, async->rw, async->bio,
                               async->mirror_num, async->bio_flags,
                               async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        unsigned long bio_flags,
                        u64 bio_offset,
                        extent_submit_bio_hook_t *submit_bio_start,
                        extent_submit_bio_hook_t *submit_bio_done)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;

        async->inode = inode;
        async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;

        async->work.func = run_one_async_start;
        async->work.ordered_func = run_one_async_done;
        async->work.ordered_free = run_one_async_free;

        async->work.flags = 0;
        async->bio_flags = bio_flags;
        async->bio_offset = bio_offset;

        async->error = 0;

        atomic_inc(&fs_info->nr_async_submits);

        if (rw & REQ_SYNC)
                btrfs_set_work_high_prio(&async->work);

        btrfs_queue_worker(&fs_info->workers, &async->work);

        while (atomic_read(&fs_info->async_submit_draining) &&
              atomic_read(&fs_info->nr_async_submits)) {
                wait_event(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) == 0));
        }

        return 0;
}
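
/*
 * The three callbacks wired up above give every async bio a
 * three-phase life:
 *
 *      run_one_async_start()   checksums the pages; start phases of
 *                              different bios may run in parallel
 *      run_one_async_done()    runs in queueing order, so bios are
 *                              mapped and submitted in the order the
 *                              callers handed them in
 *      run_one_async_free()    frees the async_submit_bio last
 *
 * REQ_SYNC bios are marked high priority so synchronous writers do
 * not wait behind a backlog of background checksumming.
 */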

static int btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int bio_index = 0;
        struct btrfs_root *root;
        int ret = 0;

        WARN_ON(bio->bi_vcnt <= 0);
        while (bio_index < bio->bi_vcnt) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root, bvec->bv_page);
                if (ret)
                        break;
                bio_index++;
                bvec++;
        }
        return ret;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags,
                                    u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just checksum the bio here; the actual
         * submission happens later in __btree_submit_bio_done
         */
        return btree_csum_one_bio(bio);
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        int ret;

        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
        if (ret)
                bio_endio(bio, ret);
        return ret;
}

static int check_async_write(struct inode *inode, unsigned long bio_flags)
{
        if (bio_flags & EXTENT_BIO_TREE_LOG)
                return 0;
#ifdef CONFIG_X86
        if (cpu_has_xmm4_2)
                return 0;
#endif
        return 1;
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        int async = check_async_write(inode, bio_flags);
        int ret;

        if (!(rw & REQ_WRITE)) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
                                          bio, 1);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                    mirror_num, 0);
        } else if (!async) {
                ret = btree_csum_one_bio(bio);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                    mirror_num, 0);
        } else {
                /*
                 * kthread helpers are used to submit writes so that
                 * checksumming can happen in parallel across all CPUs
                 */
                ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                          inode, rw, bio, mirror_num, 0,
                                          bio_offset,
                                          __btree_submit_bio_start,
                                          __btree_submit_bio_done);
        }

        if (ret) {
out_w_error:
                bio_endio(bio, ret);
        }
        return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
                        struct page *newpage, struct page *page,
                        enum migrate_mode mode)
{
        /*
         * we can't safely write a btree page from here,
         * we haven't done the locking hook
         */
        if (PageDirty(page))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
        return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        struct btrfs_fs_info *fs_info;
        int ret;

        tree = &BTRFS_I(mapping->host)->io_tree;
        if (wbc->sync_mode == WB_SYNC_NONE) {
                if (wbc->for_kupdate)
                        return 0;

                fs_info = BTRFS_I(mapping->host)->root->fs_info;
                /* this is a bit racy, but that's ok */
                ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
                                             BTRFS_DIRTY_METADATA_THRESH);
                if (ret < 0)
                        return 0;
        }
        return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        if (PageWriteback(page) || PageDirty(page))
                return 0;

        return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        struct extent_io_tree *tree;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                printk(KERN_WARNING "btrfs warning page private not zero "
                       "on page %llu\n", (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
        struct extent_buffer *eb;

        BUG_ON(!PagePrivate(page));
        eb = (struct extent_buffer *)page->private;
        BUG_ON(!eb);
        BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
        BUG_ON(!atomic_read(&eb->refs));
        btrfs_assert_tree_locked(eb);
#endif
        return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = btree_migratepage,
#endif
        .set_page_dirty = btree_set_page_dirty,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        int ret = 0;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, WAIT_NONE, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
}

int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         int mirror_num, struct extent_buffer **eb)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
        int ret;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;

        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

        ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
                                       btree_get_extent, mirror_num);
        if (ret) {
                free_extent_buffer(buf);
                return ret;
        }

        if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
                free_extent_buffer(buf);
                return -EIO;
        } else if (extent_buffer_uptodate(buf)) {
                *eb = buf;
        } else {
                free_extent_buffer(buf);
        }
        return 0;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;

        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                bytenr, blocksize);
        return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;

        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                 bytenr, blocksize);
        return eb;
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
        return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
                                        buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
        return filemap_fdatawait_range(buf->pages[0]->mapping,
                                       buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u32 blocksize, u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return NULL;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        return buf;
}

void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      struct extent_buffer *buf)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (btrfs_header_generation(buf) ==
            fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
                        __percpu_counter_add(&fs_info->dirty_metadata_bytes,
                                             -buf->len,
                                             fs_info->dirty_metadata_batch);
                        /* ugh, clear_extent_buffer_dirty needs to lock the page */
                        btrfs_set_lock_blocking(buf);
                        clear_extent_buffer_dirty(buf);
                }
        }
}

static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                         u32 stripesize, struct btrfs_root *root,
                         struct btrfs_fs_info *fs_info,
                         u64 objectid)
{
        root->node = NULL;
        root->commit_root = NULL;
        root->sectorsize = sectorsize;
        root->nodesize = nodesize;
        root->leafsize = leafsize;
        root->stripesize = stripesize;
        root->ref_cows = 0;
        root->track_dirty = 0;
        root->in_radix = 0;
        root->orphan_item_inserted = 0;
        root->orphan_cleanup_state = 0;

        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_objectid = 0;
        root->nr_delalloc_inodes = 0;
        root->nr_ordered_extents = 0;
        root->name = NULL;
        root->inode_tree = RB_ROOT;
        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;
        root->orphan_block_rsv = NULL;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->root_list);
        INIT_LIST_HEAD(&root->delalloc_inodes);
        INIT_LIST_HEAD(&root->delalloc_root);
        INIT_LIST_HEAD(&root->ordered_extents);
        INIT_LIST_HEAD(&root->ordered_root);
        INIT_LIST_HEAD(&root->logged_list[0]);
        INIT_LIST_HEAD(&root->logged_list[1]);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->delalloc_lock);
        spin_lock_init(&root->ordered_extent_lock);
        spin_lock_init(&root->accounting_lock);
        spin_lock_init(&root->log_extents_lock[0]);
        spin_lock_init(&root->log_extents_lock[1]);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        atomic_set(&root->log_batch, 0);
        atomic_set(&root->orphan_inodes, 0);
        atomic_set(&root->refs, 1);
        root->log_transid = 0;
        root->last_log_commit = 0;
        extent_io_tree_init(&root->dirty_log_pages,
                             fs_info->btree_inode->i_mapping);

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->root_key.objectid = objectid;
        root->anon_dev = 0;

        spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);

        if (root)
                root->fs_info = fs_info;
        return root;
}

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info,
                                     u64 objectid)
{
        struct extent_buffer *leaf;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root;
        struct btrfs_key key;
        int ret = 0;
        uuid_le uuid;

        root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, objectid);
        root->root_key.objectid = objectid;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = 0;

        leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
                                      0, objectid, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                leaf = NULL;
                goto fail;
        }

        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_bytenr(leaf, leaf->start);
        btrfs_set_header_generation(leaf, trans->transid);
        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(leaf, objectid);
        root->node = leaf;

        write_extent_buffer(leaf, fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(leaf),
                            BTRFS_FSID_SIZE);
        write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
                            (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
                            BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);

        root->commit_root = btrfs_root_node(root);
        root->track_dirty = 1;

        root->root_item.flags = 0;
        root->root_item.byte_limit = 0;
        btrfs_set_root_bytenr(&root->root_item, leaf->start);
        btrfs_set_root_generation(&root->root_item, trans->transid);
        btrfs_set_root_level(&root->root_item, 0);
        btrfs_set_root_refs(&root->root_item, 1);
        btrfs_set_root_used(&root->root_item, leaf->len);
        btrfs_set_root_last_snapshot(&root->root_item, 0);
        btrfs_set_root_dirid(&root->root_item, 0);
        uuid_le_gen(&uuid);
        memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
        root->root_item.drop_level = 0;

        key.objectid = objectid;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = 0;
        ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
        if (ret)
                goto fail;

        btrfs_tree_unlock(leaf);

        return root;

fail:
        if (leaf) {
                btrfs_tree_unlock(leaf);
                free_extent_buffer(leaf);
        }
        kfree(root);

        return ERR_PTR(ret);
}
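
/*
 * Example caller (sketch): this is how a special-purpose tree such as
 * the quota tree is created at runtime, given a running transaction:
 *
 *      root = btrfs_create_tree(trans, fs_info,
 *                               BTRFS_QUOTA_TREE_OBJECTID);
 *      if (IS_ERR(root))
 *              return PTR_ERR(root);
 *
 * The new root comes back with a single empty, dirty leaf already
 * inserted into the root tree.
 */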

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct extent_buffer *leaf;

        root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
        /*
         * log trees do not get reference counted because they go away
         * before a real commit is actually done.  They do store pointers
         * to file data extents, and those reference counts still get
         * updated (along with back refs to the log tree).
         */
        root->ref_cows = 0;

        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                      BTRFS_TREE_LOG_OBJECTID, NULL,
                                      0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
        }

        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_bytenr(leaf, leaf->start);
        btrfs_set_header_generation(leaf, trans->transid);
        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
        root->node = leaf;

        write_extent_buffer(root->node, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(root->node),
                            BTRFS_FSID_SIZE);
        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);
        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;

        log_root = alloc_log_tree(trans, root->fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        log_root->last_trans = trans->transid;
        log_root->root_key.offset = root->root_key.objectid;

        inode_item = &log_root->root_item.inode;
        inode_item->generation = cpu_to_le64(1);
        inode_item->size = cpu_to_le64(3);
        inode_item->nlink = cpu_to_le32(1);
        inode_item->nbytes = cpu_to_le64(root->leafsize);
        inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

        btrfs_set_root_node(&log_root->root_item, log_root->node);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        root->log_transid = 0;
        root->last_log_commit = 0;
        return 0;
}

struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
                                        struct btrfs_key *key)
{
        struct btrfs_root *root;
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        struct btrfs_path *path;
        u64 generation;
        u32 blocksize;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return ERR_PTR(-ENOMEM);

        root = btrfs_alloc_root(fs_info);
        if (!root) {
                ret = -ENOMEM;
                goto alloc_fail;
        }

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, key->objectid);

        ret = btrfs_find_root(tree_root, key, path,
                              &root->root_item, &root->root_key);
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
                goto find_fail;
        }

        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        if (!root->node) {
                ret = -ENOMEM;
                goto find_fail;
        } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
                ret = -EIO;
                goto read_fail;
        }
        root->commit_root = btrfs_root_node(root);
out:
        btrfs_free_path(path);
        return root;

read_fail:
        free_extent_buffer(root->node);
find_fail:
        kfree(root);
alloc_fail:
        root = ERR_PTR(ret);
        goto out;
}
1487
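    /*
     * same as btrfs_read_tree_root(), but for everything except log trees
     * it also marks the root as reference counted (ref_cows) and sanity
     * checks the root item
     */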
1488struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
1489                                      struct btrfs_key *location)
1490{
1491        struct btrfs_root *root;
1492
1493        root = btrfs_read_tree_root(tree_root, location);
1494        if (IS_ERR(root))
1495                return root;
1496
1497        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1498                root->ref_cows = 1;
1499                btrfs_check_and_init_root_item(&root->root_item);
1500        }
1501
1502        return root;
1503}
1504
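    /*
     * set up the runtime state of a subvolume root: the free inode caches
     * and the anonymous device number that gives the subvolume its own
     * st_dev
     */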
1505int btrfs_init_fs_root(struct btrfs_root *root)
1506{
1507        int ret;
1508
1509        root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1510        root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1511                                        GFP_NOFS);
1512        if (!root->free_ino_pinned || !root->free_ino_ctl) {
1513                ret = -ENOMEM;
1514                goto fail;
1515        }
1516
1517        btrfs_init_free_ino_ctl(root);
1518        mutex_init(&root->fs_commit_mutex);
1519        spin_lock_init(&root->cache_lock);
1520        init_waitqueue_head(&root->cache_wait);
1521
1522        ret = get_anon_bdev(&root->anon_dev);
1523        if (ret)
1524                goto fail;
1525        return 0;
1526fail:
1527        kfree(root->free_ino_ctl);
1528        kfree(root->free_ino_pinned);
1529        return ret;
1530}
1531
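    /*
     * find a cached in-memory root by objectid in the fs_roots_radix tree,
     * or return NULL if it hasn't been read in yet
     */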
1532struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1533                                        u64 root_id)
1534{
1535        struct btrfs_root *root;
1536
1537        spin_lock(&fs_info->fs_roots_radix_lock);
1538        root = radix_tree_lookup(&fs_info->fs_roots_radix,
1539                                 (unsigned long)root_id);
1540        spin_unlock(&fs_info->fs_roots_radix_lock);
1541        return root;
1542}
1543
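    /*
     * add a root to the fs_roots_radix cache.  Returns -EEXIST if another
     * task got there first.
     */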
1544int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1545                         struct btrfs_root *root)
1546{
1547        int ret;
1548
1549        ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1550        if (ret)
1551                return ret;
1552
1553        spin_lock(&fs_info->fs_roots_radix_lock);
1554        ret = radix_tree_insert(&fs_info->fs_roots_radix,
1555                                (unsigned long)root->root_key.objectid,
1556                                root);
1557        if (ret == 0)
1558                root->in_radix = 1;
1559        spin_unlock(&fs_info->fs_roots_radix_lock);
1560        radix_tree_preload_end();
1561
1562        return ret;
1563}
1564
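    /*
     * return the in-memory root for @location, reading it from disk and
     * caching it in the radix tree if necessary.  The well known trees
     * (root, extent, chunk, dev, csum, quota) are returned straight from
     * fs_info.  A typical caller fills in a key like
     *
     *   location.objectid = BTRFS_FS_TREE_OBJECTID;
     *   location.type = BTRFS_ROOT_ITEM_KEY;
     *   location.offset = 0;
     *
     * as is done when looking up the default subvolume at mount time.
     */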
1565struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1566                                              struct btrfs_key *location)
1567{
1568        struct btrfs_root *root;
1569        int ret;
1570
1571        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1572                return fs_info->tree_root;
1573        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1574                return fs_info->extent_root;
1575        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1576                return fs_info->chunk_root;
1577        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1578                return fs_info->dev_root;
1579        if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1580                return fs_info->csum_root;
1581        if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1582                return fs_info->quota_root ? fs_info->quota_root :
1583                                             ERR_PTR(-ENOENT);
1584again:
1585        root = btrfs_lookup_fs_root(fs_info, location->objectid);
1586        if (root)
1587                return root;
1588
1589        root = btrfs_read_fs_root(fs_info->tree_root, location);
1590        if (IS_ERR(root))
1591                return root;
1592
1593        if (btrfs_root_refs(&root->root_item) == 0) {
1594                ret = -ENOENT;
1595                goto fail;
1596        }
1597
1598        ret = btrfs_init_fs_root(root);
1599        if (ret)
1600                goto fail;
1601
1602        ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1603        if (ret < 0)
1604                goto fail;
1605        if (ret == 0)
1606                root->orphan_item_inserted = 1;
1607
1608        ret = btrfs_insert_fs_root(fs_info, root);
1609        if (ret) {
1610                if (ret == -EEXIST) {
1611                        free_fs_root(root);
1612                        goto again;
1613                }
1614                goto fail;
1615        }
1616        return root;
1617fail:
1618        free_fs_root(root);
1619        return ERR_PTR(ret);
1620}
1621
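    /*
     * bdi congestion callback: the filesystem counts as congested if any
     * of its devices' backing devices are congested
     */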
1622static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1623{
1624        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1625        int ret = 0;
1626        struct btrfs_device *device;
1627        struct backing_dev_info *bdi;
1628
1629        rcu_read_lock();
1630        list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1631                if (!device->bdev)
1632                        continue;
1633                bdi = blk_get_backing_dev_info(device->bdev);
1634                if (bdi && bdi_congested(bdi, bdi_bits)) {
1635                        ret = 1;
1636                        break;
1637                }
1638        }
1639        rcu_read_unlock();
1640        return ret;
1641}
1642
1643/*
1644 * If this fails, caller must call bdi_destroy() to get rid of the
1645 * bdi again.
1646 */
1647static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1648{
1649        int err;
1650
1651        bdi->capabilities = BDI_CAP_MAP_COPY;
1652        err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1653        if (err)
1654                return err;
1655
1656        bdi->ra_pages   = default_backing_dev_info.ra_pages;
1657        bdi->congested_fn       = btrfs_congested_fn;
1658        bdi->congested_data     = info;
1659        return 0;
1660}
1661
1662/*
1663 * called by the kthread helper functions to finally call the bio end_io
1664 * functions.  This is where read checksum verification actually happens
1665 */
1666static void end_workqueue_fn(struct btrfs_work *work)
1667{
1668        struct bio *bio;
1669        struct end_io_wq *end_io_wq;
1670        struct btrfs_fs_info *fs_info;
1671        int error;
1672
1673        end_io_wq = container_of(work, struct end_io_wq, work);
1674        bio = end_io_wq->bio;
1675        fs_info = end_io_wq->info;
1676
1677        error = end_io_wq->error;
1678        bio->bi_private = end_io_wq->private;
1679        bio->bi_end_io = end_io_wq->end_io;
1680        kfree(end_io_wq);
1681        bio_endio(bio, error);
1682}
1683
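    /*
     * background thread doing the periodic cleanup work: running delayed
     * iputs, dropping one deleted snapshot at a time and kicking off inode
     * defrag.  It backs off whenever btrfs_need_cleaner_sleep() says so.
     */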
1684static int cleaner_kthread(void *arg)
1685{
1686        struct btrfs_root *root = arg;
1687        int again;
1688
1689        do {
1690                again = 0;
1691
1692                /* Make the cleaner go to sleep early. */
1693                if (btrfs_need_cleaner_sleep(root))
1694                        goto sleep;
1695
1696                if (!mutex_trylock(&root->fs_info->cleaner_mutex))
1697                        goto sleep;
1698
1699                /*
1700                 * Recheck with the cleaner_mutex held, in case the status
1701                 * of the fs changed between the check above and the trylock.
1702                 */
1703                if (btrfs_need_cleaner_sleep(root)) {
1704                        mutex_unlock(&root->fs_info->cleaner_mutex);
1705                        goto sleep;
1706                }
1707
1708                btrfs_run_delayed_iputs(root);
1709                again = btrfs_clean_one_deleted_snapshot(root);
1710                mutex_unlock(&root->fs_info->cleaner_mutex);
1711
1712                /*
1713                 * The defragger has already dealt with the R/O remount and
1714                 * umount, so we needn't do anything special here.
1715                 */
1716                btrfs_run_defrag_inodes(root->fs_info);
1717sleep:
1718                if (!try_to_freeze() && !again) {
1719                        set_current_state(TASK_INTERRUPTIBLE);
1720                        if (!kthread_should_stop())
1721                                schedule();
1722                        __set_current_state(TASK_RUNNING);
1723                }
1724        } while (!kthread_should_stop());
1725        return 0;
1726}
1727
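    /*
     * background thread that commits the running transaction once it is
     * old enough (roughly every 30 seconds) or has been marked blocked,
     * and then wakes the cleaner
     */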
1728static int transaction_kthread(void *arg)
1729{
1730        struct btrfs_root *root = arg;
1731        struct btrfs_trans_handle *trans;
1732        struct btrfs_transaction *cur;
1733        u64 transid;
1734        unsigned long now;
1735        unsigned long delay;
1736        bool cannot_commit;
1737
1738        do {
1739                cannot_commit = false;
1740                delay = HZ * 30;
1741                mutex_lock(&root->fs_info->transaction_kthread_mutex);
1742
1743                spin_lock(&root->fs_info->trans_lock);
1744                cur = root->fs_info->running_transaction;
1745                if (!cur) {
1746                        spin_unlock(&root->fs_info->trans_lock);
1747                        goto sleep;
1748                }
1749
1750                now = get_seconds();
1751                if (cur->state < TRANS_STATE_BLOCKED &&
1752                    (now < cur->start_time || now - cur->start_time < 30)) {
1753                        spin_unlock(&root->fs_info->trans_lock);
1754                        delay = HZ * 5;
1755                        goto sleep;
1756                }
1757                transid = cur->transid;
1758                spin_unlock(&root->fs_info->trans_lock);
1759
1760                /* If the file system is aborted, this will always fail. */
1761                trans = btrfs_attach_transaction(root);
1762                if (IS_ERR(trans)) {
1763                        if (PTR_ERR(trans) != -ENOENT)
1764                                cannot_commit = true;
1765                        goto sleep;
1766                }
1767                if (transid == trans->transid) {
1768                        btrfs_commit_transaction(trans, root);
1769                } else {
1770                        btrfs_end_transaction(trans, root);
1771                }
1772sleep:
1773                wake_up_process(root->fs_info->cleaner_kthread);
1774                mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1775
1776                if (!try_to_freeze()) {
1777                        set_current_state(TASK_INTERRUPTIBLE);
1778                        if (!kthread_should_stop() &&
1779                            (!btrfs_transaction_blocked(root->fs_info) ||
1780                             cannot_commit))
1781                                schedule_timeout(delay);
1782                        __set_current_state(TASK_RUNNING);
1783                }
1784        } while (!kthread_should_stop());
1785        return 0;
1786}
1787
1788/*
1789 * this will find the highest generation in the array of
1790 * root backups.  The index of the newest array entry is returned,
1791 * or -1 if we can't find anything.
1792 *
1793 * We check to make sure the array is valid by comparing the
1794 * generation of the latest root in the array with the generation
1795 * in the super block.  If they don't match we pitch it.
1796 */
1797static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1798{
1799        u64 cur;
1800        int newest_index = -1;
1801        struct btrfs_root_backup *root_backup;
1802        int i;
1803
1804        for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1805                root_backup = info->super_copy->super_roots + i;
1806                cur = btrfs_backup_tree_root_gen(root_backup);
1807                if (cur == newest_gen)
1808                        newest_index = i;
1809        }
1810
1811        /* check to see if we actually wrapped around */
1812        if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1813                root_backup = info->super_copy->super_roots;
1814                cur = btrfs_backup_tree_root_gen(root_backup);
1815                if (cur == newest_gen)
1816                        newest_index = 0;
1817        }
1818        return newest_index;
1819}
1820
1821
1822/*
1823 * find the oldest backup so we know where to store new entries
1824 * in the backup array.  This will set the backup_root_index
1825 * field in the fs_info struct
1826 */
1827static void find_oldest_super_backup(struct btrfs_fs_info *info,
1828                                     u64 newest_gen)
1829{
1830        int newest_index = -1;
1831
1832        newest_index = find_newest_super_backup(info, newest_gen);
1833        /* if there was garbage in there, just move along */
1834        if (newest_index == -1) {
1835                info->backup_root_index = 0;
1836        } else {
1837                info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1838        }
1839}
1840
1841/*
1842 * copy all the root pointers into the super backup array.
1843 * this will bump the backup pointer by one when it is
1844 * done
1845 */
1846static void backup_super_roots(struct btrfs_fs_info *info)
1847{
1848        int next_backup;
1849        struct btrfs_root_backup *root_backup;
1850        int last_backup;
1851
1852        next_backup = info->backup_root_index;
1853        last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1854                BTRFS_NUM_BACKUP_ROOTS;
1855
1856        /*
1857         * just overwrite the last backup if we're at the same generation;
1858         * this happens only at umount
1859         */
1860        root_backup = info->super_for_commit->super_roots + last_backup;
1861        if (btrfs_backup_tree_root_gen(root_backup) ==
1862            btrfs_header_generation(info->tree_root->node))
1863                next_backup = last_backup;
1864
1865        root_backup = info->super_for_commit->super_roots + next_backup;
1866
1867        /*
1868         * make sure all of our padding and empty slots get zero filled
1869         * regardless of which ones we use today
1870         */
1871        memset(root_backup, 0, sizeof(*root_backup));
1872
1873        info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1874
1875        btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1876        btrfs_set_backup_tree_root_gen(root_backup,
1877                               btrfs_header_generation(info->tree_root->node));
1878
1879        btrfs_set_backup_tree_root_level(root_backup,
1880                               btrfs_header_level(info->tree_root->node));
1881
1882        btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1883        btrfs_set_backup_chunk_root_gen(root_backup,
1884                               btrfs_header_generation(info->chunk_root->node));
1885        btrfs_set_backup_chunk_root_level(root_backup,
1886                               btrfs_header_level(info->chunk_root->node));
1887
1888        btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1889        btrfs_set_backup_extent_root_gen(root_backup,
1890                               btrfs_header_generation(info->extent_root->node));
1891        btrfs_set_backup_extent_root_level(root_backup,
1892                               btrfs_header_level(info->extent_root->node));
1893
1894        /*
1895         * we might commit during log recovery, which happens before we set
1896         * the fs_root.  Make sure it is valid before we fill it in.
1897         */
1898        if (info->fs_root && info->fs_root->node) {
1899                btrfs_set_backup_fs_root(root_backup,
1900                                         info->fs_root->node->start);
1901                btrfs_set_backup_fs_root_gen(root_backup,
1902                               btrfs_header_generation(info->fs_root->node));
1903                btrfs_set_backup_fs_root_level(root_backup,
1904                               btrfs_header_level(info->fs_root->node));
1905        }
1906
1907        btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1908        btrfs_set_backup_dev_root_gen(root_backup,
1909                               btrfs_header_generation(info->dev_root->node));
1910        btrfs_set_backup_dev_root_level(root_backup,
1911                                       btrfs_header_level(info->dev_root->node));
1912
1913        btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1914        btrfs_set_backup_csum_root_gen(root_backup,
1915                               btrfs_header_generation(info->csum_root->node));
1916        btrfs_set_backup_csum_root_level(root_backup,
1917                               btrfs_header_level(info->csum_root->node));
1918
1919        btrfs_set_backup_total_bytes(root_backup,
1920                             btrfs_super_total_bytes(info->super_copy));
1921        btrfs_set_backup_bytes_used(root_backup,
1922                             btrfs_super_bytes_used(info->super_copy));
1923        btrfs_set_backup_num_devices(root_backup,
1924                             btrfs_super_num_devices(info->super_copy));
1925
1926        /*
1927         * if we don't copy this out to the super_copy, it won't get remembered
1928         * for the next commit
1929         */
1930        memcpy(&info->super_copy->super_roots,
1931               &info->super_for_commit->super_roots,
1932               sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1933}
1934
1935/*
1936 * this copies info out of the root backup array and back into
1937 * the in-memory super block.  It is meant to help iterate through
1938 * the array, so you send it the number of backups you've already
1939 * tried and the last backup index you used.
1940 *
1941 * this returns -1 when it has tried all the backups
1942 */
1943static noinline int next_root_backup(struct btrfs_fs_info *info,
1944                                     struct btrfs_super_block *super,
1945                                     int *num_backups_tried, int *backup_index)
1946{
1947        struct btrfs_root_backup *root_backup;
1948        int newest = *backup_index;
1949
1950        if (*num_backups_tried == 0) {
1951                u64 gen = btrfs_super_generation(super);
1952
1953                newest = find_newest_super_backup(info, gen);
1954                if (newest == -1)
1955                        return -1;
1956
1957                *backup_index = newest;
1958                *num_backups_tried = 1;
1959        } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1960                /* we've tried all the backups, all done */
1961                return -1;
1962        } else {
1963                /* jump to the next oldest backup */
1964                newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1965                        BTRFS_NUM_BACKUP_ROOTS;
1966                *backup_index = newest;
1967                *num_backups_tried += 1;
1968        }
1969        root_backup = super->super_roots + newest;
1970
1971        btrfs_set_super_generation(super,
1972                                   btrfs_backup_tree_root_gen(root_backup));
1973        btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1974        btrfs_set_super_root_level(super,
1975                                   btrfs_backup_tree_root_level(root_backup));
1976        btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1977
1978        /*
1979         * fixme: the total bytes and num_devices need to match, or we
1980         * should require a fsck
1981         */
1982        btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1983        btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1984        return 0;
1985}
1986
1987/* helper to cleanup workers */
1988static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1989{
1990        btrfs_stop_workers(&fs_info->generic_worker);
1991        btrfs_stop_workers(&fs_info->fixup_workers);
1992        btrfs_stop_workers(&fs_info->delalloc_workers);
1993        btrfs_stop_workers(&fs_info->workers);
1994        btrfs_stop_workers(&fs_info->endio_workers);
1995        btrfs_stop_workers(&fs_info->endio_meta_workers);
1996        btrfs_stop_workers(&fs_info->endio_raid56_workers);
1997        btrfs_stop_workers(&fs_info->rmw_workers);
1998        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
1999        btrfs_stop_workers(&fs_info->endio_write_workers);
2000        btrfs_stop_workers(&fs_info->endio_freespace_worker);
2001        btrfs_stop_workers(&fs_info->submit_workers);
2002        btrfs_stop_workers(&fs_info->delayed_workers);
2003        btrfs_stop_workers(&fs_info->caching_workers);
2004        btrfs_stop_workers(&fs_info->readahead_workers);
2005        btrfs_stop_workers(&fs_info->flush_workers);
2006        btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
2007}
2008
2009/* helper to cleanup tree roots */
2010static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2011{
2012        free_extent_buffer(info->tree_root->node);
2013        free_extent_buffer(info->tree_root->commit_root);
2014        info->tree_root->node = NULL;
2015        info->tree_root->commit_root = NULL;
2016
2017        if (info->dev_root) {
2018                free_extent_buffer(info->dev_root->node);
2019                free_extent_buffer(info->dev_root->commit_root);
2020                info->dev_root->node = NULL;
2021                info->dev_root->commit_root = NULL;
2022        }
2023        if (info->extent_root) {
2024                free_extent_buffer(info->extent_root->node);
2025                free_extent_buffer(info->extent_root->commit_root);
2026                info->extent_root->node = NULL;
2027                info->extent_root->commit_root = NULL;
2028        }
2029        if (info->csum_root) {
2030                free_extent_buffer(info->csum_root->node);
2031                free_extent_buffer(info->csum_root->commit_root);
2032                info->csum_root->node = NULL;
2033                info->csum_root->commit_root = NULL;
2034        }
2035        if (info->quota_root) {
2036                free_extent_buffer(info->quota_root->node);
2037                free_extent_buffer(info->quota_root->commit_root);
2038                info->quota_root->node = NULL;
2039                info->quota_root->commit_root = NULL;
2040        }
2041        if (chunk_root) {
2042                free_extent_buffer(info->chunk_root->node);
2043                free_extent_buffer(info->chunk_root->commit_root);
2044                info->chunk_root->node = NULL;
2045                info->chunk_root->commit_root = NULL;
2046        }
2047}
2048
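    /*
     * free every fs root we still track: first the roots on the dead_roots
     * list, then anything left in the fs_roots_radix tree.  Used when
     * tearing the filesystem down.
     */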
2049static void del_fs_roots(struct btrfs_fs_info *fs_info)
2050{
2051        int ret;
2052        struct btrfs_root *gang[8];
2053        int i;
2054
2055        while (!list_empty(&fs_info->dead_roots)) {
2056                gang[0] = list_entry(fs_info->dead_roots.next,
2057                                     struct btrfs_root, root_list);
2058                list_del(&gang[0]->root_list);
2059
2060                if (gang[0]->in_radix) {
2061                        btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2062                } else {
2063                        free_extent_buffer(gang[0]->node);
2064                        free_extent_buffer(gang[0]->commit_root);
2065                        btrfs_put_fs_root(gang[0]);
2066                }
2067        }
2068
2069        while (1) {
2070                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2071                                             (void **)gang, 0,
2072                                             ARRAY_SIZE(gang));
2073                if (!ret)
2074                        break;
2075                for (i = 0; i < ret; i++)
2076                        btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2077        }
2078}
2079
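    /*
     * mount time setup: read and validate the super block, start the
     * worker threads, read the chunk and root trees (falling back to the
     * backup roots if needed), load the block groups, start the cleaner
     * and transaction kthreads, replay the log tree and finally look up
     * the default fs root
     */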
2080int open_ctree(struct super_block *sb,
2081               struct btrfs_fs_devices *fs_devices,
2082               char *options)
2083{
2084        u32 sectorsize;
2085        u32 nodesize;
2086        u32 leafsize;
2087        u32 blocksize;
2088        u32 stripesize;
2089        u64 generation;
2090        u64 features;
2091        struct btrfs_key location;
2092        struct buffer_head *bh;
2093        struct btrfs_super_block *disk_super;
2094        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2095        struct btrfs_root *tree_root;
2096        struct btrfs_root *extent_root;
2097        struct btrfs_root *csum_root;
2098        struct btrfs_root *chunk_root;
2099        struct btrfs_root *dev_root;
2100        struct btrfs_root *quota_root;
2101        struct btrfs_root *log_tree_root;
2102        int ret;
2103        int err = -EINVAL;
2104        int num_backups_tried = 0;
2105        int backup_index = 0;
2106
2107        tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
2108        chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
2109        if (!tree_root || !chunk_root) {
2110                err = -ENOMEM;
2111                goto fail;
2112        }
2113
2114        ret = init_srcu_struct(&fs_info->subvol_srcu);
2115        if (ret) {
2116                err = ret;
2117                goto fail;
2118        }
2119
2120        ret = setup_bdi(fs_info, &fs_info->bdi);
2121        if (ret) {
2122                err = ret;
2123                goto fail_srcu;
2124        }
2125
2126        ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
2127        if (ret) {
2128                err = ret;
2129                goto fail_bdi;
2130        }
2131        fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
2132                                        (1 + ilog2(nr_cpu_ids));
2133
2134        ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
2135        if (ret) {
2136                err = ret;
2137                goto fail_dirty_metadata_bytes;
2138        }
2139
2140        fs_info->btree_inode = new_inode(sb);
2141        if (!fs_info->btree_inode) {
2142                err = -ENOMEM;
2143                goto fail_delalloc_bytes;
2144        }
2145
2146        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2147
2148        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2149        INIT_LIST_HEAD(&fs_info->trans_list);
2150        INIT_LIST_HEAD(&fs_info->dead_roots);
2151        INIT_LIST_HEAD(&fs_info->delayed_iputs);
2152        INIT_LIST_HEAD(&fs_info->delalloc_roots);
2153        INIT_LIST_HEAD(&fs_info->caching_block_groups);
2154        spin_lock_init(&fs_info->delalloc_root_lock);
2155        spin_lock_init(&fs_info->trans_lock);
2156        spin_lock_init(&fs_info->fs_roots_radix_lock);
2157        spin_lock_init(&fs_info->delayed_iput_lock);
2158        spin_lock_init(&fs_info->defrag_inodes_lock);
2159        spin_lock_init(&fs_info->free_chunk_lock);
2160        spin_lock_init(&fs_info->tree_mod_seq_lock);
2161        spin_lock_init(&fs_info->super_lock);
2162        rwlock_init(&fs_info->tree_mod_log_lock);
2163        mutex_init(&fs_info->reloc_mutex);
2164        seqlock_init(&fs_info->profiles_lock);
2165
2166        init_completion(&fs_info->kobj_unregister);
2167        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2168        INIT_LIST_HEAD(&fs_info->space_info);
2169        INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2170        btrfs_mapping_init(&fs_info->mapping_tree);
2171        btrfs_init_block_rsv(&fs_info->global_block_rsv,
2172                             BTRFS_BLOCK_RSV_GLOBAL);
2173        btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2174                             BTRFS_BLOCK_RSV_DELALLOC);
2175        btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2176        btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2177        btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2178        btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2179                             BTRFS_BLOCK_RSV_DELOPS);
2180        atomic_set(&fs_info->nr_async_submits, 0);
2181        atomic_set(&fs_info->async_delalloc_pages, 0);
2182        atomic_set(&fs_info->async_submit_draining, 0);
2183        atomic_set(&fs_info->nr_async_bios, 0);
2184        atomic_set(&fs_info->defrag_running, 0);
2185        atomic64_set(&fs_info->tree_mod_seq, 0);
2186        fs_info->sb = sb;
2187        fs_info->max_inline = 8192 * 1024;
2188        fs_info->metadata_ratio = 0;
2189        fs_info->defrag_inodes = RB_ROOT;
2190        fs_info->free_chunk_space = 0;
2191        fs_info->tree_mod_log = RB_ROOT;
2192
2193        /* readahead state */
2194        INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2195        spin_lock_init(&fs_info->reada_lock);
2196
2197        fs_info->thread_pool_size = min_t(unsigned long,
2198                                          num_online_cpus() + 2, 8);
2199
2200        INIT_LIST_HEAD(&fs_info->ordered_roots);
2201        spin_lock_init(&fs_info->ordered_root_lock);
2202        fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2203                                        GFP_NOFS);
2204        if (!fs_info->delayed_root) {
2205                err = -ENOMEM;
2206                goto fail_iput;
2207        }
2208        btrfs_init_delayed_root(fs_info->delayed_root);
2209
2210        mutex_init(&fs_info->scrub_lock);
2211        atomic_set(&fs_info->scrubs_running, 0);
2212        atomic_set(&fs_info->scrub_pause_req, 0);
2213        atomic_set(&fs_info->scrubs_paused, 0);
2214        atomic_set(&fs_info->scrub_cancel_req, 0);
2215        init_waitqueue_head(&fs_info->scrub_pause_wait);
2216        init_rwsem(&fs_info->scrub_super_lock);
2217        fs_info->scrub_workers_refcnt = 0;
2218#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2219        fs_info->check_integrity_print_mask = 0;
2220#endif
2221
2222        spin_lock_init(&fs_info->balance_lock);
2223        mutex_init(&fs_info->balance_mutex);
2224        atomic_set(&fs_info->balance_running, 0);
2225        atomic_set(&fs_info->balance_pause_req, 0);
2226        atomic_set(&fs_info->balance_cancel_req, 0);
2227        fs_info->balance_ctl = NULL;
2228        init_waitqueue_head(&fs_info->balance_wait_q);
2229
2230        sb->s_blocksize = 4096;
2231        sb->s_blocksize_bits = blksize_bits(4096);
2232        sb->s_bdi = &fs_info->bdi;
2233
2234        fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2235        set_nlink(fs_info->btree_inode, 1);
2236        /*
2237         * we set the i_size on the btree inode to the max possible offset.
2238         * the real end of the address space is determined by all of
2239         * the devices in the system
2240         */
2241        fs_info->btree_inode->i_size = OFFSET_MAX;
2242        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2243        fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2244
2245        RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2246        extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2247                             fs_info->btree_inode->i_mapping);
2248        BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2249        extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2250
2251        BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2252
2253        BTRFS_I(fs_info->btree_inode)->root = tree_root;
2254        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2255               sizeof(struct btrfs_key));
2256        set_bit(BTRFS_INODE_DUMMY,
2257                &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2258        insert_inode_hash(fs_info->btree_inode);
2259
2260        spin_lock_init(&fs_info->block_group_cache_lock);
2261        fs_info->block_group_cache_tree = RB_ROOT;
2262        fs_info->first_logical_byte = (u64)-1;
2263
2264        extent_io_tree_init(&fs_info->freed_extents[0],
2265                             fs_info->btree_inode->i_mapping);
2266        extent_io_tree_init(&fs_info->freed_extents[1],
2267                             fs_info->btree_inode->i_mapping);
2268        fs_info->pinned_extents = &fs_info->freed_extents[0];
2269        fs_info->do_barriers = 1;
2270
2271
2272        mutex_init(&fs_info->ordered_operations_mutex);
2273        mutex_init(&fs_info->tree_log_mutex);
2274        mutex_init(&fs_info->chunk_mutex);
2275        mutex_init(&fs_info->transaction_kthread_mutex);
2276        mutex_init(&fs_info->cleaner_mutex);
2277        mutex_init(&fs_info->volume_mutex);
2278        init_rwsem(&fs_info->extent_commit_sem);
2279        init_rwsem(&fs_info->cleanup_work_sem);
2280        init_rwsem(&fs_info->subvol_sem);
2281        fs_info->dev_replace.lock_owner = 0;
2282        atomic_set(&fs_info->dev_replace.nesting_level, 0);
2283        mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2284        mutex_init(&fs_info->dev_replace.lock_management_lock);
2285        mutex_init(&fs_info->dev_replace.lock);
2286
2287        spin_lock_init(&fs_info->qgroup_lock);
2288        mutex_init(&fs_info->qgroup_ioctl_lock);
2289        fs_info->qgroup_tree = RB_ROOT;
2290        INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2291        fs_info->qgroup_seq = 1;
2292        fs_info->quota_enabled = 0;
2293        fs_info->pending_quota_state = 0;
2294        fs_info->qgroup_ulist = NULL;
2295        mutex_init(&fs_info->qgroup_rescan_lock);
2296
2297        btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2298        btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2299
2300        init_waitqueue_head(&fs_info->transaction_throttle);
2301        init_waitqueue_head(&fs_info->transaction_wait);
2302        init_waitqueue_head(&fs_info->transaction_blocked_wait);
2303        init_waitqueue_head(&fs_info->async_submit_wait);
2304
2305        ret = btrfs_alloc_stripe_hash_table(fs_info);
2306        if (ret) {
2307                err = ret;
2308                goto fail_alloc;
2309        }
2310
2311        __setup_root(4096, 4096, 4096, 4096, tree_root,
2312                     fs_info, BTRFS_ROOT_TREE_OBJECTID);
2313
2314        invalidate_bdev(fs_devices->latest_bdev);
2315
2316        /*
2317         * Read super block and check the signature bytes only
2318         */
2319        bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2320        if (!bh) {
2321                err = -EINVAL;
2322                goto fail_alloc;
2323        }
2324
2325        /*
2326         * We want to check the superblock checksum; the checksum type is
2327         * stored inside.  Pass the whole disk block of BTRFS_SUPER_INFO_SIZE (4k).
2328         */
2329        if (btrfs_check_super_csum(bh->b_data)) {
2330                printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
2331                err = -EINVAL;
2332                goto fail_alloc;
2333        }
2334
2335        /*
2336         * super_copy is zeroed at allocation time and we never touch the
2337         * following bytes up to INFO_SIZE; the checksum is calculated over
2338         * the whole block of INFO_SIZE
2339         */
2340        memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2341        memcpy(fs_info->super_for_commit, fs_info->super_copy,
2342               sizeof(*fs_info->super_for_commit));
2343        brelse(bh);
2344
2345        memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2346
2347        ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2348        if (ret) {
2349                printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
2350                err = -EINVAL;
2351                goto fail_alloc;
2352        }
2353
2354        disk_super = fs_info->super_copy;
2355        if (!btrfs_super_root(disk_super))
2356                goto fail_alloc;
2357
2358        /* check FS state, whether FS is broken. */
2359        if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2360                set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2361
2362        /*
2363         * run through our array of backup supers and set up
2364         * our ring pointer to the oldest one
2365         */
2366        generation = btrfs_super_generation(disk_super);
2367        find_oldest_super_backup(fs_info, generation);
2368
2369        /*
2370         * In the long term, we'll store the compression type in the super
2371         * block, and it'll be used for per file compression control.
2372         */
2373        fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2374
2375        ret = btrfs_parse_options(tree_root, options);
2376        if (ret) {
2377                err = ret;
2378                goto fail_alloc;
2379        }
2380
2381        features = btrfs_super_incompat_flags(disk_super) &
2382                ~BTRFS_FEATURE_INCOMPAT_SUPP;
2383        if (features) {
2384                printk(KERN_ERR "BTRFS: couldn't mount because of "
2385                       "unsupported optional features (%Lx).\n",
2386                       (unsigned long long)features);
2387                err = -EINVAL;
2388                goto fail_alloc;
2389        }
2390
2391        if (btrfs_super_leafsize(disk_super) !=
2392            btrfs_super_nodesize(disk_super)) {
2393                printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2394                       "blocksizes don't match.  node %d leaf %d\n",
2395                       btrfs_super_nodesize(disk_super),
2396                       btrfs_super_leafsize(disk_super));
2397                err = -EINVAL;
2398                goto fail_alloc;
2399        }
2400        if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2401                printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2402                       "blocksize (%d) was too large\n",
2403                       btrfs_super_leafsize(disk_super));
2404                err = -EINVAL;
2405                goto fail_alloc;
2406        }
2407
2408        features = btrfs_super_incompat_flags(disk_super);
2409        features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2410        if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2411                features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2412
2413        if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2414                printk(KERN_ERR "btrfs: has skinny extents\n");
2415
2416        /*
2417         * flag our filesystem as having big metadata blocks if
2418         * they are bigger than the page size
2419         */
2420        if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2421                if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2422                        printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
2423                features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2424        }
2425
2426        nodesize = btrfs_super_nodesize(disk_super);
2427        leafsize = btrfs_super_leafsize(disk_super);
2428        sectorsize = btrfs_super_sectorsize(disk_super);
2429        stripesize = btrfs_super_stripesize(disk_super);
2430        fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
2431        fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2432
2433        /*
2434         * mixed block groups end up with duplicate but slightly offset
2435         * extent buffers for the same range.  This leads to corruption
2436         */
2437        if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2438            (sectorsize != leafsize)) {
2439                printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
2440                                "are not allowed for mixed block groups on %s\n",
2441                                sb->s_id);
2442                goto fail_alloc;
2443        }
2444
2445        /*
2446         * No need to take a lock here because no other task can be
2447         * updating the flag at this point.
2448         */
2449        btrfs_set_super_incompat_flags(disk_super, features);
2450
2451        features = btrfs_super_compat_ro_flags(disk_super) &
2452                ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2453        if (!(sb->s_flags & MS_RDONLY) && features) {
2454                printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2455                       "unsupported option features (%Lx).\n",
2456                       (unsigned long long)features);
2457                err = -EINVAL;
2458                goto fail_alloc;
2459        }
2460
2461        btrfs_init_workers(&fs_info->generic_worker,
2462                           "genwork", 1, NULL);
2463
2464        btrfs_init_workers(&fs_info->workers, "worker",
2465                           fs_info->thread_pool_size,
2466                           &fs_info->generic_worker);
2467
2468        btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
2469                           fs_info->thread_pool_size,
2470                           &fs_info->generic_worker);
2471
2472        btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
2473                           fs_info->thread_pool_size,
2474                           &fs_info->generic_worker);
2475
2476        btrfs_init_workers(&fs_info->submit_workers, "submit",
2477                           min_t(u64, fs_devices->num_devices,
2478                           fs_info->thread_pool_size),
2479                           &fs_info->generic_worker);
2480
2481        btrfs_init_workers(&fs_info->caching_workers, "cache",
2482                           2, &fs_info->generic_worker);
2483
2484        /* a higher idle thresh on the submit workers makes it much more
2485         * likely that bios will be sent down in a sane order to the
2486         * devices
2487         */
2488        fs_info->submit_workers.idle_thresh = 64;
2489
2490        fs_info->workers.idle_thresh = 16;
2491        fs_info->workers.ordered = 1;
2492
2493        fs_info->delalloc_workers.idle_thresh = 2;
2494        fs_info->delalloc_workers.ordered = 1;
2495
2496        btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
2497                           &fs_info->generic_worker);
2498        btrfs_init_workers(&fs_info->endio_workers, "endio",
2499                           fs_info->thread_pool_size,
2500                           &fs_info->generic_worker);
2501        btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
2502                           fs_info->thread_pool_size,
2503                           &fs_info->generic_worker);
2504        btrfs_init_workers(&fs_info->endio_meta_write_workers,
2505                           "endio-meta-write", fs_info->thread_pool_size,
2506                           &fs_info->generic_worker);
2507        btrfs_init_workers(&fs_info->endio_raid56_workers,
2508                           "endio-raid56", fs_info->thread_pool_size,
2509                           &fs_info->generic_worker);
2510        btrfs_init_workers(&fs_info->rmw_workers,
2511                           "rmw", fs_info->thread_pool_size,
2512                           &fs_info->generic_worker);
2513        btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
2514                           fs_info->thread_pool_size,
2515                           &fs_info->generic_worker);
2516        btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
2517                           1, &fs_info->generic_worker);
2518        btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
2519                           fs_info->thread_pool_size,
2520                           &fs_info->generic_worker);
2521        btrfs_init_workers(&fs_info->readahead_workers, "readahead",
2522                           fs_info->thread_pool_size,
2523                           &fs_info->generic_worker);
2524        btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1,
2525                           &fs_info->generic_worker);
2526
2527        /*
2528         * endios are largely parallel and should have a very
2529         * low idle thresh
2530         */
2531        fs_info->endio_workers.idle_thresh = 4;
2532        fs_info->endio_meta_workers.idle_thresh = 4;
2533        fs_info->endio_raid56_workers.idle_thresh = 4;
2534        fs_info->rmw_workers.idle_thresh = 2;
2535
2536        fs_info->endio_write_workers.idle_thresh = 2;
2537        fs_info->endio_meta_write_workers.idle_thresh = 2;
2538        fs_info->readahead_workers.idle_thresh = 2;
2539
2540        /*
2541         * btrfs_start_workers can really only fail because of ENOMEM so just
2542         * return -ENOMEM if any of these fail.
2543         */
2544        ret = btrfs_start_workers(&fs_info->workers);
2545        ret |= btrfs_start_workers(&fs_info->generic_worker);
2546        ret |= btrfs_start_workers(&fs_info->submit_workers);
2547        ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2548        ret |= btrfs_start_workers(&fs_info->fixup_workers);
2549        ret |= btrfs_start_workers(&fs_info->endio_workers);
2550        ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2551        ret |= btrfs_start_workers(&fs_info->rmw_workers);
2552        ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
2553        ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2554        ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2555        ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2556        ret |= btrfs_start_workers(&fs_info->delayed_workers);
2557        ret |= btrfs_start_workers(&fs_info->caching_workers);
2558        ret |= btrfs_start_workers(&fs_info->readahead_workers);
2559        ret |= btrfs_start_workers(&fs_info->flush_workers);
2560        ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
2561        if (ret) {
2562                err = -ENOMEM;
2563                goto fail_sb_buffer;
2564        }
2565
2566        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2567        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2568                                    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2569
2570        tree_root->nodesize = nodesize;
2571        tree_root->leafsize = leafsize;
2572        tree_root->sectorsize = sectorsize;
2573        tree_root->stripesize = stripesize;
2574
2575        sb->s_blocksize = sectorsize;
2576        sb->s_blocksize_bits = blksize_bits(sectorsize);
2577
2578        if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2579                printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2580                goto fail_sb_buffer;
2581        }
2582
2583        if (sectorsize != PAGE_SIZE) {
2584                printk(KERN_WARNING "btrfs: Incompatible sector size (%lu) "
2585                       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2586                goto fail_sb_buffer;
2587        }
2588
2589        mutex_lock(&fs_info->chunk_mutex);
2590        ret = btrfs_read_sys_array(tree_root);
2591        mutex_unlock(&fs_info->chunk_mutex);
2592        if (ret) {
2593                printk(KERN_WARNING "btrfs: failed to read the system "
2594                       "array on %s\n", sb->s_id);
2595                goto fail_sb_buffer;
2596        }
2597
2598        blocksize = btrfs_level_size(tree_root,
2599                                     btrfs_super_chunk_root_level(disk_super));
2600        generation = btrfs_super_chunk_root_generation(disk_super);
2601
2602        __setup_root(nodesize, leafsize, sectorsize, stripesize,
2603                     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2604
2605        chunk_root->node = read_tree_block(chunk_root,
2606                                           btrfs_super_chunk_root(disk_super),
2607                                           blocksize, generation);
2608        if (!chunk_root->node ||
2609            !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2610                printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2611                       sb->s_id);
2612                goto fail_tree_roots;
2613        }
2614        btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2615        chunk_root->commit_root = btrfs_root_node(chunk_root);
2616
2617        read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2618           (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
2619           BTRFS_UUID_SIZE);
2620
2621        ret = btrfs_read_chunk_tree(chunk_root);
2622        if (ret) {
2623                printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
2624                       sb->s_id);
2625                goto fail_tree_roots;
2626        }
2627
2628        /*
2629         * keep the device that is marked to be the target device for the
2630         * dev_replace procedure
2631         */
2632        btrfs_close_extra_devices(fs_info, fs_devices, 0);
2633
2634        if (!fs_devices->latest_bdev) {
2635                printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
2636                       sb->s_id);
2637                goto fail_tree_roots;
2638        }
2639
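            /*
             * read the tree root.  If this fails we end up back here via
             * recovery_tree_root, after next_root_backup() has pointed the
             * super block at an older backup root to try instead.
             */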
2640retry_root_backup:
2641        blocksize = btrfs_level_size(tree_root,
2642                                     btrfs_super_root_level(disk_super));
2643        generation = btrfs_super_generation(disk_super);
2644
2645        tree_root->node = read_tree_block(tree_root,
2646                                          btrfs_super_root(disk_super),
2647                                          blocksize, generation);
2648        if (!tree_root->node ||
2649            !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2650                printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2651                       sb->s_id);
2652
2653                goto recovery_tree_root;
2654        }
2655
2656        btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2657        tree_root->commit_root = btrfs_root_node(tree_root);
2658
2659        location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2660        location.type = BTRFS_ROOT_ITEM_KEY;
2661        location.offset = 0;
2662
2663        extent_root = btrfs_read_tree_root(tree_root, &location);
2664        if (IS_ERR(extent_root)) {
2665                ret = PTR_ERR(extent_root);
2666                goto recovery_tree_root;
2667        }
2668        extent_root->track_dirty = 1;
2669        fs_info->extent_root = extent_root;
2670
2671        location.objectid = BTRFS_DEV_TREE_OBJECTID;
2672        dev_root = btrfs_read_tree_root(tree_root, &location);
2673        if (IS_ERR(dev_root)) {
2674                ret = PTR_ERR(dev_root);
2675                goto recovery_tree_root;
2676        }
2677        dev_root->track_dirty = 1;
2678        fs_info->dev_root = dev_root;
2679        btrfs_init_devices_late(fs_info);
2680
2681        location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2682        csum_root = btrfs_read_tree_root(tree_root, &location);
2683        if (IS_ERR(csum_root)) {
2684                ret = PTR_ERR(csum_root);
2685                goto recovery_tree_root;
2686        }
2687        csum_root->track_dirty = 1;
2688        fs_info->csum_root = csum_root;
2689
2690        location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2691        quota_root = btrfs_read_tree_root(tree_root, &location);
2692        if (!IS_ERR(quota_root)) {
2693                quota_root->track_dirty = 1;
2694                fs_info->quota_enabled = 1;
2695                fs_info->pending_quota_state = 1;
2696                fs_info->quota_root = quota_root;
2697        }
2698
2699        fs_info->generation = generation;
2700        fs_info->last_trans_committed = generation;
2701
2702        ret = btrfs_recover_balance(fs_info);
2703        if (ret) {
2704                printk(KERN_WARNING "btrfs: failed to recover balance\n");
2705                goto fail_block_groups;
2706        }
2707
2708        ret = btrfs_init_dev_stats(fs_info);
2709        if (ret) {
2710                printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
2711                       ret);
2712                goto fail_block_groups;
2713        }
2714
2715        ret = btrfs_init_dev_replace(fs_info);
2716        if (ret) {
2717                pr_err("btrfs: failed to init dev_replace: %d\n", ret);
2718                goto fail_block_groups;
2719        }
2720
2721        btrfs_close_extra_devices(fs_info, fs_devices, 1);
2722
2723        ret = btrfs_init_space_info(fs_info);
2724        if (ret) {
2725                printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2726                goto fail_block_groups;
2727        }
2728
2729        ret = btrfs_read_block_groups(extent_root);
2730        if (ret) {
2731                printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2732                goto fail_block_groups;
2733        }
2734        fs_info->num_tolerated_disk_barrier_failures =
2735                btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2736        if (fs_info->fs_devices->missing_devices >
2737             fs_info->num_tolerated_disk_barrier_failures &&
2738            !(sb->s_flags & MS_RDONLY)) {
2739                printk(KERN_WARNING
2740                       "Btrfs: too many missing devices, writeable mount is not allowed\n");
2741                goto fail_block_groups;
2742        }
2743
2744        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2745                                               "btrfs-cleaner");
2746        if (IS_ERR(fs_info->cleaner_kthread))
2747                goto fail_block_groups;
2748
2749        fs_info->transaction_kthread = kthread_run(transaction_kthread,
2750                                                   tree_root,
2751                                                   "btrfs-transaction");
2752        if (IS_ERR(fs_info->transaction_kthread))
2753                goto fail_cleaner;
2754
2755        if (!btrfs_test_opt(tree_root, SSD) &&
2756            !btrfs_test_opt(tree_root, NOSSD) &&
2757            !fs_info->fs_devices->rotating) {
2758                printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2759                       "mode\n");
2760                btrfs_set_opt(fs_info->mount_opt, SSD);
2761        }
2762
2763#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2764        if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2765                ret = btrfsic_mount(tree_root, fs_devices,
2766                                    btrfs_test_opt(tree_root,
2767                                        CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2768                                    1 : 0,
2769                                    fs_info->check_integrity_print_mask);
2770                if (ret)
2771                        printk(KERN_WARNING "btrfs: failed to initialize"
2772                               " integrity check module %s\n", sb->s_id);
2773        }
2774#endif
2775        ret = btrfs_read_qgroup_config(fs_info);
2776        if (ret)
2777                goto fail_trans_kthread;
2778
2779        /* do not make disk changes in broken FS */
2780        if (btrfs_super_log_root(disk_super) != 0) {
2781                u64 bytenr = btrfs_super_log_root(disk_super);
2782
2783                if (fs_devices->rw_devices == 0) {
2784                        printk(KERN_WARNING "Btrfs log replay required "
2785                               "on RO media\n");
2786                        err = -EIO;
2787                        goto fail_qgroup;
2788                }
2789                blocksize =
2790                     btrfs_level_size(tree_root,
2791                                      btrfs_super_log_root_level(disk_super));
2792
2793                log_tree_root = btrfs_alloc_root(fs_info);
2794                if (!log_tree_root) {
2795                        err = -ENOMEM;
2796                        goto fail_qgroup;
2797                }
2798
2799                __setup_root(nodesize, leafsize, sectorsize, stripesize,
2800                             log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2801
2802                log_tree_root->node = read_tree_block(tree_root, bytenr,
2803                                                      blocksize,
2804                                                      generation + 1);
2805                if (!log_tree_root->node ||
2806                    !extent_buffer_uptodate(log_tree_root->node)) {
2807                        printk(KERN_ERR "btrfs: failed to read log tree\n");
2808                        free_extent_buffer(log_tree_root->node);
2809                        kfree(log_tree_root);
2810                        goto fail_trans_kthread;
2811                }
2812                /* returns with log_tree_root freed on success */
2813                ret = btrfs_recover_log_trees(log_tree_root);
2814                if (ret) {
2815                        btrfs_error(tree_root->fs_info, ret,
2816                                    "Failed to recover log tree");
2817                        free_extent_buffer(log_tree_root->node);
2818                        kfree(log_tree_root);
2819                        goto fail_trans_kthread;
2820                }
2821
2822                if (sb->s_flags & MS_RDONLY) {
2823                        ret = btrfs_commit_super(tree_root);
2824                        if (ret)
2825                                goto fail_trans_kthread;
2826                }
2827        }
2828
2829        ret = btrfs_find_orphan_roots(tree_root);
2830        if (ret)
2831                goto fail_trans_kthread;
2832
2833        if (!(sb->s_flags & MS_RDONLY)) {
2834                ret = btrfs_cleanup_fs_roots(fs_info);
2835                if (ret)
2836                        goto fail_trans_kthread;
2837
2838                ret = btrfs_recover_relocation(tree_root);
2839                if (ret < 0) {
2840                        printk(KERN_WARNING
2841                               "btrfs: failed to recover relocation\n");
2842                        err = -EINVAL;
2843                        goto fail_qgroup;
2844                }
2845        }
2846
2847        location.objectid = BTRFS_FS_TREE_OBJECTID;
2848        location.type = BTRFS_ROOT_ITEM_KEY;
2849        location.offset = 0;
2850
2851        fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2852        if (IS_ERR(fs_info->fs_root)) {
2853                err = PTR_ERR(fs_info->fs_root);
2854                goto fail_qgroup;
2855        }
2856
2857        if (sb->s_flags & MS_RDONLY)
2858                return 0;
2859
2860        down_read(&fs_info->cleanup_work_sem);
2861        if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2862            (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2863                up_read(&fs_info->cleanup_work_sem);
2864                close_ctree(tree_root);
2865                return ret;
2866        }
2867        up_read(&fs_info->cleanup_work_sem);
2868
2869        ret = btrfs_resume_balance_async(fs_info);
2870        if (ret) {
2871                printk(KERN_WARNING "btrfs: failed to resume balance\n");
2872                close_ctree(tree_root);
2873                return ret;
2874        }
2875
2876        ret = btrfs_resume_dev_replace_async(fs_info);
2877        if (ret) {
2878                pr_warn("btrfs: failed to resume dev_replace\n");
2879                close_ctree(tree_root);
2880                return ret;
2881        }
2882
2883        btrfs_qgroup_rescan_resume(fs_info);
2884
2885        return 0;
2886
2887fail_qgroup:
2888        btrfs_free_qgroup_config(fs_info);
2889fail_trans_kthread:
2890        kthread_stop(fs_info->transaction_kthread);
2891        btrfs_cleanup_transaction(fs_info->tree_root);
2892        del_fs_roots(fs_info);
2893fail_cleaner:
2894        kthread_stop(fs_info->cleaner_kthread);
2895
2896        /*
2897         * make sure we're done with the btree inode before we stop our
2898         * kthreads
2899         */
2900        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2901
2902fail_block_groups:
2903        btrfs_put_block_group_cache(fs_info);
2904        btrfs_free_block_groups(fs_info);
2905
2906fail_tree_roots:
2907        free_root_pointers(fs_info, 1);
2908        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2909
2910fail_sb_buffer:
2911        btrfs_stop_all_workers(fs_info);
2912fail_alloc:
2913fail_iput:
2914        btrfs_mapping_tree_free(&fs_info->mapping_tree);
2915
2916        iput(fs_info->btree_inode);
2917fail_delalloc_bytes:
2918        percpu_counter_destroy(&fs_info->delalloc_bytes);
2919fail_dirty_metadata_bytes:
2920        percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
2921fail_bdi:
2922        bdi_destroy(&fs_info->bdi);
2923fail_srcu:
2924        cleanup_srcu_struct(&fs_info->subvol_srcu);
2925fail:
2926        btrfs_free_stripe_hash_table(fs_info);
2927        btrfs_close_devices(fs_info->fs_devices);
2928        return err;
2929
2930recovery_tree_root:
2931        if (!btrfs_test_opt(tree_root, RECOVERY))
2932                goto fail_tree_roots;
2933
2934        free_root_pointers(fs_info, 0);
2935
2936        /* don't use the log in recovery mode, it won't be valid */
2937        btrfs_set_super_log_root(disk_super, 0);
2938
2939        /* we can't trust the free space cache either */
2940        btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2941
2942        ret = next_root_backup(fs_info, fs_info->super_copy,
2943                               &num_backups_tried, &backup_index);
2944        if (ret == -1)
2945                goto fail_block_groups;
2946        goto retry_root_backup;
2947}
2948
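/*
 * end_io handler for the super block buffer heads submitted by
 * write_dev_supers.  Write errors are recorded in the per-device statistics
 * rather than in the buffer's error bits.
 */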
2949static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2950{
2951        if (uptodate) {
2952                set_buffer_uptodate(bh);
2953        } else {
2954                struct btrfs_device *device = (struct btrfs_device *)
2955                        bh->b_private;
2956
2957                printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
2958                                          "I/O error on %s\n",
2959                                          rcu_str_deref(device->name));
2960                /* note, we don't set_buffer_write_io_error because we have
2961                 * our own ways of dealing with the IO errors
2962                 */
2963                clear_buffer_uptodate(bh);
2964                btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
2965        }
2966        unlock_buffer(bh);
2967        put_bh(bh);
2968}
2969
2970struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2971{
2972        struct buffer_head *bh;
2973        struct buffer_head *latest = NULL;
2974        struct btrfs_super_block *super;
2975        int i;
2976        u64 transid = 0;
2977        u64 bytenr;
2978
2979        /* we would like to check all the supers, but that would make
2980         * a btrfs mount succeed after a mkfs from a different FS.
2981         * So, we need to add a special mount option to scan for
2982         * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2983         */
2984        for (i = 0; i < 1; i++) {
2985                bytenr = btrfs_sb_offset(i);
2986                if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2987                        break;
2988                bh = __bread(bdev, bytenr / 4096, 4096);
2989                if (!bh)
2990                        continue;
2991
2992                super = (struct btrfs_super_block *)bh->b_data;
2993                if (btrfs_super_bytenr(super) != bytenr ||
2994                    super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2995                        brelse(bh);
2996                        continue;
2997                }
2998
2999                if (!latest || btrfs_super_generation(super) > transid) {
3000                        brelse(latest);
3001                        latest = bh;
3002                        transid = btrfs_super_generation(super);
3003                } else {
3004                        brelse(bh);
3005                }
3006        }
3007        return latest;
3008}
3009
3010/*
3011 * this should be called twice, once with wait == 0 and
3012 * once with wait == 1.  When wait == 0 is done, all the buffer heads
3013 * we write are pinned.
3014 *
3015 * They are released when wait == 1 is done.
3016 * max_mirrors must be the same for both runs, and it indicates how
3017 * many supers on this one device should be written.
3018 *
3019 * max_mirrors == 0 means to write them all.
3020 */
3021static int write_dev_supers(struct btrfs_device *device,
3022                            struct btrfs_super_block *sb,
3023                            int do_barriers, int wait, int max_mirrors)
3024{
3025        struct buffer_head *bh;
3026        int i;
3027        int ret;
3028        int errors = 0;
3029        u32 crc;
3030        u64 bytenr;
3031
3032        if (max_mirrors == 0)
3033                max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3034
3035        for (i = 0; i < max_mirrors; i++) {
3036                bytenr = btrfs_sb_offset(i);
3037                if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
3038                        break;
3039
3040                if (wait) {
3041                        bh = __find_get_block(device->bdev, bytenr / 4096,
3042                                              BTRFS_SUPER_INFO_SIZE);
3043                        if (!bh) {
3044                                errors++;
3045                                continue;
3046                        }
3047                        wait_on_buffer(bh);
3048                        if (!buffer_uptodate(bh))
3049                                errors++;
3050
3051                        /* drop our reference */
3052                        brelse(bh);
3053
3054                        /* drop the reference from the wait == 0 run */
3055                        brelse(bh);
3056                        continue;
3057                } else {
3058                        btrfs_set_super_bytenr(sb, bytenr);
3059
3060                        crc = ~(u32)0;
3061                        crc = btrfs_csum_data((char *)sb +
3062                                              BTRFS_CSUM_SIZE, crc,
3063                                              BTRFS_SUPER_INFO_SIZE -
3064                                              BTRFS_CSUM_SIZE);
3065                        btrfs_csum_final(crc, sb->csum);
3066
3067                        /*
3068                         * one reference for us, and we leave it for the
3069                         * caller
3070                         */
3071                        bh = __getblk(device->bdev, bytenr / 4096,
3072                                      BTRFS_SUPER_INFO_SIZE);
3073                        if (!bh) {
3074                                printk(KERN_ERR "btrfs: couldn't get super "
3075                                       "buffer head for bytenr %Lu\n", bytenr);
3076                                errors++;
3077                                continue;
3078                        }
3079
3080                        memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3081
3082                        /* one reference for submit_bh */
3083                        get_bh(bh);
3084
3085                        set_buffer_uptodate(bh);
3086                        lock_buffer(bh);
3087                        bh->b_end_io = btrfs_end_buffer_write_sync;
3088                        bh->b_private = device;
3089                }
3090
3091                /*
3092                 * we fua the first super.  The others we allow
3093                 * to go down lazily.
3094                 */
3095                ret = btrfsic_submit_bh(WRITE_FUA, bh);
3096                if (ret)
3097                        errors++;
3098        }
3099        return errors < i ? 0 : -1;
3100}
3101
3102/*
3103 * endio for the write_dev_flush; this will wake anyone waiting
3104 * for the barrier when it is done
3105 */
3106static void btrfs_end_empty_barrier(struct bio *bio, int err)
3107{
3108        if (err) {
3109                if (err == -EOPNOTSUPP)
3110                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
3111                clear_bit(BIO_UPTODATE, &bio->bi_flags);
3112        }
3113        if (bio->bi_private)
3114                complete(bio->bi_private);
3115        bio_put(bio);
3116}
3117
3118/*
3119 * trigger flushes for one of the devices.  If you pass wait == 0, the flushes are
3120 * sent down.  With wait == 1, it waits for the previous flush.
3121 *
3122 * any device where the flush fails with eopnotsupp is flagged as not-barrier
3123 * capable
3124 */
3125static int write_dev_flush(struct btrfs_device *device, int wait)
3126{
3127        struct bio *bio;
3128        int ret = 0;
3129
3130        if (device->nobarriers)
3131                return 0;
3132
3133        if (wait) {
3134                bio = device->flush_bio;
3135                if (!bio)
3136                        return 0;
3137
3138                wait_for_completion(&device->flush_wait);
3139
3140                if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
3141                        printk_in_rcu("btrfs: disabling barriers on dev %s\n",
3142                                      rcu_str_deref(device->name));
3143                        device->nobarriers = 1;
3144                } else if (!bio_flagged(bio, BIO_UPTODATE)) {
3145                        ret = -EIO;
3146                        btrfs_dev_stat_inc_and_print(device,
3147                                BTRFS_DEV_STAT_FLUSH_ERRS);
3148                }
3149
3150                /* drop the reference from the wait == 0 run */
3151                bio_put(bio);
3152                device->flush_bio = NULL;
3153
3154                return ret;
3155        }
3156
3157        /*
3158         * one reference for us, and we leave it for the
3159         * caller
3160         */
3161        device->flush_bio = NULL;
3162        bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3163        if (!bio)
3164                return -ENOMEM;
3165
3166        bio->bi_end_io = btrfs_end_empty_barrier;
3167        bio->bi_bdev = device->bdev;
3168        init_completion(&device->flush_wait);
3169        bio->bi_private = &device->flush_wait;
3170        device->flush_bio = bio;
3171
3172        bio_get(bio);
3173        btrfsic_submit_bio(WRITE_FLUSH, bio);
3174
3175        return 0;
3176}
3177
3178/*
3179 * send an empty flush down to each device in parallel,
3180 * then wait for them
3181 */
3182static int barrier_all_devices(struct btrfs_fs_info *info)
3183{
3184        struct list_head *head;
3185        struct btrfs_device *dev;
3186        int errors_send = 0;
3187        int errors_wait = 0;
3188        int ret;
3189
3190        /* send down all the barriers */
3191        head = &info->fs_devices->devices;
3192        list_for_each_entry_rcu(dev, head, dev_list) {
3193                if (!dev->bdev) {
3194                        errors_send++;
3195                        continue;
3196                }
3197                if (!dev->in_fs_metadata || !dev->writeable)
3198                        continue;
3199
3200                ret = write_dev_flush(dev, 0);
3201                if (ret)
3202                        errors_send++;
3203        }
3204
3205        /* wait for all the barriers */
3206        list_for_each_entry_rcu(dev, head, dev_list) {
3207                if (!dev->bdev) {
3208                        errors_wait++;
3209                        continue;
3210                }
3211                if (!dev->in_fs_metadata || !dev->writeable)
3212                        continue;
3213
3214                ret = write_dev_flush(dev, 1);
3215                if (ret)
3216                        errors_wait++;
3217        }
3218        if (errors_send > info->num_tolerated_disk_barrier_failures ||
3219            errors_wait > info->num_tolerated_disk_barrier_failures)
3220                return -EIO;
3221        return 0;
3222}
3223
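/*
 * Work out how many devices may fail their barrier/super writes before the
 * filesystem can no longer be committed safely, based on the least redundant
 * block group profile in use (see the per-profile comment below).
 */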
3224int btrfs_calc_num_tolerated_disk_barrier_failures(
3225        struct btrfs_fs_info *fs_info)
3226{
3227        struct btrfs_ioctl_space_info space;
3228        struct btrfs_space_info *sinfo;
3229        u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3230                       BTRFS_BLOCK_GROUP_SYSTEM,
3231                       BTRFS_BLOCK_GROUP_METADATA,
3232                       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3233        int num_types = 4;
3234        int i;
3235        int c;
3236        int num_tolerated_disk_barrier_failures =
3237                (int)fs_info->fs_devices->num_devices;
3238
3239        for (i = 0; i < num_types; i++) {
3240                struct btrfs_space_info *tmp;
3241
3242                sinfo = NULL;
3243                rcu_read_lock();
3244                list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3245                        if (tmp->flags == types[i]) {
3246                                sinfo = tmp;
3247                                break;
3248                        }
3249                }
3250                rcu_read_unlock();
3251
3252                if (!sinfo)
3253                        continue;
3254
3255                down_read(&sinfo->groups_sem);
3256                for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3257                        if (!list_empty(&sinfo->block_groups[c])) {
3258                                u64 flags;
3259
3260                                btrfs_get_block_group_info(
3261                                        &sinfo->block_groups[c], &space);
3262                                if (space.total_bytes == 0 ||
3263                                    space.used_bytes == 0)
3264                                        continue;
3265                                flags = space.flags;
3266                                /*
3267                                 * return
3268                                 * 0: if dup, single or RAID0 is configured for
3269                                 *    any of metadata, system or data, else
3270                                 * 1: if RAID5 is configured, or if RAID1 or
3271                                 *    RAID10 is configured and only two mirrors
3272                                 *    are used, else
3273                                 * 2: if RAID6 is configured, else
3274                                 * num_mirrors - 1: if RAID1 or RAID10 is
3275                                 *                  configured and more than
3276                                 *                  2 mirrors are used.
3277                                 */
3278                                if (num_tolerated_disk_barrier_failures > 0 &&
3279                                    ((flags & (BTRFS_BLOCK_GROUP_DUP |
3280                                               BTRFS_BLOCK_GROUP_RAID0)) ||
3281                                     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3282                                      == 0)))
3283                                        num_tolerated_disk_barrier_failures = 0;
3284                                else if (num_tolerated_disk_barrier_failures > 1) {
3285                                        if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3286                                            BTRFS_BLOCK_GROUP_RAID5 |
3287                                            BTRFS_BLOCK_GROUP_RAID10)) {
3288                                                num_tolerated_disk_barrier_failures = 1;
3289                                        } else if (flags &
3290                                                   BTRFS_BLOCK_GROUP_RAID6) {
3291                                                num_tolerated_disk_barrier_failures = 2;
3292                                        }
3293                                }
3294                        }
3295                }
3296                up_read(&sinfo->groups_sem);
3297        }
3298
3299        return num_tolerated_disk_barrier_failures;
3300}
3301
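/*
 * Write the super block to every writable device in the filesystem.  When
 * barriers are enabled we first flush all devices, then write the supers in
 * two passes through write_dev_supers: one to submit the buffers and one to
 * wait for them.
 */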
3302static int write_all_supers(struct btrfs_root *root, int max_mirrors)
3303{
3304        struct list_head *head;
3305        struct btrfs_device *dev;
3306        struct btrfs_super_block *sb;
3307        struct btrfs_dev_item *dev_item;
3308        int ret;
3309        int do_barriers;
3310        int max_errors;
3311        int total_errors = 0;
3312        u64 flags;
3313
3314        max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
3315        do_barriers = !btrfs_test_opt(root, NOBARRIER);
3316        backup_super_roots(root->fs_info);
3317
3318        sb = root->fs_info->super_for_commit;
3319        dev_item = &sb->dev_item;
3320
3321        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3322        head = &root->fs_info->fs_devices->devices;
3323
3324        if (do_barriers) {
3325                ret = barrier_all_devices(root->fs_info);
3326                if (ret) {
3327                        mutex_unlock(
3328                                &root->fs_info->fs_devices->device_list_mutex);
3329                        btrfs_error(root->fs_info, ret,
3330                                    "errors while submitting device barriers.");
3331                        return ret;
3332                }
3333        }
3334
3335        list_for_each_entry_rcu(dev, head, dev_list) {
3336                if (!dev->bdev) {
3337                        total_errors++;
3338                        continue;
3339                }
3340                if (!dev->in_fs_metadata || !dev->writeable)
3341                        continue;
3342
3343                btrfs_set_stack_device_generation(dev_item, 0);
3344                btrfs_set_stack_device_type(dev_item, dev->type);
3345                btrfs_set_stack_device_id(dev_item, dev->devid);
3346                btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
3347                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
3348                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3349                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3350                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3351                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3352                memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3353
3354                flags = btrfs_super_flags(sb);
3355                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3356
3357                ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3358                if (ret)
3359                        total_errors++;
3360        }
3361        if (total_errors > max_errors) {
3362                printk(KERN_ERR "btrfs: %d errors while writing supers\n",
3363                       total_errors);
3364
3365                /* This shouldn't happen. FUA is masked off if unsupported */
3366                BUG();
3367        }
3368
3369        total_errors = 0;
3370        list_for_each_entry_rcu(dev, head, dev_list) {
3371                if (!dev->bdev)
3372                        continue;
3373                if (!dev->in_fs_metadata || !dev->writeable)
3374                        continue;
3375
3376                ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3377                if (ret)
3378                        total_errors++;
3379        }
3380        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3381        if (total_errors > max_errors) {
3382                btrfs_error(root->fs_info, -EIO,
3383                            "%d errors while writing supers", total_errors);
3384                return -EIO;
3385        }
3386        return 0;
3387}
3388
3389int write_ctree_super(struct btrfs_trans_handle *trans,
3390                      struct btrfs_root *root, int max_mirrors)
3391{
3392        int ret;
3393
3394        ret = write_all_supers(root, max_mirrors);
3395        return ret;
3396}
3397
3398/* Drop a fs root from the radix tree and free it. */
3399void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3400                                  struct btrfs_root *root)
3401{
3402        spin_lock(&fs_info->fs_roots_radix_lock);
3403        radix_tree_delete(&fs_info->fs_roots_radix,
3404                          (unsigned long)root->root_key.objectid);
3405        spin_unlock(&fs_info->fs_roots_radix_lock);
3406
3407        if (btrfs_root_refs(&root->root_item) == 0)
3408                synchronize_srcu(&fs_info->subvol_srcu);
3409
3410        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3411                btrfs_free_log(NULL, root);
3412                btrfs_free_log_root_tree(NULL, fs_info);
3413        }
3414
3415        __btrfs_remove_free_space_cache(root->free_ino_pinned);
3416        __btrfs_remove_free_space_cache(root->free_ino_ctl);
3417        free_fs_root(root);
3418}
3419
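/*
 * Free everything attached to an fs root: the cache inode, the anonymous
 * bdev, the node and commit_root extent buffers and the free-ino caches,
 * then drop the last reference on the root itself.
 */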
3420static void free_fs_root(struct btrfs_root *root)
3421{
3422        iput(root->cache_inode);
3423        WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3424        if (root->anon_dev)
3425                free_anon_bdev(root->anon_dev);
3426        free_extent_buffer(root->node);
3427        free_extent_buffer(root->commit_root);
3428        kfree(root->free_ino_ctl);
3429        kfree(root->free_ino_pinned);
3430        kfree(root->name);
3431        btrfs_put_fs_root(root);
3432}
3433
3434void btrfs_free_fs_root(struct btrfs_root *root)
3435{
3436        free_fs_root(root);
3437}
3438
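/*
 * Walk every fs root currently cached in the radix tree and run orphan
 * cleanup on it; used after log replay on writable mounts.
 */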
3439int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3440{
3441        u64 root_objectid = 0;
3442        struct btrfs_root *gang[8];
3443        int i;
3444        int ret;
3445
3446        while (1) {
3447                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3448                                             (void **)gang, root_objectid,
3449                                             ARRAY_SIZE(gang));
3450                if (!ret)
3451                        break;
3452
3453                root_objectid = gang[ret - 1]->root_key.objectid + 1;
3454                for (i = 0; i < ret; i++) {
3455                        int err;
3456
3457                        root_objectid = gang[i]->root_key.objectid;
3458                        err = btrfs_orphan_cleanup(gang[i]);
3459                        if (err)
3460                                return err;
3461                }
3462                root_objectid++;
3463        }
3464        return 0;
3465}
3466
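/*
 * Commit the currently running transaction and write out the super blocks.
 * The transaction is committed twice so the original snapshot roots are
 * dropped before the final super is written.
 */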
3467int btrfs_commit_super(struct btrfs_root *root)
3468{
3469        struct btrfs_trans_handle *trans;
3470        int ret;
3471
3472        mutex_lock(&root->fs_info->cleaner_mutex);
3473        btrfs_run_delayed_iputs(root);
3474        mutex_unlock(&root->fs_info->cleaner_mutex);
3475        wake_up_process(root->fs_info->cleaner_kthread);
3476
3477        /* wait until ongoing cleanup work is done */
3478        down_write(&root->fs_info->cleanup_work_sem);
3479        up_write(&root->fs_info->cleanup_work_sem);
3480
3481        trans = btrfs_join_transaction(root);
3482        if (IS_ERR(trans))
3483                return PTR_ERR(trans);
3484        ret = btrfs_commit_transaction(trans, root);
3485        if (ret)
3486                return ret;
3487        /* run commit again to drop the original snapshot */
3488        trans = btrfs_join_transaction(root);
3489        if (IS_ERR(trans))
3490                return PTR_ERR(trans);
3491        ret = btrfs_commit_transaction(trans, root);
3492        if (ret)
3493                return ret;
3494        ret = btrfs_write_and_wait_transaction(NULL, root);
3495        if (ret) {
3496                btrfs_error(root->fs_info, ret,
3497                            "Failed to sync btree inode to disk.");
3498                return ret;
3499        }
3500
3501        ret = write_ctree_super(NULL, root, 0);
3502        return ret;
3503}
3504
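/*
 * Final teardown at unmount time: pause balance, suspend dev-replace, cancel
 * scrub, commit the super if the filesystem is writable, stop the cleaner
 * and transaction kthreads, and free the roots, block groups and workers.
 */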
3505int close_ctree(struct btrfs_root *root)
3506{
3507        struct btrfs_fs_info *fs_info = root->fs_info;
3508        int ret;
3509
3510        fs_info->closing = 1;
3511        smp_mb();
3512
3513        /* pause restriper - we want to resume on mount */
3514        btrfs_pause_balance(fs_info);
3515
3516        btrfs_dev_replace_suspend_for_unmount(fs_info);
3517
3518        btrfs_scrub_cancel(fs_info);
3519
3520        /* wait for any defraggers to finish */
3521        wait_event(fs_info->transaction_wait,
3522                   (atomic_read(&fs_info->defrag_running) == 0));
3523
3524        /* clear out the rbtree of defraggable inodes */
3525        btrfs_cleanup_defrag_inodes(fs_info);
3526
3527        if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3528                ret = btrfs_commit_super(root);
3529                if (ret)
3530                        printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3531        }
3532
3533        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3534                btrfs_error_commit_super(root);
3535
3536        btrfs_put_block_group_cache(fs_info);
3537
3538        kthread_stop(fs_info->transaction_kthread);
3539        kthread_stop(fs_info->cleaner_kthread);
3540
3541        fs_info->closing = 2;
3542        smp_mb();
3543
3544        btrfs_free_qgroup_config(root->fs_info);
3545
3546        if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3547                printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
3548                       percpu_counter_sum(&fs_info->delalloc_bytes));
3549        }
3550
3551        btrfs_free_block_groups(fs_info);
3552
3553        btrfs_stop_all_workers(fs_info);
3554
3555        del_fs_roots(fs_info);
3556
3557        free_root_pointers(fs_info, 1);
3558
3559        iput(fs_info->btree_inode);
3560
3561#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3562        if (btrfs_test_opt(root, CHECK_INTEGRITY))
3563                btrfsic_unmount(root, fs_info->fs_devices);
3564#endif
3565
3566        btrfs_close_devices(fs_info->fs_devices);
3567        btrfs_mapping_tree_free(&fs_info->mapping_tree);
3568
3569        percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3570        percpu_counter_destroy(&fs_info->delalloc_bytes);
3571        bdi_destroy(&fs_info->bdi);
3572        cleanup_srcu_struct(&fs_info->subvol_srcu);
3573
3574        btrfs_free_stripe_hash_table(fs_info);
3575
3576        return 0;
3577}
3578
3579int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3580                          int atomic)
3581{
3582        int ret;
3583        struct inode *btree_inode = buf->pages[0]->mapping->host;
3584
3585        ret = extent_buffer_uptodate(buf);
3586        if (!ret)
3587                return ret;
3588
3589        ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3590                                    parent_transid, atomic);
3591        if (ret == -EAGAIN)
3592                return ret;
3593        return !ret;
3594}
3595
3596int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3597{
3598        return set_extent_buffer_uptodate(buf);
3599}
3600
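/*
 * Mark a locked extent buffer dirty and account its length against the dirty
 * metadata counter.  Warns if the buffer's generation does not match the
 * currently running transaction.
 */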
3601void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3602{
3603        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3604        u64 transid = btrfs_header_generation(buf);
3605        int was_dirty;
3606
3607        btrfs_assert_tree_locked(buf);
3608        if (transid != root->fs_info->generation)
3609                WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
3610                       "found %llu running %llu\n",
3611                        (unsigned long long)buf->start,
3612                        (unsigned long long)transid,
3613                        (unsigned long long)root->fs_info->generation);
3614        was_dirty = set_extent_buffer_dirty(buf);
3615        if (!was_dirty)
3616                __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
3617                                     buf->len,
3618                                     root->fs_info->dirty_metadata_batch);
3619}
3620
3621static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3622                                        int flush_delayed)
3623{
3624        /*
3625         * looks as though older kernels can get into trouble with
3626         * this code; they end up stuck in balance_dirty_pages forever
3627         */
3628        int ret;
3629
3630        if (current->flags & PF_MEMALLOC)
3631                return;
3632
3633        if (flush_delayed)
3634                btrfs_balance_delayed_items(root);
3635
3636        ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
3637                                     BTRFS_DIRTY_METADATA_THRESH);
3638        if (ret > 0) {
3639                balance_dirty_pages_ratelimited(
3640                                   root->fs_info->btree_inode->i_mapping);
3641        }
3642        return;
3643}
3644
3645void btrfs_btree_balance_dirty(struct btrfs_root *root)
3646{
3647        __btrfs_btree_balance_dirty(root, 1);
3648}
3649
3650void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
3651{
3652        __btrfs_btree_balance_dirty(root, 0);
3653}
3654
3655int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3656{
3657        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3658        return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3659}
3660
3661static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3662                              int read_only)
3663{
3664        /*
3665         * Placeholder for checks
3666         */
3667        return 0;
3668}
3669
3670static void btrfs_error_commit_super(struct btrfs_root *root)
3671{
3672        mutex_lock(&root->fs_info->cleaner_mutex);
3673        btrfs_run_delayed_iputs(root);
3674        mutex_unlock(&root->fs_info->cleaner_mutex);
3675
3676        down_write(&root->fs_info->cleanup_work_sem);
3677        up_write(&root->fs_info->cleanup_work_sem);
3678
3679        /* cleanup FS via transaction */
3680        btrfs_cleanup_transaction(root);
3681}
3682
3683static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
3684                                             struct btrfs_root *root)
3685{
3686        struct btrfs_inode *btrfs_inode;
3687        struct list_head splice;
3688
3689        INIT_LIST_HEAD(&splice);
3690
3691        mutex_lock(&root->fs_info->ordered_operations_mutex);
3692        spin_lock(&root->fs_info->ordered_root_lock);
3693
3694        list_splice_init(&t->ordered_operations, &splice);
3695        while (!list_empty(&splice)) {
3696                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3697                                         ordered_operations);
3698
3699                list_del_init(&btrfs_inode->ordered_operations);
3700                spin_unlock(&root->fs_info->ordered_root_lock);
3701
3702                btrfs_invalidate_inodes(btrfs_inode->root);
3703
3704                spin_lock(&root->fs_info->ordered_root_lock);
3705        }
3706
3707        spin_unlock(&root->fs_info->ordered_root_lock);
3708        mutex_unlock(&root->fs_info->ordered_operations_mutex);
3709}
3710
3711static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3712{
3713        struct btrfs_ordered_extent *ordered;
3714
3715        spin_lock(&root->ordered_extent_lock);
3716        /*
3717         * This will just short circuit the ordered completion stuff which will
3718         * make sure the ordered extent gets properly cleaned up.
3719         */
3720        list_for_each_entry(ordered, &root->ordered_extents,
3721                            root_extent_list)
3722                set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
3723        spin_unlock(&root->ordered_extent_lock);
3724}
3725
3726static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
3727{
3728        struct btrfs_root *root;
3729        struct list_head splice;
3730
3731        INIT_LIST_HEAD(&splice);
3732
3733        spin_lock(&fs_info->ordered_root_lock);
3734        list_splice_init(&fs_info->ordered_roots, &splice);
3735        while (!list_empty(&splice)) {
3736                root = list_first_entry(&splice, struct btrfs_root,
3737                                        ordered_root);
3738                list_del_init(&root->ordered_root);
3739
3740                btrfs_destroy_ordered_extents(root);
3741
3742                cond_resched_lock(&fs_info->ordered_root_lock);
3743        }
3744        spin_unlock(&fs_info->ordered_root_lock);
3745}
3746
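/*
 * Error-path cleanup: drop every delayed ref still queued in the
 * transaction.  Extents whose bytes were reserved (must_insert_reserved
 * heads) are pinned so the space is not leaked.
 */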
3747int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3748                               struct btrfs_root *root)
3749{
3750        struct rb_node *node;
3751        struct btrfs_delayed_ref_root *delayed_refs;
3752        struct btrfs_delayed_ref_node *ref;
3753        int ret = 0;
3754
3755        delayed_refs = &trans->delayed_refs;
3756
3757        spin_lock(&delayed_refs->lock);
3758        if (delayed_refs->num_entries == 0) {
3759                spin_unlock(&delayed_refs->lock);
3760                printk(KERN_INFO "delayed_refs has NO entry\n");
3761                return ret;
3762        }
3763
3764        while ((node = rb_first(&delayed_refs->root)) != NULL) {
3765                struct btrfs_delayed_ref_head *head = NULL;
3766                bool pin_bytes = false;
3767
3768                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3769                atomic_set(&ref->refs, 1);
3770                if (btrfs_delayed_ref_is_head(ref)) {
3771
3772                        head = btrfs_delayed_node_to_head(ref);
3773                        if (!mutex_trylock(&head->mutex)) {
3774                                atomic_inc(&ref->refs);
3775                                spin_unlock(&delayed_refs->lock);
3776
3777                                /* Need to wait for the delayed ref to run */
3778                                mutex_lock(&head->mutex);
3779                                mutex_unlock(&head->mutex);
3780                                btrfs_put_delayed_ref(ref);
3781
3782                                spin_lock(&delayed_refs->lock);
3783                                continue;
3784                        }
3785
3786                        if (head->must_insert_reserved)
3787                                pin_bytes = true;
3788                        btrfs_free_delayed_extent_op(head->extent_op);
3789                        delayed_refs->num_heads--;
3790                        if (list_empty(&head->cluster))
3791                                delayed_refs->num_heads_ready--;
3792                        list_del_init(&head->cluster);
3793                }
3794
3795                ref->in_tree = 0;
3796                rb_erase(&ref->rb_node, &delayed_refs->root);
3797                delayed_refs->num_entries--;
3798                spin_unlock(&delayed_refs->lock);
3799                if (head) {
3800                        if (pin_bytes)
3801                                btrfs_pin_extent(root, ref->bytenr,
3802                                                 ref->num_bytes, 1);
3803                        mutex_unlock(&head->mutex);
3804                }
3805                btrfs_put_delayed_ref(ref);
3806
3807                cond_resched();
3808                spin_lock(&delayed_refs->lock);
3809        }
3810
3811        spin_unlock(&delayed_refs->lock);
3812
3813        return ret;
3814}
3815
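/*
 * Fail every pending snapshot in the transaction with -ECANCELED and take
 * it off the list so nothing tries to create it during error cleanup.
 */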
3816static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
3817{
3818        struct btrfs_pending_snapshot *snapshot;
3819        struct list_head splice;
3820
3821        INIT_LIST_HEAD(&splice);
3822
3823        list_splice_init(&t->pending_snapshots, &splice);
3824
3825        while (!list_empty(&splice)) {
3826                snapshot = list_entry(splice.next,
3827                                      struct btrfs_pending_snapshot,
3828                                      list);
3829                snapshot->error = -ECANCELED;
3830                list_del_init(&snapshot->list);
3831        }
3832}
3833
3834static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3835{
3836        struct btrfs_inode *btrfs_inode;
3837        struct list_head splice;
3838
3839        INIT_LIST_HEAD(&splice);
3840
3841        spin_lock(&root->delalloc_lock);
3842        list_splice_init(&root->delalloc_inodes, &splice);
3843
3844        while (!list_empty(&splice)) {
3845                btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
3846                                               delalloc_inodes);
3847
3848                list_del_init(&btrfs_inode->delalloc_inodes);
3849                clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3850                          &btrfs_inode->runtime_flags);
3851                spin_unlock(&root->delalloc_lock);
3852
3853                btrfs_invalidate_inodes(btrfs_inode->root);
3854
3855                spin_lock(&root->delalloc_lock);
3856        }
3857
3858        spin_unlock(&root->delalloc_lock);
3859}
3860
3861static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
3862{
3863        struct btrfs_root *root;
3864        struct list_head splice;
3865
3866        INIT_LIST_HEAD(&splice);
3867
3868        spin_lock(&fs_info->delalloc_root_lock);
3869        list_splice_init(&fs_info->delalloc_roots, &splice);
3870        while (!list_empty(&splice)) {
3871                root = list_first_entry(&splice, struct btrfs_root,
3872                                         delalloc_root);
3873                list_del_init(&root->delalloc_root);
3874                root = btrfs_grab_fs_root(root);
3875                BUG_ON(!root);
3876                spin_unlock(&fs_info->delalloc_root_lock);
3877
3878                btrfs_destroy_delalloc_inodes(root);
3879                btrfs_put_fs_root(root);
3880
3881                spin_lock(&fs_info->delalloc_root_lock);
3882        }
3883        spin_unlock(&fs_info->delalloc_root_lock);
3884}
3885
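/*
 * Clear the given mark from the dirty_pages tree and drop the dirty bit on
 * every extent buffer covered by it, so the buffers can be freed without
 * being written back.
 */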
3886static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3887                                        struct extent_io_tree *dirty_pages,
3888                                        int mark)
3889{
3890        int ret;
3891        struct extent_buffer *eb;
3892        u64 start = 0;
3893        u64 end;
3894
3895        while (1) {
3896                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3897                                            mark, NULL);
3898                if (ret)
3899                        break;
3900
3901                clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3902                while (start <= end) {
3903                        eb = btrfs_find_tree_block(root, start,
3904                                                   root->leafsize);
3905                        start += root->leafsize;
3906                        if (!eb)
3907                                continue;
3908                        wait_on_extent_buffer_writeback(eb);
3909
3910                        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3911                                               &eb->bflags))
3912                                clear_extent_buffer_dirty(eb);
3913                        free_extent_buffer_stale(eb);
3914                }
3915        }
3916
3917        return ret;
3918}
3919
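/*
 * Error-path cleanup of pinned extents: discard the ranges if the discard
 * mount option is set, then unpin them in both freed_extents trees.
 */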
3920static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3921                                       struct extent_io_tree *pinned_extents)
3922{
3923        struct extent_io_tree *unpin;
3924        u64 start;
3925        u64 end;
3926        int ret;
3927        bool loop = true;
3928
3929        unpin = pinned_extents;
3930again:
3931        while (1) {
3932                ret = find_first_extent_bit(unpin, 0, &start, &end,
3933                                            EXTENT_DIRTY, NULL);
3934                if (ret)
3935                        break;
3936
3937                /* discard the freed range if the discard mount option is set */
3938                if (btrfs_test_opt(root, DISCARD))
3939                        ret = btrfs_error_discard_extent(root, start,
3940                                                         end + 1 - start,
3941                                                         NULL);
3942
3943                clear_extent_dirty(unpin, start, end, GFP_NOFS);
3944                btrfs_error_unpin_extent_range(root, start, end);
3945                cond_resched();
3946        }
3947
3948        if (loop) {
3949                if (unpin == &root->fs_info->freed_extents[0])
3950                        unpin = &root->fs_info->freed_extents[1];
3951                else
3952                        unpin = &root->fs_info->freed_extents[0];
3953                loop = false;
3954                goto again;
3955        }
3956
3957        return 0;
3958}
3959
3960void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3961                                   struct btrfs_root *root)
3962{
3963        btrfs_destroy_delayed_refs(cur_trans, root);
3964        btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
3965                                cur_trans->dirty_pages.dirty_bytes);
3966
3967        cur_trans->state = TRANS_STATE_COMMIT_START;
3968        wake_up(&root->fs_info->transaction_blocked_wait);
3969
3970        btrfs_evict_pending_snapshots(cur_trans);
3971
3972        cur_trans->state = TRANS_STATE_UNBLOCKED;
3973        wake_up(&root->fs_info->transaction_wait);
3974
3975        btrfs_destroy_delayed_inodes(root);
3976        btrfs_assert_delayed_root_empty(root);
3977
3978        btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3979                                     EXTENT_DIRTY);
3980        btrfs_destroy_pinned_extent(root,
3981                                    root->fs_info->pinned_extents);
3982
3983        cur_trans->state = TRANS_STATE_COMPLETED;
3984        wake_up(&cur_trans->commit_wait);
3985
3986        /*
3987        memset(cur_trans, 0, sizeof(*cur_trans));
3988        kmem_cache_free(btrfs_transaction_cachep, cur_trans);
3989        */
3990}
3991
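/*
 * Error-path teardown of every transaction still on fs_info->trans_list.
 * Walks the commit states far enough to wake any waiters, then destroys the
 * ordered extents, delayed refs, delayed inodes and pinned extents.
 */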
3992static int btrfs_cleanup_transaction(struct btrfs_root *root)
3993{
3994        struct btrfs_transaction *t;
3995        LIST_HEAD(list);
3996
3997        mutex_lock(&root->fs_info->transaction_kthread_mutex);
3998
3999        spin_lock(&root->fs_info->trans_lock);
4000        list_splice_init(&root->fs_info->trans_list, &list);
4001        root->fs_info->running_transaction = NULL;
4002        spin_unlock(&root->fs_info->trans_lock);
4003
4004        while (!list_empty(&list)) {
4005                t = list_entry(list.next, struct btrfs_transaction, list);
4006
4007                btrfs_destroy_ordered_operations(t, root);
4008
4009                btrfs_destroy_all_ordered_extents(root->fs_info);
4010
4011                btrfs_destroy_delayed_refs(t, root);
4012
4013                /*
4014                 *  FIXME: cleanup wait for commit
4015                 *  We needn't acquire the lock here: we are in the middle of
4016                 *  umount and no other task will change it.
4017                 */
4018                t->state = TRANS_STATE_COMMIT_START;
4019                smp_mb();
4020                if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
4021                        wake_up(&root->fs_info->transaction_blocked_wait);
4022
4023                btrfs_evict_pending_snapshots(t);
4024
4025                t->state = TRANS_STATE_UNBLOCKED;
4026                smp_mb();
4027                if (waitqueue_active(&root->fs_info->transaction_wait))
4028                        wake_up(&root->fs_info->transaction_wait);
4029
4030                btrfs_destroy_delayed_inodes(root);
4031                btrfs_assert_delayed_root_empty(root);
4032
4033                btrfs_destroy_all_delalloc_inodes(root->fs_info);
4034
4035                btrfs_destroy_marked_extents(root, &t->dirty_pages,
4036                                             EXTENT_DIRTY);
4037
4038                btrfs_destroy_pinned_extent(root,
4039                                            root->fs_info->pinned_extents);
4040
4041                t->state = TRANS_STATE_COMPLETED;
4042                smp_mb();
4043                if (waitqueue_active(&t->commit_wait))
4044                        wake_up(&t->commit_wait);
4045
4046                atomic_set(&t->use_count, 0);
4047                list_del_init(&t->list);
4048                memset(t, 0, sizeof(*t));
4049                kmem_cache_free(btrfs_transaction_cachep, t);
4050        }
4051
4052        mutex_unlock(&root->fs_info->transaction_kthread_mutex);
4053
4054        return 0;
4055}
4056
4057static struct extent_io_ops btree_extent_io_ops = {
4058        .readpage_end_io_hook = btree_readpage_end_io_hook,
4059        .readpage_io_failed_hook = btree_io_failed_hook,
4060        .submit_bio_hook = btree_submit_bio_hook,
4061        /* note we're sharing with inode.c for the merge bio hook */
4062        .merge_bio_hook = btrfs_merge_bio_hook,
4063};
4064