linux/fs/btrfs/disk-io.c
   1/*
   2 * Copyright (C) 2007 Oracle.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   16 * Boston, MA 02111-1307, USA.
  17 */
  18
  19#include <linux/fs.h>
  20#include <linux/blkdev.h>
  21#include <linux/scatterlist.h>
  22#include <linux/swap.h>
  23#include <linux/radix-tree.h>
  24#include <linux/writeback.h>
  25#include <linux/buffer_head.h>
  26#include <linux/workqueue.h>
  27#include <linux/kthread.h>
  28#include <linux/freezer.h>
  29#include <linux/crc32c.h>
  30#include <linux/slab.h>
  31#include <linux/migrate.h>
  32#include <linux/ratelimit.h>
  33#include <linux/uuid.h>
  34#include <asm/unaligned.h>
  35#include "compat.h"
  36#include "ctree.h"
  37#include "disk-io.h"
  38#include "transaction.h"
  39#include "btrfs_inode.h"
  40#include "volumes.h"
  41#include "print-tree.h"
  42#include "async-thread.h"
  43#include "locking.h"
  44#include "tree-log.h"
  45#include "free-space-cache.h"
  46#include "inode-map.h"
  47#include "check-integrity.h"
  48#include "rcu-string.h"
  49#include "dev-replace.h"
  50#include "raid56.h"
  51
  52#ifdef CONFIG_X86
  53#include <asm/cpufeature.h>
  54#endif
  55
  56static struct extent_io_ops btree_extent_io_ops;
  57static void end_workqueue_fn(struct btrfs_work *work);
  58static void free_fs_root(struct btrfs_root *root);
  59static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
  60                                    int read_only);
  61static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
  62                                             struct btrfs_root *root);
  63static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
  64static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
  65                                      struct btrfs_root *root);
  66static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
  67static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
  68static int btrfs_destroy_marked_extents(struct btrfs_root *root,
  69                                        struct extent_io_tree *dirty_pages,
  70                                        int mark);
  71static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
  72                                       struct extent_io_tree *pinned_extents);
  73static int btrfs_cleanup_transaction(struct btrfs_root *root);
  74static void btrfs_error_commit_super(struct btrfs_root *root);
  75
  76/*
  77 * end_io_wq structs are used to do processing in task context when an IO is
  78 * complete.  This is used during reads to verify checksums, and it is used
  79 * by writes to insert metadata for new file extents after IO is complete.
  80 */
  81struct end_io_wq {
  82        struct bio *bio;
  83        bio_end_io_t *end_io;
  84        void *private;
  85        struct btrfs_fs_info *info;
  86        int error;
  87        int metadata;
  88        struct list_head list;
  89        struct btrfs_work work;
  90};
  91
  92/*
  93 * async submit bios are used to offload expensive checksumming
  94 * onto the worker threads.  They checksum file and metadata bios
  95 * just before they are sent down the IO stack.
  96 */
  97struct async_submit_bio {
  98        struct inode *inode;
  99        struct bio *bio;
 100        struct list_head list;
 101        extent_submit_bio_hook_t *submit_bio_start;
 102        extent_submit_bio_hook_t *submit_bio_done;
 103        int rw;
 104        int mirror_num;
 105        unsigned long bio_flags;
 106        /*
 107         * bio_offset is optional, can be used if the pages in the bio
 108         * can't tell us where in the file the bio should go
 109         */
 110        u64 bio_offset;
 111        struct btrfs_work work;
 112        int error;
 113};
 114
 115/*
 116 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 117 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 118 * the level the eb occupies in the tree.
 119 *
 120 * Different roots are used for different purposes and may nest inside each
  121 * other, so they require separate keysets.  As lockdep keys should be
 122 * static, assign keysets according to the purpose of the root as indicated
 123 * by btrfs_root->objectid.  This ensures that all special purpose roots
 124 * have separate keysets.
 125 *
 126 * Lock-nesting across peer nodes is always done with the immediate parent
 127 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 128 * subclass to avoid triggering lockdep warning in such cases.
 129 *
 130 * The key is set by the readpage_end_io_hook after the buffer has passed
 131 * csum validation but before the pages are unlocked.  It is also set by
 132 * btrfs_init_new_buffer on freshly allocated blocks.
 133 *
 134 * We also add a check to make sure the highest level of the tree is the
 135 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 136 * needs update as well.
 137 */
 138#ifdef CONFIG_DEBUG_LOCK_ALLOC
 139# if BTRFS_MAX_LEVEL != 8
 140#  error
 141# endif
 142
 143static struct btrfs_lockdep_keyset {
 144        u64                     id;             /* root objectid */
 145        const char              *name_stem;     /* lock name stem */
 146        char                    names[BTRFS_MAX_LEVEL + 1][20];
 147        struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
 148} btrfs_lockdep_keysets[] = {
 149        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
 150        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
 151        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
 152        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
 153        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
 154        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
 155        { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
 156        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
 157        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
 158        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
 159        { .id = 0,                              .name_stem = "tree"     },
 160};
 161
 162void __init btrfs_init_lockdep(void)
 163{
 164        int i, j;
 165
 166        /* initialize lockdep class names */
 167        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
 168                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
 169
 170                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
 171                        snprintf(ks->names[j], sizeof(ks->names[j]),
 172                                 "btrfs-%s-%02d", ks->name_stem, j);
 173        }
 174}
 175
 176void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
 177                                    int level)
 178{
 179        struct btrfs_lockdep_keyset *ks;
 180
 181        BUG_ON(level >= ARRAY_SIZE(ks->keys));
 182
 183        /* find the matching keyset, id 0 is the default entry */
 184        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
 185                if (ks->id == objectid)
 186                        break;
 187
 188        lockdep_set_class_and_name(&eb->lock,
 189                                   &ks->keys[level], ks->names[level]);
 190}
 191
 192#endif
 193
 194/*
 195 * extents on the btree inode are pretty simple, there's one extent
 196 * that covers the entire device
 197 */
 198static struct extent_map *btree_get_extent(struct inode *inode,
 199                struct page *page, size_t pg_offset, u64 start, u64 len,
 200                int create)
 201{
 202        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 203        struct extent_map *em;
 204        int ret;
 205
 206        read_lock(&em_tree->lock);
 207        em = lookup_extent_mapping(em_tree, start, len);
 208        if (em) {
 209                em->bdev =
 210                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 211                read_unlock(&em_tree->lock);
 212                goto out;
 213        }
 214        read_unlock(&em_tree->lock);
 215
 216        em = alloc_extent_map();
 217        if (!em) {
 218                em = ERR_PTR(-ENOMEM);
 219                goto out;
 220        }
 221        em->start = 0;
 222        em->len = (u64)-1;
 223        em->block_len = (u64)-1;
 224        em->block_start = 0;
 225        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 226
 227        write_lock(&em_tree->lock);
 228        ret = add_extent_mapping(em_tree, em, 0);
 229        if (ret == -EEXIST) {
 230                free_extent_map(em);
 231                em = lookup_extent_mapping(em_tree, start, len);
 232                if (!em)
 233                        em = ERR_PTR(-EIO);
 234        } else if (ret) {
 235                free_extent_map(em);
 236                em = ERR_PTR(ret);
 237        }
 238        write_unlock(&em_tree->lock);
 239
 240out:
 241        return em;
 242}
 243
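     /*
      * checksum helper: crc32c over @len bytes of @data, folding in @seed
      * as the starting value
      */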
 244u32 btrfs_csum_data(char *data, u32 seed, size_t len)
 245{
 246        return crc32c(seed, data, len);
 247}
 248
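     /*
      * finish a checksum: invert the running crc and store it at @result
      * in little-endian byte order, the format used on disk
      */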
 249void btrfs_csum_final(u32 crc, char *result)
 250{
 251        put_unaligned_le32(~crc, result);
 252}
 253
 254/*
 255 * compute the csum for a btree block, and either verify it or write it
 256 * into the csum field of the block.
 257 */
 258static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 259                           int verify)
 260{
 261        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 262        char *result = NULL;
 263        unsigned long len;
 264        unsigned long cur_len;
 265        unsigned long offset = BTRFS_CSUM_SIZE;
 266        char *kaddr;
 267        unsigned long map_start;
 268        unsigned long map_len;
 269        int err;
 270        u32 crc = ~(u32)0;
 271        unsigned long inline_result;
 272
 273        len = buf->len - offset;
 274        while (len > 0) {
 275                err = map_private_extent_buffer(buf, offset, 32,
 276                                        &kaddr, &map_start, &map_len);
 277                if (err)
 278                        return 1;
 279                cur_len = min(len, map_len - (offset - map_start));
 280                crc = btrfs_csum_data(kaddr + offset - map_start,
 281                                      crc, cur_len);
 282                len -= cur_len;
 283                offset += cur_len;
 284        }
 285        if (csum_size > sizeof(inline_result)) {
 286                result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
 287                if (!result)
 288                        return 1;
 289        } else {
 290                result = (char *)&inline_result;
 291        }
 292
 293        btrfs_csum_final(crc, result);
 294
 295        if (verify) {
 296                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
 297                        u32 val;
 298                        u32 found = 0;
 299                        memcpy(&found, result, csum_size);
 300
 301                        read_extent_buffer(buf, &val, 0, csum_size);
 302                        printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
 303                                       "failed on %llu wanted %X found %X "
 304                                       "level %d\n",
 305                                       root->fs_info->sb->s_id,
 306                                       (unsigned long long)buf->start, val, found,
 307                                       btrfs_header_level(buf));
 308                        if (result != (char *)&inline_result)
 309                                kfree(result);
 310                        return 1;
 311                }
 312        } else {
 313                write_extent_buffer(buf, result, 0, csum_size);
 314        }
 315        if (result != (char *)&inline_result)
 316                kfree(result);
 317        return 0;
 318}
 319
 320/*
 321 * we can't consider a given block up to date unless the transid of the
 322 * block matches the transid in the parent node's pointer.  This is how we
 323 * detect blocks that either didn't get written at all or got written
 324 * in the wrong place.
 325 */
 326static int verify_parent_transid(struct extent_io_tree *io_tree,
 327                                 struct extent_buffer *eb, u64 parent_transid,
 328                                 int atomic)
 329{
 330        struct extent_state *cached_state = NULL;
 331        int ret;
 332
 333        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
 334                return 0;
 335
 336        if (atomic)
 337                return -EAGAIN;
 338
 339        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
 340                         0, &cached_state);
 341        if (extent_buffer_uptodate(eb) &&
 342            btrfs_header_generation(eb) == parent_transid) {
 343                ret = 0;
 344                goto out;
 345        }
 346        printk_ratelimited("parent transid verify failed on %llu wanted %llu "
 347                       "found %llu\n",
 348                       (unsigned long long)eb->start,
 349                       (unsigned long long)parent_transid,
 350                       (unsigned long long)btrfs_header_generation(eb));
 351        ret = 1;
 352        clear_extent_buffer_uptodate(eb);
 353out:
 354        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
 355                             &cached_state, GFP_NOFS);
 356        return ret;
 357}
 358
 359/*
 360 * Return 0 if the superblock checksum type matches the checksum value of that
 361 * algorithm. Pass the raw disk superblock data.
 362 */
 363static int btrfs_check_super_csum(char *raw_disk_sb)
 364{
 365        struct btrfs_super_block *disk_sb =
 366                (struct btrfs_super_block *)raw_disk_sb;
 367        u16 csum_type = btrfs_super_csum_type(disk_sb);
 368        int ret = 0;
 369
 370        if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
 371                u32 crc = ~(u32)0;
 372                const int csum_size = sizeof(crc);
 373                char result[csum_size];
 374
 375                /*
 376                 * The super_block structure does not span the whole
 377                 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
  378                 * is filled with zeros and is included in the checksum.
 379                 */
 380                crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
 381                                crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
 382                btrfs_csum_final(crc, result);
 383
 384                if (memcmp(raw_disk_sb, result, csum_size))
 385                        ret = 1;
 386
 387                if (ret && btrfs_super_generation(disk_sb) < 10) {
 388                        printk(KERN_WARNING "btrfs: super block crcs don't match, older mkfs detected\n");
 389                        ret = 0;
 390                }
 391        }
 392
 393        if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
 394                printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
 395                                csum_type);
 396                ret = 1;
 397        }
 398
 399        return ret;
 400}
 401
 402/*
 403 * helper to read a given tree block, doing retries as required when
 404 * the checksums don't match and we have alternate mirrors to try.
 405 */
 406static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 407                                          struct extent_buffer *eb,
 408                                          u64 start, u64 parent_transid)
 409{
 410        struct extent_io_tree *io_tree;
 411        int failed = 0;
 412        int ret;
 413        int num_copies = 0;
 414        int mirror_num = 0;
 415        int failed_mirror = 0;
 416
 417        clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 418        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 419        while (1) {
 420                ret = read_extent_buffer_pages(io_tree, eb, start,
 421                                               WAIT_COMPLETE,
 422                                               btree_get_extent, mirror_num);
 423                if (!ret) {
 424                        if (!verify_parent_transid(io_tree, eb,
 425                                                   parent_transid, 0))
 426                                break;
 427                        else
 428                                ret = -EIO;
 429                }
 430
 431                /*
 432                 * This buffer's crc is fine, but its contents are corrupted, so
 433                 * there is no reason to read the other copies, they won't be
 434                 * any less wrong.
 435                 */
 436                if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
 437                        break;
 438
 439                num_copies = btrfs_num_copies(root->fs_info,
 440                                              eb->start, eb->len);
 441                if (num_copies == 1)
 442                        break;
 443
 444                if (!failed_mirror) {
 445                        failed = 1;
 446                        failed_mirror = eb->read_mirror;
 447                }
 448
 449                mirror_num++;
 450                if (mirror_num == failed_mirror)
 451                        mirror_num++;
 452
 453                if (mirror_num > num_copies)
 454                        break;
 455        }
 456
 457        if (failed && !ret && failed_mirror)
 458                repair_eb_io_failure(root, eb, failed_mirror);
 459
 460        return ret;
 461}
 462
 463/*
 464 * checksum a dirty tree block before IO.  This has extra checks to make sure
 465 * we only fill in the checksum field in the first page of a multi-page block
 466 */
 467
 468static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 469{
 470        struct extent_io_tree *tree;
 471        u64 start = page_offset(page);
 472        u64 found_start;
 473        struct extent_buffer *eb;
 474
 475        tree = &BTRFS_I(page->mapping->host)->io_tree;
 476
 477        eb = (struct extent_buffer *)page->private;
 478        if (page != eb->pages[0])
 479                return 0;
 480        found_start = btrfs_header_bytenr(eb);
 481        if (found_start != start) {
 482                WARN_ON(1);
 483                return 0;
 484        }
 485        if (!PageUptodate(page)) {
 486                WARN_ON(1);
 487                return 0;
 488        }
 489        csum_tree_block(root, eb, 0);
 490        return 0;
 491}
 492
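     /*
      * make sure the fsid in a tree block header belongs to this filesystem,
      * checking the main fs_devices and any seed devices behind it.
      * Returns 0 on a match and 1 otherwise.
      */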
 493static int check_tree_block_fsid(struct btrfs_root *root,
 494                                 struct extent_buffer *eb)
 495{
 496        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
 497        u8 fsid[BTRFS_UUID_SIZE];
 498        int ret = 1;
 499
 500        read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
 501                           BTRFS_FSID_SIZE);
 502        while (fs_devices) {
 503                if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
 504                        ret = 0;
 505                        break;
 506                }
 507                fs_devices = fs_devices->seed;
 508        }
 509        return ret;
 510}
 511
 512#define CORRUPT(reason, eb, root, slot)                         \
  513        printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu, " \
 514               "root=%llu, slot=%d\n", reason,                  \
 515               (unsigned long long)btrfs_header_bytenr(eb),     \
 516               (unsigned long long)root->objectid, slot)
 517
 518static noinline int check_leaf(struct btrfs_root *root,
 519                               struct extent_buffer *leaf)
 520{
 521        struct btrfs_key key;
 522        struct btrfs_key leaf_key;
 523        u32 nritems = btrfs_header_nritems(leaf);
 524        int slot;
 525
 526        if (nritems == 0)
 527                return 0;
 528
 529        /* Check the 0 item */
 530        if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
 531            BTRFS_LEAF_DATA_SIZE(root)) {
 532                CORRUPT("invalid item offset size pair", leaf, root, 0);
 533                return -EIO;
 534        }
 535
 536        /*
  537         * Check to make sure each item's keys are in the correct order and their
  538         * offsets make sense.  We only have to loop through nritems-1 because
  539         * we check the current slot against the next slot, which verifies the
  540         * next slot's offset+size makes sense and that the current slot's
  541         * offset is correct.
 542         */
 543        for (slot = 0; slot < nritems - 1; slot++) {
 544                btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
 545                btrfs_item_key_to_cpu(leaf, &key, slot + 1);
 546
 547                /* Make sure the keys are in the right order */
 548                if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
 549                        CORRUPT("bad key order", leaf, root, slot);
 550                        return -EIO;
 551                }
 552
 553                /*
  554         * Make sure the offsets and ends are right; remember that the
 555                 * item data starts at the end of the leaf and grows towards the
 556                 * front.
 557                 */
 558                if (btrfs_item_offset_nr(leaf, slot) !=
 559                        btrfs_item_end_nr(leaf, slot + 1)) {
 560                        CORRUPT("slot offset bad", leaf, root, slot);
 561                        return -EIO;
 562                }
 563
 564                /*
 565                 * Check to make sure that we don't point outside of the leaf,
  566         * just in case all the items are consistent with each other, but
 567                 * all point outside of the leaf.
 568                 */
 569                if (btrfs_item_end_nr(leaf, slot) >
 570                    BTRFS_LEAF_DATA_SIZE(root)) {
 571                        CORRUPT("slot end outside of leaf", leaf, root, slot);
 572                        return -EIO;
 573                }
 574        }
 575
 576        return 0;
 577}
 578
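     /*
      * read end_io hook for btree pages.  Once the last page of an extent
      * buffer has completed, verify the header bytenr, fsid, level and
      * checksum (plus leaf sanity checks for level 0) and mark the buffer
      * uptodate, or fail the read with -EIO.
      */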
 579static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 580                               struct extent_state *state, int mirror)
 581{
 582        struct extent_io_tree *tree;
 583        u64 found_start;
 584        int found_level;
 585        struct extent_buffer *eb;
 586        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
 587        int ret = 0;
 588        int reads_done;
 589
 590        if (!page->private)
 591                goto out;
 592
 593        tree = &BTRFS_I(page->mapping->host)->io_tree;
 594        eb = (struct extent_buffer *)page->private;
 595
 596        /* the pending IO might have been the only thing that kept this buffer
  597         * in memory.  Make sure we have a ref for all these other checks
 598         */
 599        extent_buffer_get(eb);
 600
 601        reads_done = atomic_dec_and_test(&eb->io_pages);
 602        if (!reads_done)
 603                goto err;
 604
 605        eb->read_mirror = mirror;
 606        if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
 607                ret = -EIO;
 608                goto err;
 609        }
 610
 611        found_start = btrfs_header_bytenr(eb);
 612        if (found_start != eb->start) {
 613                printk_ratelimited(KERN_INFO "btrfs bad tree block start "
 614                               "%llu %llu\n",
 615                               (unsigned long long)found_start,
 616                               (unsigned long long)eb->start);
 617                ret = -EIO;
 618                goto err;
 619        }
 620        if (check_tree_block_fsid(root, eb)) {
 621                printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
 622                               (unsigned long long)eb->start);
 623                ret = -EIO;
 624                goto err;
 625        }
 626        found_level = btrfs_header_level(eb);
 627        if (found_level >= BTRFS_MAX_LEVEL) {
 628                btrfs_info(root->fs_info, "bad tree block level %d\n",
 629                           (int)btrfs_header_level(eb));
 630                ret = -EIO;
 631                goto err;
 632        }
 633
 634        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
 635                                       eb, found_level);
 636
 637        ret = csum_tree_block(root, eb, 1);
 638        if (ret) {
 639                ret = -EIO;
 640                goto err;
 641        }
 642
 643        /*
 644         * If this is a leaf block and it is corrupt, set the corrupt bit so
 645         * that we don't try and read the other copies of this block, just
 646         * return -EIO.
 647         */
 648        if (found_level == 0 && check_leaf(root, eb)) {
 649                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 650                ret = -EIO;
 651        }
 652
 653        if (!ret)
 654                set_extent_buffer_uptodate(eb);
 655err:
 656        if (reads_done &&
 657            test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
 658                btree_readahead_hook(root, eb, eb->start, ret);
 659
 660        if (ret) {
 661                /*
 662                 * our io error hook is going to dec the io pages
 663                 * again, we have to make sure it has something
 664                 * to decrement
 665                 */
 666                atomic_inc(&eb->io_pages);
 667                clear_extent_buffer_uptodate(eb);
 668        }
 669        free_extent_buffer(eb);
 670out:
 671        return ret;
 672}
 673
 674static int btree_io_failed_hook(struct page *page, int failed_mirror)
 675{
 676        struct extent_buffer *eb;
 677        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
 678
 679        eb = (struct extent_buffer *)page->private;
 680        set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
 681        eb->read_mirror = failed_mirror;
 682        atomic_dec(&eb->io_pages);
 683        if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
 684                btree_readahead_hook(root, eb, eb->start, -EIO);
 685        return -EIO;    /* we fixed nothing */
 686}
 687
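     /*
      * bio end_io callback for IO that was wrapped by btrfs_bio_wq_end_io.
      * The final processing happens in end_workqueue_fn, queued here on the
      * worker pool that matches the bio direction and metadata type.
      */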
 688static void end_workqueue_bio(struct bio *bio, int err)
 689{
 690        struct end_io_wq *end_io_wq = bio->bi_private;
 691        struct btrfs_fs_info *fs_info;
 692
 693        fs_info = end_io_wq->info;
 694        end_io_wq->error = err;
 695        end_io_wq->work.func = end_workqueue_fn;
 696        end_io_wq->work.flags = 0;
 697
 698        if (bio->bi_rw & REQ_WRITE) {
 699                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
 700                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
 701                                           &end_io_wq->work);
 702                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
 703                        btrfs_queue_worker(&fs_info->endio_freespace_worker,
 704                                           &end_io_wq->work);
 705                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
 706                        btrfs_queue_worker(&fs_info->endio_raid56_workers,
 707                                           &end_io_wq->work);
 708                else
 709                        btrfs_queue_worker(&fs_info->endio_write_workers,
 710                                           &end_io_wq->work);
 711        } else {
 712                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
 713                        btrfs_queue_worker(&fs_info->endio_raid56_workers,
 714                                           &end_io_wq->work);
 715                else if (end_io_wq->metadata)
 716                        btrfs_queue_worker(&fs_info->endio_meta_workers,
 717                                           &end_io_wq->work);
 718                else
 719                        btrfs_queue_worker(&fs_info->endio_workers,
 720                                           &end_io_wq->work);
 721        }
 722}
 723
 724/*
 725 * For the metadata arg you want
 726 *
 727 * 0 - if data
  728 * 1 - if normal metadata
 729 * 2 - if writing to the free space cache area
 730 * 3 - raid parity work
 731 */
 732int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 733                        int metadata)
 734{
 735        struct end_io_wq *end_io_wq;
 736        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
 737        if (!end_io_wq)
 738                return -ENOMEM;
 739
 740        end_io_wq->private = bio->bi_private;
 741        end_io_wq->end_io = bio->bi_end_io;
 742        end_io_wq->info = info;
 743        end_io_wq->error = 0;
 744        end_io_wq->bio = bio;
 745        end_io_wq->metadata = metadata;
 746
 747        bio->bi_private = end_io_wq;
 748        bio->bi_end_io = end_workqueue_bio;
 749        return 0;
 750}
 751
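     /*
      * upper bound on the number of async bios allowed in flight, scaled by
      * the number of worker threads and open devices
      */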
 752unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 753{
 754        unsigned long limit = min_t(unsigned long,
 755                                    info->workers.max_workers,
 756                                    info->fs_devices->open_devices);
 757        return 256 * limit;
 758}
 759
 760static void run_one_async_start(struct btrfs_work *work)
 761{
 762        struct async_submit_bio *async;
 763        int ret;
 764
 765        async = container_of(work, struct  async_submit_bio, work);
 766        ret = async->submit_bio_start(async->inode, async->rw, async->bio,
 767                                      async->mirror_num, async->bio_flags,
 768                                      async->bio_offset);
 769        if (ret)
 770                async->error = ret;
 771}
 772
 773static void run_one_async_done(struct btrfs_work *work)
 774{
 775        struct btrfs_fs_info *fs_info;
 776        struct async_submit_bio *async;
 777        int limit;
 778
 779        async = container_of(work, struct  async_submit_bio, work);
 780        fs_info = BTRFS_I(async->inode)->root->fs_info;
 781
 782        limit = btrfs_async_submit_limit(fs_info);
 783        limit = limit * 2 / 3;
 784
 785        if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
 786            waitqueue_active(&fs_info->async_submit_wait))
 787                wake_up(&fs_info->async_submit_wait);
 788
  789        /* If an error occurred we just want to clean up the bio and move on */
 790        if (async->error) {
 791                bio_endio(async->bio, async->error);
 792                return;
 793        }
 794
 795        async->submit_bio_done(async->inode, async->rw, async->bio,
 796                               async->mirror_num, async->bio_flags,
 797                               async->bio_offset);
 798}
 799
 800static void run_one_async_free(struct btrfs_work *work)
 801{
 802        struct async_submit_bio *async;
 803
 804        async = container_of(work, struct  async_submit_bio, work);
 805        kfree(async);
 806}
 807
 808int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 809                        int rw, struct bio *bio, int mirror_num,
 810                        unsigned long bio_flags,
 811                        u64 bio_offset,
 812                        extent_submit_bio_hook_t *submit_bio_start,
 813                        extent_submit_bio_hook_t *submit_bio_done)
 814{
 815        struct async_submit_bio *async;
 816
 817        async = kmalloc(sizeof(*async), GFP_NOFS);
 818        if (!async)
 819                return -ENOMEM;
 820
 821        async->inode = inode;
 822        async->rw = rw;
 823        async->bio = bio;
 824        async->mirror_num = mirror_num;
 825        async->submit_bio_start = submit_bio_start;
 826        async->submit_bio_done = submit_bio_done;
 827
 828        async->work.func = run_one_async_start;
 829        async->work.ordered_func = run_one_async_done;
 830        async->work.ordered_free = run_one_async_free;
 831
 832        async->work.flags = 0;
 833        async->bio_flags = bio_flags;
 834        async->bio_offset = bio_offset;
 835
 836        async->error = 0;
 837
 838        atomic_inc(&fs_info->nr_async_submits);
 839
 840        if (rw & REQ_SYNC)
 841                btrfs_set_work_high_prio(&async->work);
 842
 843        btrfs_queue_worker(&fs_info->workers, &async->work);
 844
 845        while (atomic_read(&fs_info->async_submit_draining) &&
 846              atomic_read(&fs_info->nr_async_submits)) {
 847                wait_event(fs_info->async_submit_wait,
 848                           (atomic_read(&fs_info->nr_async_submits) == 0));
 849        }
 850
 851        return 0;
 852}
 853
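     /*
      * checksum every dirty tree block covered by the pages in this bio
      * before it goes down to the disk
      */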
 854static int btree_csum_one_bio(struct bio *bio)
 855{
 856        struct bio_vec *bvec = bio->bi_io_vec;
 857        int bio_index = 0;
 858        struct btrfs_root *root;
 859        int ret = 0;
 860
 861        WARN_ON(bio->bi_vcnt <= 0);
 862        while (bio_index < bio->bi_vcnt) {
 863                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
 864                ret = csum_dirty_buffer(root, bvec->bv_page);
 865                if (ret)
 866                        break;
 867                bio_index++;
 868                bvec++;
 869        }
 870        return ret;
 871}
 872
 873static int __btree_submit_bio_start(struct inode *inode, int rw,
 874                                    struct bio *bio, int mirror_num,
 875                                    unsigned long bio_flags,
 876                                    u64 bio_offset)
 877{
 878        /*
 879         * when we're called for a write, we're already in the async
  880         * submission context.  Just checksum the bio; mapping and
  881         * submission happen later in __btree_submit_bio_done
 881         */
 882        return btree_csum_one_bio(bio);
 883}
 884
 885static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
 886                                 int mirror_num, unsigned long bio_flags,
 887                                 u64 bio_offset)
 888{
 889        int ret;
 890
 891        /*
 892         * when we're called for a write, we're already in the async
 893         * submission context.  Just jump into btrfs_map_bio
 894         */
 895        ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
 896        if (ret)
 897                bio_endio(bio, ret);
 898        return ret;
 899}
 900
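     /*
      * decide whether a metadata write should be checksummed by the async
      * helper threads.  Tree log bios are always done inline, and so is
      * everything else when the CPU has hardware accelerated crc32c.
      */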
 901static int check_async_write(struct inode *inode, unsigned long bio_flags)
 902{
 903        if (bio_flags & EXTENT_BIO_TREE_LOG)
 904                return 0;
 905#ifdef CONFIG_X86
 906        if (cpu_has_xmm4_2)
 907                return 0;
 908#endif
 909        return 1;
 910}
 911
 912static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 913                                 int mirror_num, unsigned long bio_flags,
 914                                 u64 bio_offset)
 915{
 916        int async = check_async_write(inode, bio_flags);
 917        int ret;
 918
 919        if (!(rw & REQ_WRITE)) {
 920                /*
 921                 * called for a read, do the setup so that checksum validation
 922                 * can happen in the async kernel threads
 923                 */
 924                ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
 925                                          bio, 1);
 926                if (ret)
 927                        goto out_w_error;
 928                ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
 929                                    mirror_num, 0);
 930        } else if (!async) {
 931                ret = btree_csum_one_bio(bio);
 932                if (ret)
 933                        goto out_w_error;
 934                ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
 935                                    mirror_num, 0);
 936        } else {
 937                /*
 938                 * kthread helpers are used to submit writes so that
 939                 * checksumming can happen in parallel across all CPUs
 940                 */
 941                ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
 942                                          inode, rw, bio, mirror_num, 0,
 943                                          bio_offset,
 944                                          __btree_submit_bio_start,
 945                                          __btree_submit_bio_done);
 946        }
 947
 948        if (ret) {
 949out_w_error:
 950                bio_endio(bio, ret);
 951        }
 952        return ret;
 953}
 954
 955#ifdef CONFIG_MIGRATION
 956static int btree_migratepage(struct address_space *mapping,
 957                        struct page *newpage, struct page *page,
 958                        enum migrate_mode mode)
 959{
 960        /*
 961         * we can't safely write a btree page from here,
 962         * we haven't done the locking hook
 963         */
 964        if (PageDirty(page))
 965                return -EAGAIN;
 966        /*
 967         * Buffers may be managed in a filesystem specific way.
 968         * We must have no buffers or drop them.
 969         */
 970        if (page_has_private(page) &&
 971            !try_to_release_page(page, GFP_KERNEL))
 972                return -EAGAIN;
 973        return migrate_page(mapping, newpage, page, mode);
 974}
 975#endif
 976
 977
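     /*
      * write out dirty btree pages.  For plain background writeback we skip
      * kupdate requests and bail out early unless enough metadata is dirty;
      * everything else goes through btree_write_cache_pages.
      */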
 978static int btree_writepages(struct address_space *mapping,
 979                            struct writeback_control *wbc)
 980{
 981        struct extent_io_tree *tree;
 982        struct btrfs_fs_info *fs_info;
 983        int ret;
 984
 985        tree = &BTRFS_I(mapping->host)->io_tree;
 986        if (wbc->sync_mode == WB_SYNC_NONE) {
 987
 988                if (wbc->for_kupdate)
 989                        return 0;
 990
 991                fs_info = BTRFS_I(mapping->host)->root->fs_info;
 992                /* this is a bit racy, but that's ok */
 993                ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
 994                                             BTRFS_DIRTY_METADATA_THRESH);
 995                if (ret < 0)
 996                        return 0;
 997        }
 998        return btree_write_cache_pages(mapping, wbc);
 999}
1000
1001static int btree_readpage(struct file *file, struct page *page)
1002{
1003        struct extent_io_tree *tree;
1004        tree = &BTRFS_I(page->mapping->host)->io_tree;
1005        return extent_read_full_page(tree, page, btree_get_extent, 0);
1006}
1007
1008static int btree_releasepage(struct page *page, gfp_t gfp_flags)
1009{
1010        if (PageWriteback(page) || PageDirty(page))
1011                return 0;
1012
1013        return try_release_extent_buffer(page);
1014}
1015
1016static void btree_invalidatepage(struct page *page, unsigned long offset)
1017{
1018        struct extent_io_tree *tree;
1019        tree = &BTRFS_I(page->mapping->host)->io_tree;
1020        extent_invalidatepage(tree, page, offset);
1021        btree_releasepage(page, GFP_NOFS);
1022        if (PagePrivate(page)) {
1023                printk(KERN_WARNING "btrfs warning page private not zero "
1024                       "on page %llu\n", (unsigned long long)page_offset(page));
1025                ClearPagePrivate(page);
1026                set_page_private(page, 0);
1027                page_cache_release(page);
1028        }
1029}
1030
1031static int btree_set_page_dirty(struct page *page)
1032{
1033#ifdef DEBUG
1034        struct extent_buffer *eb;
1035
1036        BUG_ON(!PagePrivate(page));
1037        eb = (struct extent_buffer *)page->private;
1038        BUG_ON(!eb);
1039        BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
1040        BUG_ON(!atomic_read(&eb->refs));
1041        btrfs_assert_tree_locked(eb);
1042#endif
1043        return __set_page_dirty_nobuffers(page);
1044}
1045
1046static const struct address_space_operations btree_aops = {
1047        .readpage       = btree_readpage,
1048        .writepages     = btree_writepages,
1049        .releasepage    = btree_releasepage,
1050        .invalidatepage = btree_invalidatepage,
1051#ifdef CONFIG_MIGRATION
1052        .migratepage    = btree_migratepage,
1053#endif
1054        .set_page_dirty = btree_set_page_dirty,
1055};
1056
1057int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1058                         u64 parent_transid)
1059{
1060        struct extent_buffer *buf = NULL;
1061        struct inode *btree_inode = root->fs_info->btree_inode;
1062        int ret = 0;
1063
1064        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1065        if (!buf)
1066                return 0;
1067        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1068                                 buf, 0, WAIT_NONE, btree_get_extent, 0);
1069        free_extent_buffer(buf);
1070        return ret;
1071}
1072
1073int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1074                         int mirror_num, struct extent_buffer **eb)
1075{
1076        struct extent_buffer *buf = NULL;
1077        struct inode *btree_inode = root->fs_info->btree_inode;
1078        struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1079        int ret;
1080
1081        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1082        if (!buf)
1083                return 0;
1084
1085        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1086
1087        ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
1088                                       btree_get_extent, mirror_num);
1089        if (ret) {
1090                free_extent_buffer(buf);
1091                return ret;
1092        }
1093
1094        if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1095                free_extent_buffer(buf);
1096                return -EIO;
1097        } else if (extent_buffer_uptodate(buf)) {
1098                *eb = buf;
1099        } else {
1100                free_extent_buffer(buf);
1101        }
1102        return 0;
1103}
1104
1105struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
1106                                            u64 bytenr, u32 blocksize)
1107{
1108        struct inode *btree_inode = root->fs_info->btree_inode;
1109        struct extent_buffer *eb;
1110        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1111                                bytenr, blocksize);
1112        return eb;
1113}
1114
1115struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1116                                                 u64 bytenr, u32 blocksize)
1117{
1118        struct inode *btree_inode = root->fs_info->btree_inode;
1119        struct extent_buffer *eb;
1120
1121        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1122                                 bytenr, blocksize);
1123        return eb;
1124}
1125
1126
1127int btrfs_write_tree_block(struct extent_buffer *buf)
1128{
1129        return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1130                                        buf->start + buf->len - 1);
1131}
1132
1133int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1134{
1135        return filemap_fdatawait_range(buf->pages[0]->mapping,
1136                                       buf->start, buf->start + buf->len - 1);
1137}
1138
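     /*
      * read a tree block off disk: find or create the extent buffer for
      * @bytenr and read its pages, trying other mirrors when the checksum
      * or parent transid doesn't match.  Callers must check that the
      * returned buffer is uptodate.
      */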
1139struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1140                                      u32 blocksize, u64 parent_transid)
1141{
1142        struct extent_buffer *buf = NULL;
1143        int ret;
1144
1145        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1146        if (!buf)
1147                return NULL;
1148
1149        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1150        return buf;
1151
1152}
1153
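     /*
      * if @buf was dirtied in the currently running transaction, clear the
      * dirty bit and give its bytes back to the dirty metadata counter
      */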
1154void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1155                      struct extent_buffer *buf)
1156{
1157        struct btrfs_fs_info *fs_info = root->fs_info;
1158
1159        if (btrfs_header_generation(buf) ==
1160            fs_info->running_transaction->transid) {
1161                btrfs_assert_tree_locked(buf);
1162
1163                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1164                        __percpu_counter_add(&fs_info->dirty_metadata_bytes,
1165                                             -buf->len,
1166                                             fs_info->dirty_metadata_batch);
1167                        /* ugh, clear_extent_buffer_dirty needs to lock the page */
1168                        btrfs_set_lock_blocking(buf);
1169                        clear_extent_buffer_dirty(buf);
1170                }
1171        }
1172}
1173
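     /*
      * initialize the in-memory fields of a btrfs_root to sane defaults
      * before it is read from disk or created
      */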
1174static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1175                         u32 stripesize, struct btrfs_root *root,
1176                         struct btrfs_fs_info *fs_info,
1177                         u64 objectid)
1178{
1179        root->node = NULL;
1180        root->commit_root = NULL;
1181        root->sectorsize = sectorsize;
1182        root->nodesize = nodesize;
1183        root->leafsize = leafsize;
1184        root->stripesize = stripesize;
1185        root->ref_cows = 0;
1186        root->track_dirty = 0;
1187        root->in_radix = 0;
1188        root->orphan_item_inserted = 0;
1189        root->orphan_cleanup_state = 0;
1190
1191        root->objectid = objectid;
1192        root->last_trans = 0;
1193        root->highest_objectid = 0;
1194        root->name = NULL;
1195        root->inode_tree = RB_ROOT;
1196        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1197        root->block_rsv = NULL;
1198        root->orphan_block_rsv = NULL;
1199
1200        INIT_LIST_HEAD(&root->dirty_list);
1201        INIT_LIST_HEAD(&root->root_list);
1202        INIT_LIST_HEAD(&root->logged_list[0]);
1203        INIT_LIST_HEAD(&root->logged_list[1]);
1204        spin_lock_init(&root->orphan_lock);
1205        spin_lock_init(&root->inode_lock);
1206        spin_lock_init(&root->accounting_lock);
1207        spin_lock_init(&root->log_extents_lock[0]);
1208        spin_lock_init(&root->log_extents_lock[1]);
1209        mutex_init(&root->objectid_mutex);
1210        mutex_init(&root->log_mutex);
1211        init_waitqueue_head(&root->log_writer_wait);
1212        init_waitqueue_head(&root->log_commit_wait[0]);
1213        init_waitqueue_head(&root->log_commit_wait[1]);
1214        atomic_set(&root->log_commit[0], 0);
1215        atomic_set(&root->log_commit[1], 0);
1216        atomic_set(&root->log_writers, 0);
1217        atomic_set(&root->log_batch, 0);
1218        atomic_set(&root->orphan_inodes, 0);
1219        root->log_transid = 0;
1220        root->last_log_commit = 0;
1221        extent_io_tree_init(&root->dirty_log_pages,
1222                             fs_info->btree_inode->i_mapping);
1223
1224        memset(&root->root_key, 0, sizeof(root->root_key));
1225        memset(&root->root_item, 0, sizeof(root->root_item));
1226        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1227        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
1228        root->defrag_trans_start = fs_info->generation;
1229        init_completion(&root->kobj_unregister);
1230        root->defrag_running = 0;
1231        root->root_key.objectid = objectid;
1232        root->anon_dev = 0;
1233
1234        spin_lock_init(&root->root_item_lock);
1235}
1236
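     /*
      * fill in @root for @objectid: find the most recent root item in the
      * tree of tree roots and read the root node it points to
      */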
1237static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
1238                                            struct btrfs_fs_info *fs_info,
1239                                            u64 objectid,
1240                                            struct btrfs_root *root)
1241{
1242        int ret;
1243        u32 blocksize;
1244        u64 generation;
1245
1246        __setup_root(tree_root->nodesize, tree_root->leafsize,
1247                     tree_root->sectorsize, tree_root->stripesize,
1248                     root, fs_info, objectid);
1249        ret = btrfs_find_last_root(tree_root, objectid,
1250                                   &root->root_item, &root->root_key);
1251        if (ret > 0)
1252                return -ENOENT;
1253        else if (ret < 0)
1254                return ret;
1255
1256        generation = btrfs_root_generation(&root->root_item);
1257        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1258        root->commit_root = NULL;
1259        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1260                                     blocksize, generation);
1261        if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
1262                free_extent_buffer(root->node);
1263                root->node = NULL;
1264                return -EIO;
1265        }
1266        root->commit_root = btrfs_root_node(root);
1267        return 0;
1268}
1269
1270static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
1271{
1272        struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
1273        if (root)
1274                root->fs_info = fs_info;
1275        return root;
1276}
1277
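     /*
      * create a new tree for @objectid: allocate an empty leaf, fill in the
      * root item and insert it into the tree of tree roots
      */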
1278struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1279                                     struct btrfs_fs_info *fs_info,
1280                                     u64 objectid)
1281{
1282        struct extent_buffer *leaf;
1283        struct btrfs_root *tree_root = fs_info->tree_root;
1284        struct btrfs_root *root;
1285        struct btrfs_key key;
1286        int ret = 0;
1287        u64 bytenr;
1288        uuid_le uuid;
1289
1290        root = btrfs_alloc_root(fs_info);
1291        if (!root)
1292                return ERR_PTR(-ENOMEM);
1293
1294        __setup_root(tree_root->nodesize, tree_root->leafsize,
1295                     tree_root->sectorsize, tree_root->stripesize,
1296                     root, fs_info, objectid);
1297        root->root_key.objectid = objectid;
1298        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1299        root->root_key.offset = 0;
1300
1301        leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
1302                                      0, objectid, NULL, 0, 0, 0);
1303        if (IS_ERR(leaf)) {
1304                ret = PTR_ERR(leaf);
1305                leaf = NULL;
1306                goto fail;
1307        }
1308
1309        bytenr = leaf->start;
1310        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1311        btrfs_set_header_bytenr(leaf, leaf->start);
1312        btrfs_set_header_generation(leaf, trans->transid);
1313        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1314        btrfs_set_header_owner(leaf, objectid);
1315        root->node = leaf;
1316
1317        write_extent_buffer(leaf, fs_info->fsid,
1318                            (unsigned long)btrfs_header_fsid(leaf),
1319                            BTRFS_FSID_SIZE);
1320        write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
1321                            (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
1322                            BTRFS_UUID_SIZE);
1323        btrfs_mark_buffer_dirty(leaf);
1324
1325        root->commit_root = btrfs_root_node(root);
1326        root->track_dirty = 1;
1327
1328
1329        root->root_item.flags = 0;
1330        root->root_item.byte_limit = 0;
1331        btrfs_set_root_bytenr(&root->root_item, leaf->start);
1332        btrfs_set_root_generation(&root->root_item, trans->transid);
1333        btrfs_set_root_level(&root->root_item, 0);
1334        btrfs_set_root_refs(&root->root_item, 1);
1335        btrfs_set_root_used(&root->root_item, leaf->len);
1336        btrfs_set_root_last_snapshot(&root->root_item, 0);
1337        btrfs_set_root_dirid(&root->root_item, 0);
1338        uuid_le_gen(&uuid);
1339        memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
1340        root->root_item.drop_level = 0;
1341
1342        key.objectid = objectid;
1343        key.type = BTRFS_ROOT_ITEM_KEY;
1344        key.offset = 0;
1345        ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1346        if (ret)
1347                goto fail;
1348
1349        btrfs_tree_unlock(leaf);
1350
1351        return root;
1352
1353fail:
1354        if (leaf) {
1355                btrfs_tree_unlock(leaf);
1356                free_extent_buffer(leaf);
1357        }
1358        kfree(root);
1359
1360        return ERR_PTR(ret);
1361}
1362
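     /*
      * allocate an in-memory log tree root with a single empty leaf.  The
      * caller hooks it up as either the log root tree or a per-subvolume
      * log root.
      */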
1363static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1364                                         struct btrfs_fs_info *fs_info)
1365{
1366        struct btrfs_root *root;
1367        struct btrfs_root *tree_root = fs_info->tree_root;
1368        struct extent_buffer *leaf;
1369
1370        root = btrfs_alloc_root(fs_info);
1371        if (!root)
1372                return ERR_PTR(-ENOMEM);
1373
1374        __setup_root(tree_root->nodesize, tree_root->leafsize,
1375                     tree_root->sectorsize, tree_root->stripesize,
1376                     root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1377
1378        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1379        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1380        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1381        /*
1382         * log trees do not get reference counted because they go away
1383         * before a real commit is actually done.  They do store pointers
1384         * to file data extents, and those reference counts still get
1385         * updated (along with back refs to the log tree).
1386         */
1387        root->ref_cows = 0;
1388
1389        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1390                                      BTRFS_TREE_LOG_OBJECTID, NULL,
1391                                      0, 0, 0);
1392        if (IS_ERR(leaf)) {
1393                kfree(root);
1394                return ERR_CAST(leaf);
1395        }
1396
1397        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1398        btrfs_set_header_bytenr(leaf, leaf->start);
1399        btrfs_set_header_generation(leaf, trans->transid);
1400        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1401        btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1402        root->node = leaf;
1403
1404        write_extent_buffer(root->node, root->fs_info->fsid,
1405                            (unsigned long)btrfs_header_fsid(root->node),
1406                            BTRFS_FSID_SIZE);
1407        btrfs_mark_buffer_dirty(root->node);
1408        btrfs_tree_unlock(root->node);
1409        return root;
1410}
1411
1412int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1413                             struct btrfs_fs_info *fs_info)
1414{
1415        struct btrfs_root *log_root;
1416
1417        log_root = alloc_log_tree(trans, fs_info);
1418        if (IS_ERR(log_root))
1419                return PTR_ERR(log_root);
1420        WARN_ON(fs_info->log_root_tree);
1421        fs_info->log_root_tree = log_root;
1422        return 0;
1423}
1424
1425int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1426                       struct btrfs_root *root)
1427{
1428        struct btrfs_root *log_root;
1429        struct btrfs_inode_item *inode_item;
1430
1431        log_root = alloc_log_tree(trans, root->fs_info);
1432        if (IS_ERR(log_root))
1433                return PTR_ERR(log_root);
1434
1435        log_root->last_trans = trans->transid;
1436        log_root->root_key.offset = root->root_key.objectid;
1437
1438        inode_item = &log_root->root_item.inode;
1439        inode_item->generation = cpu_to_le64(1);
1440        inode_item->size = cpu_to_le64(3);
1441        inode_item->nlink = cpu_to_le32(1);
1442        inode_item->nbytes = cpu_to_le64(root->leafsize);
1443        inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1444
1445        btrfs_set_root_node(&log_root->root_item, log_root->node);
1446
1447        WARN_ON(root->log_root);
1448        root->log_root = log_root;
1449        root->log_transid = 0;
1450        root->last_log_commit = 0;
1451        return 0;
1452}
1453
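    /*
     * Read a root from the root tree given its key.  This does not look at
     * or update the fs_roots_radix cache; callers that want the cached
     * copy go through btrfs_read_fs_root_no_name() below.
     */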
1454struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1455                                               struct btrfs_key *location)
1456{
1457        struct btrfs_root *root;
1458        struct btrfs_fs_info *fs_info = tree_root->fs_info;
1459        struct btrfs_path *path;
1460        struct extent_buffer *l;
1461        u64 generation;
1462        u32 blocksize;
1463        int ret = 0;
1464        int slot;
1465
1466        root = btrfs_alloc_root(fs_info);
1467        if (!root)
1468                return ERR_PTR(-ENOMEM);
1469        if (location->offset == (u64)-1) {
1470                ret = find_and_setup_root(tree_root, fs_info,
1471                                          location->objectid, root);
1472                if (ret) {
1473                        kfree(root);
1474                        return ERR_PTR(ret);
1475                }
1476                goto out;
1477        }
1478
1479        __setup_root(tree_root->nodesize, tree_root->leafsize,
1480                     tree_root->sectorsize, tree_root->stripesize,
1481                     root, fs_info, location->objectid);
1482
1483        path = btrfs_alloc_path();
1484        if (!path) {
1485                kfree(root);
1486                return ERR_PTR(-ENOMEM);
1487        }
1488        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1489        if (ret == 0) {
1490                l = path->nodes[0];
1491                slot = path->slots[0];
1492                btrfs_read_root_item(l, slot, &root->root_item);
1493                memcpy(&root->root_key, location, sizeof(*location));
1494        }
1495        btrfs_free_path(path);
1496        if (ret) {
1497                kfree(root);
1498                if (ret > 0)
1499                        ret = -ENOENT;
1500                return ERR_PTR(ret);
1501        }
1502
1503        generation = btrfs_root_generation(&root->root_item);
1504        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1505        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1506                                     blocksize, generation);
1507        if (!root->node || !extent_buffer_uptodate(root->node)) {
1508                ret = (!root->node) ? -ENOMEM : -EIO;
1509
1510                free_extent_buffer(root->node);
1511                kfree(root);
1512                return ERR_PTR(ret);
1513        }
1514
1515        root->commit_root = btrfs_root_node(root);
1516out:
1517        if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1518                root->ref_cows = 1;
1519                btrfs_check_and_init_root_item(&root->root_item);
1520        }
1521
1522        return root;
1523}
1524
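    /*
     * Look up a root by key.  The global roots are returned directly;
     * other roots are read from disk and cached in fs_roots_radix.  If
     * another task races us and inserts the same root first, the -EEXIST
     * from radix_tree_insert() sends us back to the lookup so we reuse
     * that copy instead.
     */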
1525struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1526                                              struct btrfs_key *location)
1527{
1528        struct btrfs_root *root;
1529        int ret;
1530
1531        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1532                return fs_info->tree_root;
1533        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1534                return fs_info->extent_root;
1535        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1536                return fs_info->chunk_root;
1537        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1538                return fs_info->dev_root;
1539        if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1540                return fs_info->csum_root;
1541        if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1542                return fs_info->quota_root ? fs_info->quota_root :
1543                                             ERR_PTR(-ENOENT);
1544again:
1545        spin_lock(&fs_info->fs_roots_radix_lock);
1546        root = radix_tree_lookup(&fs_info->fs_roots_radix,
1547                                 (unsigned long)location->objectid);
1548        spin_unlock(&fs_info->fs_roots_radix_lock);
1549        if (root)
1550                return root;
1551
1552        root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1553        if (IS_ERR(root))
1554                return root;
1555
1556        root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1557        root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1558                                        GFP_NOFS);
1559        if (!root->free_ino_pinned || !root->free_ino_ctl) {
1560                ret = -ENOMEM;
1561                goto fail;
1562        }
1563
1564        btrfs_init_free_ino_ctl(root);
1565        mutex_init(&root->fs_commit_mutex);
1566        spin_lock_init(&root->cache_lock);
1567        init_waitqueue_head(&root->cache_wait);
1568
1569        ret = get_anon_bdev(&root->anon_dev);
1570        if (ret)
1571                goto fail;
1572
1573        if (btrfs_root_refs(&root->root_item) == 0) {
1574                ret = -ENOENT;
1575                goto fail;
1576        }
1577
1578        ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1579        if (ret < 0)
1580                goto fail;
1581        if (ret == 0)
1582                root->orphan_item_inserted = 1;
1583
1584        ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1585        if (ret)
1586                goto fail;
1587
1588        spin_lock(&fs_info->fs_roots_radix_lock);
1589        ret = radix_tree_insert(&fs_info->fs_roots_radix,
1590                                (unsigned long)root->root_key.objectid,
1591                                root);
1592        if (ret == 0)
1593                root->in_radix = 1;
1594
1595        spin_unlock(&fs_info->fs_roots_radix_lock);
1596        radix_tree_preload_end();
1597        if (ret) {
1598                if (ret == -EEXIST) {
1599                        free_fs_root(root);
1600                        goto again;
1601                }
1602                goto fail;
1603        }
1604
1605        ret = btrfs_find_dead_roots(fs_info->tree_root,
1606                                    root->root_key.objectid);
1607        WARN_ON(ret);
1608        return root;
1609fail:
1610        free_fs_root(root);
1611        return ERR_PTR(ret);
1612}
1613
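    /*
     * bdi congestion callback: report congestion if any of the devices
     * backing this filesystem is congested.
     */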
1614static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1615{
1616        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1617        int ret = 0;
1618        struct btrfs_device *device;
1619        struct backing_dev_info *bdi;
1620
1621        rcu_read_lock();
1622        list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1623                if (!device->bdev)
1624                        continue;
1625                bdi = blk_get_backing_dev_info(device->bdev);
1626                if (bdi && bdi_congested(bdi, bdi_bits)) {
1627                        ret = 1;
1628                        break;
1629                }
1630        }
1631        rcu_read_unlock();
1632        return ret;
1633}
1634
1635/*
1636 * If this fails, caller must call bdi_destroy() to get rid of the
1637 * bdi again.
1638 */
1639static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1640{
1641        int err;
1642
1643        bdi->capabilities = BDI_CAP_MAP_COPY;
1644        err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1645        if (err)
1646                return err;
1647
1648        bdi->ra_pages   = default_backing_dev_info.ra_pages;
1649        bdi->congested_fn       = btrfs_congested_fn;
1650        bdi->congested_data     = info;
1651        return 0;
1652}
1653
1654/*
1655 * called by the kthread helper functions to finally call the bio end_io
1656 * functions.  This is where read checksum verification actually happens
1657 */
1658static void end_workqueue_fn(struct btrfs_work *work)
1659{
1660        struct bio *bio;
1661        struct end_io_wq *end_io_wq;
1662        struct btrfs_fs_info *fs_info;
1663        int error;
1664
1665        end_io_wq = container_of(work, struct end_io_wq, work);
1666        bio = end_io_wq->bio;
1667        fs_info = end_io_wq->info;
1668
1669        error = end_io_wq->error;
1670        bio->bi_private = end_io_wq->private;
1671        bio->bi_end_io = end_io_wq->end_io;
1672        kfree(end_io_wq);
1673        bio_endio(bio, error);
1674}
1675
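    /*
     * Background thread that runs delayed iputs, cleans one deleted
     * snapshot per pass and kicks off inode defrag, but only while the
     * filesystem is writable and s_umount can be taken shared.
     */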
1676static int cleaner_kthread(void *arg)
1677{
1678        struct btrfs_root *root = arg;
1679
1680        do {
1681                int again = 0;
1682
1683                if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1684                    down_read_trylock(&root->fs_info->sb->s_umount)) {
1685                        if (mutex_trylock(&root->fs_info->cleaner_mutex)) {
1686                                btrfs_run_delayed_iputs(root);
1687                                again = btrfs_clean_one_deleted_snapshot(root);
1688                                mutex_unlock(&root->fs_info->cleaner_mutex);
1689                        }
1690                        btrfs_run_defrag_inodes(root->fs_info);
1691                        up_read(&root->fs_info->sb->s_umount);
1692                }
1693
1694                if (!try_to_freeze() && !again) {
1695                        set_current_state(TASK_INTERRUPTIBLE);
1696                        if (!kthread_should_stop())
1697                                schedule();
1698                        __set_current_state(TASK_RUNNING);
1699                }
1700        } while (!kthread_should_stop());
1701        return 0;
1702}
1703
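    /*
     * Background thread that commits the running transaction once it is
     * either blocked or roughly 30 seconds old.  It only attaches to an
     * existing transaction and never starts a new one.
     */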
1704static int transaction_kthread(void *arg)
1705{
1706        struct btrfs_root *root = arg;
1707        struct btrfs_trans_handle *trans;
1708        struct btrfs_transaction *cur;
1709        u64 transid;
1710        unsigned long now;
1711        unsigned long delay;
1712        bool cannot_commit;
1713
1714        do {
1715                cannot_commit = false;
1716                delay = HZ * 30;
1717                mutex_lock(&root->fs_info->transaction_kthread_mutex);
1718
1719                spin_lock(&root->fs_info->trans_lock);
1720                cur = root->fs_info->running_transaction;
1721                if (!cur) {
1722                        spin_unlock(&root->fs_info->trans_lock);
1723                        goto sleep;
1724                }
1725
1726                now = get_seconds();
1727                if (!cur->blocked &&
1728                    (now < cur->start_time || now - cur->start_time < 30)) {
1729                        spin_unlock(&root->fs_info->trans_lock);
1730                        delay = HZ * 5;
1731                        goto sleep;
1732                }
1733                transid = cur->transid;
1734                spin_unlock(&root->fs_info->trans_lock);
1735
1736                /* If the file system is aborted, this will always fail. */
1737                trans = btrfs_attach_transaction(root);
1738                if (IS_ERR(trans)) {
1739                        if (PTR_ERR(trans) != -ENOENT)
1740                                cannot_commit = true;
1741                        goto sleep;
1742                }
1743                if (transid == trans->transid) {
1744                        btrfs_commit_transaction(trans, root);
1745                } else {
1746                        btrfs_end_transaction(trans, root);
1747                }
1748sleep:
1749                wake_up_process(root->fs_info->cleaner_kthread);
1750                mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1751
1752                if (!try_to_freeze()) {
1753                        set_current_state(TASK_INTERRUPTIBLE);
1754                        if (!kthread_should_stop() &&
1755                            (!btrfs_transaction_blocked(root->fs_info) ||
1756                             cannot_commit))
1757                                schedule_timeout(delay);
1758                        __set_current_state(TASK_RUNNING);
1759                }
1760        } while (!kthread_should_stop());
1761        return 0;
1762}
1763
1764/*
1765 * This finds the highest generation in the array of
1766 * root backups.  The index of the highest entry is returned,
1767 * or -1 if we can't find anything.
1768 *
1769 * We check to make sure the array is valid by comparing the
1770 * generation of the latest root in the array with the generation
1771 * in the super block.  If they don't match we pitch it.
1772 */
1773static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1774{
1775        u64 cur;
1776        int newest_index = -1;
1777        struct btrfs_root_backup *root_backup;
1778        int i;
1779
1780        for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1781                root_backup = info->super_copy->super_roots + i;
1782                cur = btrfs_backup_tree_root_gen(root_backup);
1783                if (cur == newest_gen)
1784                        newest_index = i;
1785        }
1786
1787        /* check to see if we actually wrapped around */
1788        if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1789                root_backup = info->super_copy->super_roots;
1790                cur = btrfs_backup_tree_root_gen(root_backup);
1791                if (cur == newest_gen)
1792                        newest_index = 0;
1793        }
1794        return newest_index;
1795}
1796
1797
1798/*
1799 * find the oldest backup so we know where to store new entries
1800 * in the backup array.  This will set the backup_root_index
1801 * field in the fs_info struct
1802 */
1803static void find_oldest_super_backup(struct btrfs_fs_info *info,
1804                                     u64 newest_gen)
1805{
1806        int newest_index = -1;
1807
1808        newest_index = find_newest_super_backup(info, newest_gen);
1809        /* if there was garbage in there, just move along */
1810        if (newest_index == -1) {
1811                info->backup_root_index = 0;
1812        } else {
1813                info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1814        }
1815}
1816
1817/*
1818 * copy all the root pointers into the super backup array.
1819 * this will bump the backup pointer by one when it is
1820 * done
1821 */
1822static void backup_super_roots(struct btrfs_fs_info *info)
1823{
1824        int next_backup;
1825        struct btrfs_root_backup *root_backup;
1826        int last_backup;
1827
1828        next_backup = info->backup_root_index;
1829        last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1830                BTRFS_NUM_BACKUP_ROOTS;
1831
1832        /*
1833         * just overwrite the last backup if we're at the same generation;
1834         * this happens only at umount
1835         */
1836        root_backup = info->super_for_commit->super_roots + last_backup;
1837        if (btrfs_backup_tree_root_gen(root_backup) ==
1838            btrfs_header_generation(info->tree_root->node))
1839                next_backup = last_backup;
1840
1841        root_backup = info->super_for_commit->super_roots + next_backup;
1842
1843        /*
1844         * make sure all of our padding and empty slots get zero filled
1845         * regardless of which ones we use today
1846         */
1847        memset(root_backup, 0, sizeof(*root_backup));
1848
1849        info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1850
1851        btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1852        btrfs_set_backup_tree_root_gen(root_backup,
1853                               btrfs_header_generation(info->tree_root->node));
1854
1855        btrfs_set_backup_tree_root_level(root_backup,
1856                               btrfs_header_level(info->tree_root->node));
1857
1858        btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1859        btrfs_set_backup_chunk_root_gen(root_backup,
1860                               btrfs_header_generation(info->chunk_root->node));
1861        btrfs_set_backup_chunk_root_level(root_backup,
1862                               btrfs_header_level(info->chunk_root->node));
1863
1864        btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1865        btrfs_set_backup_extent_root_gen(root_backup,
1866                               btrfs_header_generation(info->extent_root->node));
1867        btrfs_set_backup_extent_root_level(root_backup,
1868                               btrfs_header_level(info->extent_root->node));
1869
1870        /*
1871         * we might commit during log recovery, which happens before we set
1872         * the fs_root.  Make sure it is valid before we fill it in.
1873         */
1874        if (info->fs_root && info->fs_root->node) {
1875                btrfs_set_backup_fs_root(root_backup,
1876                                         info->fs_root->node->start);
1877                btrfs_set_backup_fs_root_gen(root_backup,
1878                               btrfs_header_generation(info->fs_root->node));
1879                btrfs_set_backup_fs_root_level(root_backup,
1880                               btrfs_header_level(info->fs_root->node));
1881        }
1882
1883        btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1884        btrfs_set_backup_dev_root_gen(root_backup,
1885                               btrfs_header_generation(info->dev_root->node));
1886        btrfs_set_backup_dev_root_level(root_backup,
1887                                       btrfs_header_level(info->dev_root->node));
1888
1889        btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1890        btrfs_set_backup_csum_root_gen(root_backup,
1891                               btrfs_header_generation(info->csum_root->node));
1892        btrfs_set_backup_csum_root_level(root_backup,
1893                               btrfs_header_level(info->csum_root->node));
1894
1895        btrfs_set_backup_total_bytes(root_backup,
1896                             btrfs_super_total_bytes(info->super_copy));
1897        btrfs_set_backup_bytes_used(root_backup,
1898                             btrfs_super_bytes_used(info->super_copy));
1899        btrfs_set_backup_num_devices(root_backup,
1900                             btrfs_super_num_devices(info->super_copy));
1901
1902        /*
1903         * if we don't copy this out to the super_copy, it won't get remembered
1904         * for the next commit
1905         */
1906        memcpy(&info->super_copy->super_roots,
1907               &info->super_for_commit->super_roots,
1908               sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1909}
1910
1911/*
1912 * this copies info out of the root backup array and back into
1913 * the in-memory super block.  It is meant to help iterate through
1914 * the array, so you send it the number of backups you've already
1915 * tried and the last backup index you used.
1916 *
1917 * this returns -1 when it has tried all the backups
1918 */
1919static noinline int next_root_backup(struct btrfs_fs_info *info,
1920                                     struct btrfs_super_block *super,
1921                                     int *num_backups_tried, int *backup_index)
1922{
1923        struct btrfs_root_backup *root_backup;
1924        int newest = *backup_index;
1925
1926        if (*num_backups_tried == 0) {
1927                u64 gen = btrfs_super_generation(super);
1928
1929                newest = find_newest_super_backup(info, gen);
1930                if (newest == -1)
1931                        return -1;
1932
1933                *backup_index = newest;
1934                *num_backups_tried = 1;
1935        } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1936                /* we've tried all the backups, all done */
1937                return -1;
1938        } else {
1939                /* jump to the next oldest backup */
1940                newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1941                        BTRFS_NUM_BACKUP_ROOTS;
1942                *backup_index = newest;
1943                *num_backups_tried += 1;
1944        }
1945        root_backup = super->super_roots + newest;
1946
1947        btrfs_set_super_generation(super,
1948                                   btrfs_backup_tree_root_gen(root_backup));
1949        btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1950        btrfs_set_super_root_level(super,
1951                                   btrfs_backup_tree_root_level(root_backup));
1952        btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1953
1954        /*
1955         * fixme: the total bytes and num_devices need to match, otherwise
1956         * we should force a fsck
1957         */
1958        btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1959        btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1960        return 0;
1961}
1962
1963/* helper to cleanup workers */
1964static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1965{
1966        btrfs_stop_workers(&fs_info->generic_worker);
1967        btrfs_stop_workers(&fs_info->fixup_workers);
1968        btrfs_stop_workers(&fs_info->delalloc_workers);
1969        btrfs_stop_workers(&fs_info->workers);
1970        btrfs_stop_workers(&fs_info->endio_workers);
1971        btrfs_stop_workers(&fs_info->endio_meta_workers);
1972        btrfs_stop_workers(&fs_info->endio_raid56_workers);
1973        btrfs_stop_workers(&fs_info->rmw_workers);
1974        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
1975        btrfs_stop_workers(&fs_info->endio_write_workers);
1976        btrfs_stop_workers(&fs_info->endio_freespace_worker);
1977        btrfs_stop_workers(&fs_info->submit_workers);
1978        btrfs_stop_workers(&fs_info->delayed_workers);
1979        btrfs_stop_workers(&fs_info->caching_workers);
1980        btrfs_stop_workers(&fs_info->readahead_workers);
1981        btrfs_stop_workers(&fs_info->flush_workers);
1982        btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
1983}
1984
1985/* helper to cleanup tree roots */
1986static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
1987{
1988        free_extent_buffer(info->tree_root->node);
1989        free_extent_buffer(info->tree_root->commit_root);
1990        info->tree_root->node = NULL;
1991        info->tree_root->commit_root = NULL;
1992
1993        if (info->dev_root) {
1994                free_extent_buffer(info->dev_root->node);
1995                free_extent_buffer(info->dev_root->commit_root);
1996                info->dev_root->node = NULL;
1997                info->dev_root->commit_root = NULL;
1998        }
1999        if (info->extent_root) {
2000                free_extent_buffer(info->extent_root->node);
2001                free_extent_buffer(info->extent_root->commit_root);
2002                info->extent_root->node = NULL;
2003                info->extent_root->commit_root = NULL;
2004        }
2005        if (info->csum_root) {
2006                free_extent_buffer(info->csum_root->node);
2007                free_extent_buffer(info->csum_root->commit_root);
2008                info->csum_root->node = NULL;
2009                info->csum_root->commit_root = NULL;
2010        }
2011        if (info->quota_root) {
2012                free_extent_buffer(info->quota_root->node);
2013                free_extent_buffer(info->quota_root->commit_root);
2014                info->quota_root->node = NULL;
2015                info->quota_root->commit_root = NULL;
2016        }
2017        if (chunk_root) {
2018                free_extent_buffer(info->chunk_root->node);
2019                free_extent_buffer(info->chunk_root->commit_root);
2020                info->chunk_root->node = NULL;
2021                info->chunk_root->commit_root = NULL;
2022        }
2023}
2024
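    /*
     * Drop every fs root we still track: first everything on the
     * dead_roots list, then whatever is left in fs_roots_radix.
     */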
2025static void del_fs_roots(struct btrfs_fs_info *fs_info)
2026{
2027        int ret;
2028        struct btrfs_root *gang[8];
2029        int i;
2030
2031        while (!list_empty(&fs_info->dead_roots)) {
2032                gang[0] = list_entry(fs_info->dead_roots.next,
2033                                     struct btrfs_root, root_list);
2034                list_del(&gang[0]->root_list);
2035
2036                if (gang[0]->in_radix) {
2037                        btrfs_free_fs_root(fs_info, gang[0]);
2038                } else {
2039                        free_extent_buffer(gang[0]->node);
2040                        free_extent_buffer(gang[0]->commit_root);
2041                        kfree(gang[0]);
2042                }
2043        }
2044
2045        while (1) {
2046                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2047                                             (void **)gang, 0,
2048                                             ARRAY_SIZE(gang));
2049                if (!ret)
2050                        break;
2051                for (i = 0; i < ret; i++)
2052                        btrfs_free_fs_root(fs_info, gang[i]);
2053        }
2054}
2055
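    /*
     * Main mount path: allocate the core roots and fs_info state, read and
     * validate the super block, start the worker threads and kthreads,
     * read the chunk and root trees (falling back to the backup roots if
     * needed), replay the tree log and finally load the default fs root.
     */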
2056int open_ctree(struct super_block *sb,
2057               struct btrfs_fs_devices *fs_devices,
2058               char *options)
2059{
2060        u32 sectorsize;
2061        u32 nodesize;
2062        u32 leafsize;
2063        u32 blocksize;
2064        u32 stripesize;
2065        u64 generation;
2066        u64 features;
2067        struct btrfs_key location;
2068        struct buffer_head *bh;
2069        struct btrfs_super_block *disk_super;
2070        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2071        struct btrfs_root *tree_root;
2072        struct btrfs_root *extent_root;
2073        struct btrfs_root *csum_root;
2074        struct btrfs_root *chunk_root;
2075        struct btrfs_root *dev_root;
2076        struct btrfs_root *quota_root;
2077        struct btrfs_root *log_tree_root;
2078        int ret;
2079        int err = -EINVAL;
2080        int num_backups_tried = 0;
2081        int backup_index = 0;
2082
2083        tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
2084        extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
2085        csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
2086        chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
2087        dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
2088        quota_root = fs_info->quota_root = btrfs_alloc_root(fs_info);
2089
2090        if (!tree_root || !extent_root || !csum_root ||
2091            !chunk_root || !dev_root || !quota_root) {
2092                err = -ENOMEM;
2093                goto fail;
2094        }
2095
2096        ret = init_srcu_struct(&fs_info->subvol_srcu);
2097        if (ret) {
2098                err = ret;
2099                goto fail;
2100        }
2101
2102        ret = setup_bdi(fs_info, &fs_info->bdi);
2103        if (ret) {
2104                err = ret;
2105                goto fail_srcu;
2106        }
2107
2108        ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
2109        if (ret) {
2110                err = ret;
2111                goto fail_bdi;
2112        }
2113        fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
2114                                        (1 + ilog2(nr_cpu_ids));
2115
2116        ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
2117        if (ret) {
2118                err = ret;
2119                goto fail_dirty_metadata_bytes;
2120        }
2121
2122        fs_info->btree_inode = new_inode(sb);
2123        if (!fs_info->btree_inode) {
2124                err = -ENOMEM;
2125                goto fail_delalloc_bytes;
2126        }
2127
2128        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2129
2130        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2131        INIT_LIST_HEAD(&fs_info->trans_list);
2132        INIT_LIST_HEAD(&fs_info->dead_roots);
2133        INIT_LIST_HEAD(&fs_info->delayed_iputs);
2134        INIT_LIST_HEAD(&fs_info->delalloc_inodes);
2135        INIT_LIST_HEAD(&fs_info->caching_block_groups);
2136        spin_lock_init(&fs_info->delalloc_lock);
2137        spin_lock_init(&fs_info->trans_lock);
2138        spin_lock_init(&fs_info->fs_roots_radix_lock);
2139        spin_lock_init(&fs_info->delayed_iput_lock);
2140        spin_lock_init(&fs_info->defrag_inodes_lock);
2141        spin_lock_init(&fs_info->free_chunk_lock);
2142        spin_lock_init(&fs_info->tree_mod_seq_lock);
2143        spin_lock_init(&fs_info->super_lock);
2144        rwlock_init(&fs_info->tree_mod_log_lock);
2145        mutex_init(&fs_info->reloc_mutex);
2146        seqlock_init(&fs_info->profiles_lock);
2147
2148        init_completion(&fs_info->kobj_unregister);
2149        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2150        INIT_LIST_HEAD(&fs_info->space_info);
2151        INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2152        btrfs_mapping_init(&fs_info->mapping_tree);
2153        btrfs_init_block_rsv(&fs_info->global_block_rsv,
2154                             BTRFS_BLOCK_RSV_GLOBAL);
2155        btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2156                             BTRFS_BLOCK_RSV_DELALLOC);
2157        btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2158        btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2159        btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2160        btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2161                             BTRFS_BLOCK_RSV_DELOPS);
2162        atomic_set(&fs_info->nr_async_submits, 0);
2163        atomic_set(&fs_info->async_delalloc_pages, 0);
2164        atomic_set(&fs_info->async_submit_draining, 0);
2165        atomic_set(&fs_info->nr_async_bios, 0);
2166        atomic_set(&fs_info->defrag_running, 0);
2167        atomic64_set(&fs_info->tree_mod_seq, 0);
2168        fs_info->sb = sb;
2169        fs_info->max_inline = 8192 * 1024;
2170        fs_info->metadata_ratio = 0;
2171        fs_info->defrag_inodes = RB_ROOT;
2172        fs_info->trans_no_join = 0;
2173        fs_info->free_chunk_space = 0;
2174        fs_info->tree_mod_log = RB_ROOT;
2175
2176        /* readahead state */
2177        INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2178        spin_lock_init(&fs_info->reada_lock);
2179
2180        fs_info->thread_pool_size = min_t(unsigned long,
2181                                          num_online_cpus() + 2, 8);
2182
2183        INIT_LIST_HEAD(&fs_info->ordered_extents);
2184        spin_lock_init(&fs_info->ordered_extent_lock);
2185        fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2186                                        GFP_NOFS);
2187        if (!fs_info->delayed_root) {
2188                err = -ENOMEM;
2189                goto fail_iput;
2190        }
2191        btrfs_init_delayed_root(fs_info->delayed_root);
2192
2193        mutex_init(&fs_info->scrub_lock);
2194        atomic_set(&fs_info->scrubs_running, 0);
2195        atomic_set(&fs_info->scrub_pause_req, 0);
2196        atomic_set(&fs_info->scrubs_paused, 0);
2197        atomic_set(&fs_info->scrub_cancel_req, 0);
2198        init_waitqueue_head(&fs_info->scrub_pause_wait);
2199        init_rwsem(&fs_info->scrub_super_lock);
2200        fs_info->scrub_workers_refcnt = 0;
2201#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2202        fs_info->check_integrity_print_mask = 0;
2203#endif
2204
2205        spin_lock_init(&fs_info->balance_lock);
2206        mutex_init(&fs_info->balance_mutex);
2207        atomic_set(&fs_info->balance_running, 0);
2208        atomic_set(&fs_info->balance_pause_req, 0);
2209        atomic_set(&fs_info->balance_cancel_req, 0);
2210        fs_info->balance_ctl = NULL;
2211        init_waitqueue_head(&fs_info->balance_wait_q);
2212
2213        sb->s_blocksize = 4096;
2214        sb->s_blocksize_bits = blksize_bits(4096);
2215        sb->s_bdi = &fs_info->bdi;
2216
2217        fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2218        set_nlink(fs_info->btree_inode, 1);
2219        /*
2220         * we set the i_size on the btree inode to the max possible offset.
2221         * the real end of the address space is determined by all of
2222         * the devices in the system
2223         */
2224        fs_info->btree_inode->i_size = OFFSET_MAX;
2225        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2226        fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2227
2228        RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2229        extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2230                             fs_info->btree_inode->i_mapping);
2231        BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2232        extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2233
2234        BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2235
2236        BTRFS_I(fs_info->btree_inode)->root = tree_root;
2237        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2238               sizeof(struct btrfs_key));
2239        set_bit(BTRFS_INODE_DUMMY,
2240                &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2241        insert_inode_hash(fs_info->btree_inode);
2242
2243        spin_lock_init(&fs_info->block_group_cache_lock);
2244        fs_info->block_group_cache_tree = RB_ROOT;
2245        fs_info->first_logical_byte = (u64)-1;
2246
2247        extent_io_tree_init(&fs_info->freed_extents[0],
2248                             fs_info->btree_inode->i_mapping);
2249        extent_io_tree_init(&fs_info->freed_extents[1],
2250                             fs_info->btree_inode->i_mapping);
2251        fs_info->pinned_extents = &fs_info->freed_extents[0];
2252        fs_info->do_barriers = 1;
2253
2254
2255        mutex_init(&fs_info->ordered_operations_mutex);
2256        mutex_init(&fs_info->tree_log_mutex);
2257        mutex_init(&fs_info->chunk_mutex);
2258        mutex_init(&fs_info->transaction_kthread_mutex);
2259        mutex_init(&fs_info->cleaner_mutex);
2260        mutex_init(&fs_info->volume_mutex);
2261        init_rwsem(&fs_info->extent_commit_sem);
2262        init_rwsem(&fs_info->cleanup_work_sem);
2263        init_rwsem(&fs_info->subvol_sem);
2264        fs_info->dev_replace.lock_owner = 0;
2265        atomic_set(&fs_info->dev_replace.nesting_level, 0);
2266        mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2267        mutex_init(&fs_info->dev_replace.lock_management_lock);
2268        mutex_init(&fs_info->dev_replace.lock);
2269
2270        spin_lock_init(&fs_info->qgroup_lock);
2271        mutex_init(&fs_info->qgroup_ioctl_lock);
2272        fs_info->qgroup_tree = RB_ROOT;
2273        INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2274        fs_info->qgroup_seq = 1;
2275        fs_info->quota_enabled = 0;
2276        fs_info->pending_quota_state = 0;
2277        mutex_init(&fs_info->qgroup_rescan_lock);
2278
2279        btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2280        btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2281
2282        init_waitqueue_head(&fs_info->transaction_throttle);
2283        init_waitqueue_head(&fs_info->transaction_wait);
2284        init_waitqueue_head(&fs_info->transaction_blocked_wait);
2285        init_waitqueue_head(&fs_info->async_submit_wait);
2286
2287        ret = btrfs_alloc_stripe_hash_table(fs_info);
2288        if (ret) {
2289                err = ret;
2290                goto fail_alloc;
2291        }
2292
2293        __setup_root(4096, 4096, 4096, 4096, tree_root,
2294                     fs_info, BTRFS_ROOT_TREE_OBJECTID);
2295
2296        invalidate_bdev(fs_devices->latest_bdev);
2297
2298        /*
2299         * Read super block and check the signature bytes only
2300         */
2301        bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2302        if (!bh) {
2303                err = -EINVAL;
2304                goto fail_alloc;
2305        }
2306
2307        /*
2308         * We want to check the superblock checksum; the type is stored inside.
2309         * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2310         */
2311        if (btrfs_check_super_csum(bh->b_data)) {
2312                printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
2313                err = -EINVAL;
2314                goto fail_alloc;
2315        }
2316
2317        /*
2318         * super_copy is zeroed at allocation time and we never touch the
2319         * following bytes up to INFO_SIZE; the checksum is calculated from
2320         * the whole block of INFO_SIZE
2321         */
2322        memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2323        memcpy(fs_info->super_for_commit, fs_info->super_copy,
2324               sizeof(*fs_info->super_for_commit));
2325        brelse(bh);
2326
2327        memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2328
2329        ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2330        if (ret) {
2331                printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
2332                err = -EINVAL;
2333                goto fail_alloc;
2334        }
2335
2336        disk_super = fs_info->super_copy;
2337        if (!btrfs_super_root(disk_super))
2338                goto fail_alloc;
2339
2340        /* check FS state, whether FS is broken. */
2341        if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2342                set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2343
2344        /*
2345         * run through our array of backup supers and set up
2346         * our ring pointer to the oldest one
2347         */
2348        generation = btrfs_super_generation(disk_super);
2349        find_oldest_super_backup(fs_info, generation);
2350
2351        /*
2352         * In the long term, we'll store the compression type in the super
2353         * block, and it'll be used for per file compression control.
2354         */
2355        fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2356
2357        ret = btrfs_parse_options(tree_root, options);
2358        if (ret) {
2359                err = ret;
2360                goto fail_alloc;
2361        }
2362
2363        features = btrfs_super_incompat_flags(disk_super) &
2364                ~BTRFS_FEATURE_INCOMPAT_SUPP;
2365        if (features) {
2366                printk(KERN_ERR "BTRFS: couldn't mount because of "
2367                       "unsupported optional features (%Lx).\n",
2368                       (unsigned long long)features);
2369                err = -EINVAL;
2370                goto fail_alloc;
2371        }
2372
2373        if (btrfs_super_leafsize(disk_super) !=
2374            btrfs_super_nodesize(disk_super)) {
2375                printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2376                       "blocksizes don't match.  node %d leaf %d\n",
2377                       btrfs_super_nodesize(disk_super),
2378                       btrfs_super_leafsize(disk_super));
2379                err = -EINVAL;
2380                goto fail_alloc;
2381        }
2382        if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2383                printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2384                       "blocksize (%d) was too large\n",
2385                       btrfs_super_leafsize(disk_super));
2386                err = -EINVAL;
2387                goto fail_alloc;
2388        }
2389
2390        features = btrfs_super_incompat_flags(disk_super);
2391        features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2392        if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2393                features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2394
2395        if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2396                printk(KERN_ERR "btrfs: has skinny extents\n");
2397
2398        /*
2399         * flag our filesystem as having big metadata blocks if
2400         * they are bigger than the page size
2401         */
2402        if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2403                if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2404                        printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
2405                features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2406        }
2407
2408        nodesize = btrfs_super_nodesize(disk_super);
2409        leafsize = btrfs_super_leafsize(disk_super);
2410        sectorsize = btrfs_super_sectorsize(disk_super);
2411        stripesize = btrfs_super_stripesize(disk_super);
2412        fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
2413        fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2414
2415        /*
2416         * mixed block groups end up with duplicate but slightly offset
2417         * extent buffers for the same range.  This leads to corruption
2418         */
2419        if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2420            (sectorsize != leafsize)) {
2421                printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
2422                                "are not allowed for mixed block groups on %s\n",
2423                                sb->s_id);
2424                goto fail_alloc;
2425        }
2426
2427        /*
2428         * No need to take the lock because there is no other task which will
2429         * update the flag.
2430         */
2431        btrfs_set_super_incompat_flags(disk_super, features);
2432
2433        features = btrfs_super_compat_ro_flags(disk_super) &
2434                ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2435        if (!(sb->s_flags & MS_RDONLY) && features) {
2436                printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2437                       "unsupported option features (%Lx).\n",
2438                       (unsigned long long)features);
2439                err = -EINVAL;
2440                goto fail_alloc;
2441        }
2442
2443        btrfs_init_workers(&fs_info->generic_worker,
2444                           "genwork", 1, NULL);
2445
2446        btrfs_init_workers(&fs_info->workers, "worker",
2447                           fs_info->thread_pool_size,
2448                           &fs_info->generic_worker);
2449
2450        btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
2451                           fs_info->thread_pool_size,
2452                           &fs_info->generic_worker);
2453
2454        btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
2455                           fs_info->thread_pool_size,
2456                           &fs_info->generic_worker);
2457
2458        btrfs_init_workers(&fs_info->submit_workers, "submit",
2459                           min_t(u64, fs_devices->num_devices,
2460                           fs_info->thread_pool_size),
2461                           &fs_info->generic_worker);
2462
2463        btrfs_init_workers(&fs_info->caching_workers, "cache",
2464                           2, &fs_info->generic_worker);
2465
2466        /* a higher idle thresh on the submit workers makes it much more
2467         * likely that bios will be sent down in a sane order to the
2468         * devices
2469         */
2470        fs_info->submit_workers.idle_thresh = 64;
2471
2472        fs_info->workers.idle_thresh = 16;
2473        fs_info->workers.ordered = 1;
2474
2475        fs_info->delalloc_workers.idle_thresh = 2;
2476        fs_info->delalloc_workers.ordered = 1;
2477
2478        btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
2479                           &fs_info->generic_worker);
2480        btrfs_init_workers(&fs_info->endio_workers, "endio",
2481                           fs_info->thread_pool_size,
2482                           &fs_info->generic_worker);
2483        btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
2484                           fs_info->thread_pool_size,
2485                           &fs_info->generic_worker);
2486        btrfs_init_workers(&fs_info->endio_meta_write_workers,
2487                           "endio-meta-write", fs_info->thread_pool_size,
2488                           &fs_info->generic_worker);
2489        btrfs_init_workers(&fs_info->endio_raid56_workers,
2490                           "endio-raid56", fs_info->thread_pool_size,
2491                           &fs_info->generic_worker);
2492        btrfs_init_workers(&fs_info->rmw_workers,
2493                           "rmw", fs_info->thread_pool_size,
2494                           &fs_info->generic_worker);
2495        btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
2496                           fs_info->thread_pool_size,
2497                           &fs_info->generic_worker);
2498        btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
2499                           1, &fs_info->generic_worker);
2500        btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
2501                           fs_info->thread_pool_size,
2502                           &fs_info->generic_worker);
2503        btrfs_init_workers(&fs_info->readahead_workers, "readahead",
2504                           fs_info->thread_pool_size,
2505                           &fs_info->generic_worker);
2506        btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1,
2507                           &fs_info->generic_worker);
2508
2509        /*
2510         * endios are largely parallel and should have a very
2511         * low idle thresh
2512         */
2513        fs_info->endio_workers.idle_thresh = 4;
2514        fs_info->endio_meta_workers.idle_thresh = 4;
2515        fs_info->endio_raid56_workers.idle_thresh = 4;
2516        fs_info->rmw_workers.idle_thresh = 2;
2517
2518        fs_info->endio_write_workers.idle_thresh = 2;
2519        fs_info->endio_meta_write_workers.idle_thresh = 2;
2520        fs_info->readahead_workers.idle_thresh = 2;
2521
2522        /*
2523         * btrfs_start_workers can really only fail because of ENOMEM so just
2524         * return -ENOMEM if any of these fail.
2525         */
2526        ret = btrfs_start_workers(&fs_info->workers);
2527        ret |= btrfs_start_workers(&fs_info->generic_worker);
2528        ret |= btrfs_start_workers(&fs_info->submit_workers);
2529        ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2530        ret |= btrfs_start_workers(&fs_info->fixup_workers);
2531        ret |= btrfs_start_workers(&fs_info->endio_workers);
2532        ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2533        ret |= btrfs_start_workers(&fs_info->rmw_workers);
2534        ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
2535        ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2536        ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2537        ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2538        ret |= btrfs_start_workers(&fs_info->delayed_workers);
2539        ret |= btrfs_start_workers(&fs_info->caching_workers);
2540        ret |= btrfs_start_workers(&fs_info->readahead_workers);
2541        ret |= btrfs_start_workers(&fs_info->flush_workers);
2542        ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
2543        if (ret) {
2544                err = -ENOMEM;
2545                goto fail_sb_buffer;
2546        }
2547
2548        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2549        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2550                                    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2551
2552        tree_root->nodesize = nodesize;
2553        tree_root->leafsize = leafsize;
2554        tree_root->sectorsize = sectorsize;
2555        tree_root->stripesize = stripesize;
2556
2557        sb->s_blocksize = sectorsize;
2558        sb->s_blocksize_bits = blksize_bits(sectorsize);
2559
2560        if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2561                printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2562                goto fail_sb_buffer;
2563        }
2564
2565        if (sectorsize != PAGE_SIZE) {
2566                printk(KERN_WARNING "btrfs: Incompatible sector size (%lu) "
2567                       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2568                goto fail_sb_buffer;
2569        }
2570
2571        mutex_lock(&fs_info->chunk_mutex);
2572        ret = btrfs_read_sys_array(tree_root);
2573        mutex_unlock(&fs_info->chunk_mutex);
2574        if (ret) {
2575                printk(KERN_WARNING "btrfs: failed to read the system "
2576                       "array on %s\n", sb->s_id);
2577                goto fail_sb_buffer;
2578        }
2579
2580        blocksize = btrfs_level_size(tree_root,
2581                                     btrfs_super_chunk_root_level(disk_super));
2582        generation = btrfs_super_chunk_root_generation(disk_super);
2583
2584        __setup_root(nodesize, leafsize, sectorsize, stripesize,
2585                     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2586
2587        chunk_root->node = read_tree_block(chunk_root,
2588                                           btrfs_super_chunk_root(disk_super),
2589                                           blocksize, generation);
2590        if (!chunk_root->node ||
2591            !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2592                printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2593                       sb->s_id);
2594                goto fail_tree_roots;
2595        }
2596        btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2597        chunk_root->commit_root = btrfs_root_node(chunk_root);
2598
2599        read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2600           (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
2601           BTRFS_UUID_SIZE);
2602
2603        ret = btrfs_read_chunk_tree(chunk_root);
2604        if (ret) {
2605                printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
2606                       sb->s_id);
2607                goto fail_tree_roots;
2608        }
2609
2610        /*
2611         * keep the device that is marked to be the target device for the
2612         * dev_replace procedure
2613         */
2614        btrfs_close_extra_devices(fs_info, fs_devices, 0);
2615
2616        if (!fs_devices->latest_bdev) {
2617                printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
2618                       sb->s_id);
2619                goto fail_tree_roots;
2620        }
2621
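            /*
             * Read failures below jump to recovery_tree_root, which falls
             * back to the next backup root (see next_root_backup()) and
             * retries from this label.
             */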
2622retry_root_backup:
2623        blocksize = btrfs_level_size(tree_root,
2624                                     btrfs_super_root_level(disk_super));
2625        generation = btrfs_super_generation(disk_super);
2626
2627        tree_root->node = read_tree_block(tree_root,
2628                                          btrfs_super_root(disk_super),
2629                                          blocksize, generation);
2630        if (!tree_root->node ||
2631            !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2632                printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2633                       sb->s_id);
2634
2635                goto recovery_tree_root;
2636        }
2637
2638        btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2639        tree_root->commit_root = btrfs_root_node(tree_root);
2640
2641        ret = find_and_setup_root(tree_root, fs_info,
2642                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
2643        if (ret)
2644                goto recovery_tree_root;
2645        extent_root->track_dirty = 1;
2646
2647        ret = find_and_setup_root(tree_root, fs_info,
2648                                  BTRFS_DEV_TREE_OBJECTID, dev_root);
2649        if (ret)
2650                goto recovery_tree_root;
2651        dev_root->track_dirty = 1;
2652
2653        ret = find_and_setup_root(tree_root, fs_info,
2654                                  BTRFS_CSUM_TREE_OBJECTID, csum_root);
2655        if (ret)
2656                goto recovery_tree_root;
2657        csum_root->track_dirty = 1;
2658
2659        ret = find_and_setup_root(tree_root, fs_info,
2660                                  BTRFS_QUOTA_TREE_OBJECTID, quota_root);
2661        if (ret) {
2662                kfree(quota_root);
2663                quota_root = fs_info->quota_root = NULL;
2664        } else {
2665                quota_root->track_dirty = 1;
2666                fs_info->quota_enabled = 1;
2667                fs_info->pending_quota_state = 1;
2668        }
2669
2670        fs_info->generation = generation;
2671        fs_info->last_trans_committed = generation;
2672
2673        ret = btrfs_recover_balance(fs_info);
2674        if (ret) {
2675                printk(KERN_WARNING "btrfs: failed to recover balance\n");
2676                goto fail_block_groups;
2677        }
2678
2679        ret = btrfs_init_dev_stats(fs_info);
2680        if (ret) {
2681                printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
2682                       ret);
2683                goto fail_block_groups;
2684        }
2685
2686        ret = btrfs_init_dev_replace(fs_info);
2687        if (ret) {
2688                pr_err("btrfs: failed to init dev_replace: %d\n", ret);
2689                goto fail_block_groups;
2690        }
2691
2692        btrfs_close_extra_devices(fs_info, fs_devices, 1);
2693
2694        ret = btrfs_init_space_info(fs_info);
2695        if (ret) {
2696                printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2697                goto fail_block_groups;
2698        }
2699
2700        ret = btrfs_read_block_groups(extent_root);
2701        if (ret) {
2702                printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2703                goto fail_block_groups;
2704        }
2705        fs_info->num_tolerated_disk_barrier_failures =
2706                btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2707        if (fs_info->fs_devices->missing_devices >
2708             fs_info->num_tolerated_disk_barrier_failures &&
2709            !(sb->s_flags & MS_RDONLY)) {
2710                printk(KERN_WARNING
2711                       "Btrfs: too many missing devices, writeable mount is not allowed\n");
2712                goto fail_block_groups;
2713        }
2714
2715        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2716                                               "btrfs-cleaner");
2717        if (IS_ERR(fs_info->cleaner_kthread))
2718                goto fail_block_groups;
2719
2720        fs_info->transaction_kthread = kthread_run(transaction_kthread,
2721                                                   tree_root,
2722                                                   "btrfs-transaction");
2723        if (IS_ERR(fs_info->transaction_kthread))
2724                goto fail_cleaner;
2725
2726        if (!btrfs_test_opt(tree_root, SSD) &&
2727            !btrfs_test_opt(tree_root, NOSSD) &&
2728            !fs_info->fs_devices->rotating) {
2729                printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2730                       "mode\n");
2731                btrfs_set_opt(fs_info->mount_opt, SSD);
2732        }
2733
2734#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2735        if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2736                ret = btrfsic_mount(tree_root, fs_devices,
2737                                    btrfs_test_opt(tree_root,
2738                                        CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2739                                    1 : 0,
2740                                    fs_info->check_integrity_print_mask);
2741                if (ret)
2742                        printk(KERN_WARNING "btrfs: failed to initialize"
2743                               " integrity check module %s\n", sb->s_id);
2744        }
2745#endif
2746        ret = btrfs_read_qgroup_config(fs_info);
2747        if (ret)
2748                goto fail_trans_kthread;
2749
2750        /* do not make disk changes in broken FS */
2751        if (btrfs_super_log_root(disk_super) != 0) {
2752                u64 bytenr = btrfs_super_log_root(disk_super);
2753
2754                if (fs_devices->rw_devices == 0) {
2755                        printk(KERN_WARNING "Btrfs log replay required "
2756                               "on RO media\n");
2757                        err = -EIO;
2758                        goto fail_qgroup;
2759                }
2760                blocksize =
2761                     btrfs_level_size(tree_root,
2762                                      btrfs_super_log_root_level(disk_super));
2763
2764                log_tree_root = btrfs_alloc_root(fs_info);
2765                if (!log_tree_root) {
2766                        err = -ENOMEM;
2767                        goto fail_qgroup;
2768                }
2769
2770                __setup_root(nodesize, leafsize, sectorsize, stripesize,
2771                             log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2772
2773                log_tree_root->node = read_tree_block(tree_root, bytenr,
2774                                                      blocksize,
2775                                                      generation + 1);
2776                if (!log_tree_root->node ||
2777                    !extent_buffer_uptodate(log_tree_root->node)) {
2778                        printk(KERN_ERR "btrfs: failed to read log tree\n");
2779                        free_extent_buffer(log_tree_root->node);
2780                        kfree(log_tree_root);
2781                        goto fail_trans_kthread;
2782                }
2783                /* returns with log_tree_root freed on success */
2784                ret = btrfs_recover_log_trees(log_tree_root);
2785                if (ret) {
2786                        btrfs_error(tree_root->fs_info, ret,
2787                                    "Failed to recover log tree");
2788                        free_extent_buffer(log_tree_root->node);
2789                        kfree(log_tree_root);
2790                        goto fail_trans_kthread;
2791                }
2792
2793                if (sb->s_flags & MS_RDONLY) {
2794                        ret = btrfs_commit_super(tree_root);
2795                        if (ret)
2796                                goto fail_trans_kthread;
2797                }
2798        }
2799
2800        ret = btrfs_find_orphan_roots(tree_root);
2801        if (ret)
2802                goto fail_trans_kthread;
2803
2804        if (!(sb->s_flags & MS_RDONLY)) {
2805                ret = btrfs_cleanup_fs_roots(fs_info);
2806                if (ret)
2807                        goto fail_trans_kthread;
2808
2809                ret = btrfs_recover_relocation(tree_root);
2810                if (ret < 0) {
2811                        printk(KERN_WARNING
2812                               "btrfs: failed to recover relocation\n");
2813                        err = -EINVAL;
2814                        goto fail_qgroup;
2815                }
2816        }
2817
2818        location.objectid = BTRFS_FS_TREE_OBJECTID;
2819        location.type = BTRFS_ROOT_ITEM_KEY;
2820        location.offset = (u64)-1;
2821
2822        fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2823        if (!fs_info->fs_root)
2824                goto fail_qgroup;
2825        if (IS_ERR(fs_info->fs_root)) {
2826                err = PTR_ERR(fs_info->fs_root);
2827                goto fail_qgroup;
2828        }
2829
2830        if (sb->s_flags & MS_RDONLY)
2831                return 0;
2832
2833        down_read(&fs_info->cleanup_work_sem);
2834        if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2835            (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2836                up_read(&fs_info->cleanup_work_sem);
2837                close_ctree(tree_root);
2838                return ret;
2839        }
2840        up_read(&fs_info->cleanup_work_sem);
2841
2842        ret = btrfs_resume_balance_async(fs_info);
2843        if (ret) {
2844                printk(KERN_WARNING "btrfs: failed to resume balance\n");
2845                close_ctree(tree_root);
2846                return ret;
2847        }
2848
2849        ret = btrfs_resume_dev_replace_async(fs_info);
2850        if (ret) {
2851                pr_warn("btrfs: failed to resume dev_replace\n");
2852                close_ctree(tree_root);
2853                return ret;
2854        }
2855
2856        return 0;
2857
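    /*
     * error unwinding for open_ctree(): each goto lands on the label
     * matching how far setup got, then falls through the remaining
     * labels to undo everything initialized before the failure.
     */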
2858fail_qgroup:
2859        btrfs_free_qgroup_config(fs_info);
2860fail_trans_kthread:
2861        kthread_stop(fs_info->transaction_kthread);
2862        btrfs_cleanup_transaction(fs_info->tree_root);
2863        del_fs_roots(fs_info);
2864fail_cleaner:
2865        kthread_stop(fs_info->cleaner_kthread);
2866
2867        /*
2868         * make sure we're done with the btree inode before we stop our
2869         * kthreads
2870         */
2871        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2872
2873fail_block_groups:
2874        btrfs_put_block_group_cache(fs_info);
2875        btrfs_free_block_groups(fs_info);
2876
2877fail_tree_roots:
2878        free_root_pointers(fs_info, 1);
2879        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2880
2881fail_sb_buffer:
2882        btrfs_stop_all_workers(fs_info);
2883fail_alloc:
2884fail_iput:
2885        btrfs_mapping_tree_free(&fs_info->mapping_tree);
2886
2887        iput(fs_info->btree_inode);
2888fail_delalloc_bytes:
2889        percpu_counter_destroy(&fs_info->delalloc_bytes);
2890fail_dirty_metadata_bytes:
2891        percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
2892fail_bdi:
2893        bdi_destroy(&fs_info->bdi);
2894fail_srcu:
2895        cleanup_srcu_struct(&fs_info->subvol_srcu);
2896fail:
2897        btrfs_free_stripe_hash_table(fs_info);
2898        btrfs_close_devices(fs_info->fs_devices);
2899        return err;
2900
2901recovery_tree_root:
2902        if (!btrfs_test_opt(tree_root, RECOVERY))
2903                goto fail_tree_roots;
2904
2905        free_root_pointers(fs_info, 0);
2906
2907        /* don't use the log in recovery mode, it won't be valid */
2908        btrfs_set_super_log_root(disk_super, 0);
2909
2910        /* we can't trust the free space cache either */
2911        btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2912
2913        ret = next_root_backup(fs_info, fs_info->super_copy,
2914                               &num_backups_tried, &backup_index);
2915        if (ret == -1)
2916                goto fail_block_groups;
2917        goto retry_root_backup;
2918}
2919
2920static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2921{
2922        if (uptodate) {
2923                set_buffer_uptodate(bh);
2924        } else {
2925                struct btrfs_device *device = (struct btrfs_device *)
2926                        bh->b_private;
2927
2928                printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
2929                                          "I/O error on %s\n",
2930                                          rcu_str_deref(device->name));
2931                /* note, we don't set_buffer_write_io_error because we have
2932                 * our own ways of dealing with the IO errors
2933                 */
2934                clear_buffer_uptodate(bh);
2935                btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
2936        }
2937        unlock_buffer(bh);
2938        put_bh(bh);
2939}
2940
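    /*
     * scan the super block copies on @bdev (currently only the first one,
     * see the comment below) and return the buffer_head holding the most
     * recent valid super, or NULL.  The caller owns the returned reference
     * and must brelse() it when finished.
     */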
2941struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2942{
2943        struct buffer_head *bh;
2944        struct buffer_head *latest = NULL;
2945        struct btrfs_super_block *super;
2946        int i;
2947        u64 transid = 0;
2948        u64 bytenr;
2949
2950        /* we would like to check all the supers, but that would make
2951         * a btrfs mount succeed after a mkfs from a different FS.
2952         * So, until we add a special mount option to scan all the
2953         * BTRFS_SUPER_MIRROR_MAX copies, only the first super is checked.
2954         */
2955        for (i = 0; i < 1; i++) {
2956                bytenr = btrfs_sb_offset(i);
2957                if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2958                        break;
2959                bh = __bread(bdev, bytenr / 4096, 4096);
2960                if (!bh)
2961                        continue;
2962
2963                super = (struct btrfs_super_block *)bh->b_data;
2964                if (btrfs_super_bytenr(super) != bytenr ||
2965                    super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2966                        brelse(bh);
2967                        continue;
2968                }
2969
2970                if (!latest || btrfs_super_generation(super) > transid) {
2971                        brelse(latest);
2972                        latest = bh;
2973                        transid = btrfs_super_generation(super);
2974                } else {
2975                        brelse(bh);
2976                }
2977        }
2978        return latest;
2979}
2980
2981/*
2982 * this should be called twice, once with wait == 0 and
2983 * once with wait == 1.  When wait == 0 is done, all the buffer heads
2984 * we write are pinned.
2985 *
2986 * They are released when wait == 1 is done.
2987 * max_mirrors must be the same for both runs, and it indicates how
2988 * many supers on this one device should be written.
2989 *
2990 * max_mirrors == 0 means to write them all.
2991 */
2992static int write_dev_supers(struct btrfs_device *device,
2993                            struct btrfs_super_block *sb,
2994                            int do_barriers, int wait, int max_mirrors)
2995{
2996        struct buffer_head *bh;
2997        int i;
2998        int ret;
2999        int errors = 0;
3000        u32 crc;
3001        u64 bytenr;
3002
3003        if (max_mirrors == 0)
3004                max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3005
3006        for (i = 0; i < max_mirrors; i++) {
3007                bytenr = btrfs_sb_offset(i);
3008                if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
3009                        break;
3010
3011                if (wait) {
3012                        bh = __find_get_block(device->bdev, bytenr / 4096,
3013                                              BTRFS_SUPER_INFO_SIZE);
3014                        if (!bh) {
3015                                errors++;
3016                                continue;
3017                        }
3018                        wait_on_buffer(bh);
3019                        if (!buffer_uptodate(bh))
3020                                errors++;
3021
3022                        /* drop our reference */
3023                        brelse(bh);
3024
3025                        /* drop the reference from the wait == 0 run */
3026                        brelse(bh);
3027                        continue;
3028                } else {
3029                        btrfs_set_super_bytenr(sb, bytenr);
3030
3031                        crc = ~(u32)0;
3032                        crc = btrfs_csum_data((char *)sb +
3033                                              BTRFS_CSUM_SIZE, crc,
3034                                              BTRFS_SUPER_INFO_SIZE -
3035                                              BTRFS_CSUM_SIZE);
3036                        btrfs_csum_final(crc, sb->csum);
3037
3038                        /*
3039                         * the reference from __getblk is left for the
3040                         * wait == 1 run to drop
3041                         */
3042                        bh = __getblk(device->bdev, bytenr / 4096,
3043                                      BTRFS_SUPER_INFO_SIZE);
3044                        if (!bh) {
3045                                printk(KERN_ERR "btrfs: couldn't get super "
3046                                       "buffer head for bytenr %Lu\n", bytenr);
3047                                errors++;
3048                                continue;
3049                        }
3050
3051                        memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3052
3053                        /* one reference for submit_bh */
3054                        get_bh(bh);
3055
3056                        set_buffer_uptodate(bh);
3057                        lock_buffer(bh);
3058                        bh->b_end_io = btrfs_end_buffer_write_sync;
3059                        bh->b_private = device;
3060                }
3061
3062                /*
3063                 * we fua the first super.  The others we allow
3064                 * to go down lazy.
3065                 */
3066                if (i == 0)
                            ret = btrfsic_submit_bh(WRITE_FUA, bh);
                    else
                            ret = btrfsic_submit_bh(WRITE_SYNC, bh);
3067                if (ret)
3068                        errors++;
3069        }
3070        return errors < i ? 0 : -1;
3071}
3072
3073/*
3074 * endio for the write_dev_flush, this will wake anyone waiting
3075 * for the barrier when it is done
3076 */
3077static void btrfs_end_empty_barrier(struct bio *bio, int err)
3078{
3079        if (err) {
3080                if (err == -EOPNOTSUPP)
3081                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
3082                clear_bit(BIO_UPTODATE, &bio->bi_flags);
3083        }
3084        if (bio->bi_private)
3085                complete(bio->bi_private);
3086        bio_put(bio);
3087}
3088
3089/*
3090 * trigger flushes for one of the devices.  If you pass wait == 0, the flush is
3091 * sent down and we return.  With wait == 1, it waits for the previous flush.
3092 *
3093 * any device where the flush fails with eopnotsupp is flagged as not
3094 * barrier capable
3095 */
3096static int write_dev_flush(struct btrfs_device *device, int wait)
3097{
3098        struct bio *bio;
3099        int ret = 0;
3100
3101        if (device->nobarriers)
3102                return 0;
3103
3104        if (wait) {
3105                bio = device->flush_bio;
3106                if (!bio)
3107                        return 0;
3108
3109                wait_for_completion(&device->flush_wait);
3110
3111                if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
3112                        printk_in_rcu("btrfs: disabling barriers on dev %s\n",
3113                                      rcu_str_deref(device->name));
3114                        device->nobarriers = 1;
3115                } else if (!bio_flagged(bio, BIO_UPTODATE)) {
3116                        ret = -EIO;
3117                        btrfs_dev_stat_inc_and_print(device,
3118                                BTRFS_DEV_STAT_FLUSH_ERRS);
3119                }
3120
3121                /* drop the reference from the wait == 0 run */
3122                bio_put(bio);
3123                device->flush_bio = NULL;
3124
3125                return ret;
3126        }
3127
3128        /*
3129         * allocate the flush bio and take an extra reference on it; the
3130         * wait == 1 pass drops that reference once the flush is done
3131         */
3132        device->flush_bio = NULL;
3133        bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3134        if (!bio)
3135                return -ENOMEM;
3136
3137        bio->bi_end_io = btrfs_end_empty_barrier;
3138        bio->bi_bdev = device->bdev;
3139        init_completion(&device->flush_wait);
3140        bio->bi_private = &device->flush_wait;
3141        device->flush_bio = bio;
3142
3143        bio_get(bio);
3144        btrfsic_submit_bio(WRITE_FLUSH, bio);
3145
3146        return 0;
3147}
3148
3149/*
3150 * send an empty flush down to each device in parallel,
3151 * then wait for them
3152 */
3153static int barrier_all_devices(struct btrfs_fs_info *info)
3154{
3155        struct list_head *head;
3156        struct btrfs_device *dev;
3157        int errors_send = 0;
3158        int errors_wait = 0;
3159        int ret;
3160
3161        /* send down all the barriers */
3162        head = &info->fs_devices->devices;
3163        list_for_each_entry_rcu(dev, head, dev_list) {
3164                if (!dev->bdev) {
3165                        errors_send++;
3166                        continue;
3167                }
3168                if (!dev->in_fs_metadata || !dev->writeable)
3169                        continue;
3170
3171                ret = write_dev_flush(dev, 0);
3172                if (ret)
3173                        errors_send++;
3174        }
3175
3176        /* wait for all the barriers */
3177        list_for_each_entry_rcu(dev, head, dev_list) {
3178                if (!dev->bdev) {
3179                        errors_wait++;
3180                        continue;
3181                }
3182                if (!dev->in_fs_metadata || !dev->writeable)
3183                        continue;
3184
3185                ret = write_dev_flush(dev, 1);
3186                if (ret)
3187                        errors_wait++;
3188        }
3189        if (errors_send > info->num_tolerated_disk_barrier_failures ||
3190            errors_wait > info->num_tolerated_disk_barrier_failures)
3191                return -EIO;
3192        return 0;
3193}
3194
3195int btrfs_calc_num_tolerated_disk_barrier_failures(
3196        struct btrfs_fs_info *fs_info)
3197{
3198        struct btrfs_ioctl_space_info space;
3199        struct btrfs_space_info *sinfo;
3200        u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3201                       BTRFS_BLOCK_GROUP_SYSTEM,
3202                       BTRFS_BLOCK_GROUP_METADATA,
3203                       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3204        int num_types = 4;
3205        int i;
3206        int c;
3207        int num_tolerated_disk_barrier_failures =
3208                (int)fs_info->fs_devices->num_devices;
3209
3210        for (i = 0; i < num_types; i++) {
3211                struct btrfs_space_info *tmp;
3212
3213                sinfo = NULL;
3214                rcu_read_lock();
3215                list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3216                        if (tmp->flags == types[i]) {
3217                                sinfo = tmp;
3218                                break;
3219                        }
3220                }
3221                rcu_read_unlock();
3222
3223                if (!sinfo)
3224                        continue;
3225
3226                down_read(&sinfo->groups_sem);
3227                for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3228                        if (!list_empty(&sinfo->block_groups[c])) {
3229                                u64 flags;
3230
3231                                btrfs_get_block_group_info(
3232                                        &sinfo->block_groups[c], &space);
3233                                if (space.total_bytes == 0 ||
3234                                    space.used_bytes == 0)
3235                                        continue;
3236                                flags = space.flags;
3237                                /*
3238                                 * return
3239                                 * 0: if dup, single or RAID0 is configured for
3240                                 *    any of metadata, system or data, else
3241                                 * 1: if RAID5 is configured, or if RAID1 or
3242                                 *    RAID10 is configured and only two mirrors
3243                                 *    are used, else
3244                                 * 2: if RAID6 is configured, else
3245                                 * num_mirrors - 1: if RAID1 or RAID10 is
3246                                 *                  configured and more than
3247                                 *                  2 mirrors are used.
3248                                 */
3249                                if (num_tolerated_disk_barrier_failures > 0 &&
3250                                    ((flags & (BTRFS_BLOCK_GROUP_DUP |
3251                                               BTRFS_BLOCK_GROUP_RAID0)) ||
3252                                     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3253                                      == 0)))
3254                                        num_tolerated_disk_barrier_failures = 0;
3255                                else if (num_tolerated_disk_barrier_failures > 1) {
3256                                        if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3257                                            BTRFS_BLOCK_GROUP_RAID5 |
3258                                            BTRFS_BLOCK_GROUP_RAID10)) {
3259                                                num_tolerated_disk_barrier_failures = 1;
3260                                        } else if (flags &
3261                                                   BTRFS_BLOCK_GROUP_RAID6) {
3262                                                num_tolerated_disk_barrier_failures = 2;
3263                                        }
3264                                }
3265                        }
3266                }
3267                up_read(&sinfo->groups_sem);
3268        }
3269
3270        return num_tolerated_disk_barrier_failures;
3271}
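
    /*
     * worked example, assuming a hypothetical two-device filesystem: with
     * metadata and system chunks on RAID1 but data on RAID0, the RAID0
     * chunk forces the result to 0, so one device failing its flush
     * already aborts write_all_supers() and one missing device forbids a
     * writeable mount; with every chunk type on RAID1 the result stays
     * at 1.
     */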
3272
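    /*
     * write the super block out to every writeable device: send the flush
     * barriers first (unless nobarrier is set), then drive
     * write_dev_supers() in its two passes, wait == 0 to submit the
     * copies and wait == 1 to collect the results.
     */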
3273static int write_all_supers(struct btrfs_root *root, int max_mirrors)
3274{
3275        struct list_head *head;
3276        struct btrfs_device *dev;
3277        struct btrfs_super_block *sb;
3278        struct btrfs_dev_item *dev_item;
3279        int ret;
3280        int do_barriers;
3281        int max_errors;
3282        int total_errors = 0;
3283        u64 flags;
3284
3285        max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
3286        do_barriers = !btrfs_test_opt(root, NOBARRIER);
3287        backup_super_roots(root->fs_info);
3288
3289        sb = root->fs_info->super_for_commit;
3290        dev_item = &sb->dev_item;
3291
3292        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3293        head = &root->fs_info->fs_devices->devices;
3294
3295        if (do_barriers) {
3296                ret = barrier_all_devices(root->fs_info);
3297                if (ret) {
3298                        mutex_unlock(
3299                                &root->fs_info->fs_devices->device_list_mutex);
3300                        btrfs_error(root->fs_info, ret,
3301                                    "errors while submitting device barriers.");
3302                        return ret;
3303                }
3304        }
3305
3306        list_for_each_entry_rcu(dev, head, dev_list) {
3307                if (!dev->bdev) {
3308                        total_errors++;
3309                        continue;
3310                }
3311                if (!dev->in_fs_metadata || !dev->writeable)
3312                        continue;
3313
3314                btrfs_set_stack_device_generation(dev_item, 0);
3315                btrfs_set_stack_device_type(dev_item, dev->type);
3316                btrfs_set_stack_device_id(dev_item, dev->devid);
3317                btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
3318                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
3319                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3320                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3321                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3322                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3323                memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3324
3325                flags = btrfs_super_flags(sb);
3326                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3327
3328                ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3329                if (ret)
3330                        total_errors++;
3331        }
3332        if (total_errors > max_errors) {
3333                printk(KERN_ERR "btrfs: %d errors while writing supers\n",
3334                       total_errors);
3335
3336                /* This shouldn't happen. FUA is masked off if unsupported */
3337                BUG();
3338        }
3339
3340        total_errors = 0;
3341        list_for_each_entry_rcu(dev, head, dev_list) {
3342                if (!dev->bdev)
3343                        continue;
3344                if (!dev->in_fs_metadata || !dev->writeable)
3345                        continue;
3346
3347                ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3348                if (ret)
3349                        total_errors++;
3350        }
3351        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3352        if (total_errors > max_errors) {
3353                btrfs_error(root->fs_info, -EIO,
3354                            "%d errors while writing supers", total_errors);
3355                return -EIO;
3356        }
3357        return 0;
3358}
3359
3360int write_ctree_super(struct btrfs_trans_handle *trans,
3361                      struct btrfs_root *root, int max_mirrors)
3362{
3363        int ret;
3364
3365        ret = write_all_supers(root, max_mirrors);
3366        return ret;
3367}
3368
3369void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
3370{
3371        spin_lock(&fs_info->fs_roots_radix_lock);
3372        radix_tree_delete(&fs_info->fs_roots_radix,
3373                          (unsigned long)root->root_key.objectid);
3374        spin_unlock(&fs_info->fs_roots_radix_lock);
3375
3376        if (btrfs_root_refs(&root->root_item) == 0)
3377                synchronize_srcu(&fs_info->subvol_srcu);
3378
3379        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3380                btrfs_free_log(NULL, root);
3381                btrfs_free_log_root_tree(NULL, fs_info);
3382        }
3383
3384        __btrfs_remove_free_space_cache(root->free_ino_pinned);
3385        __btrfs_remove_free_space_cache(root->free_ino_ctl);
3386        free_fs_root(root);
3387}
3388
3389static void free_fs_root(struct btrfs_root *root)
3390{
3391        iput(root->cache_inode);
3392        WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3393        if (root->anon_dev)
3394                free_anon_bdev(root->anon_dev);
3395        free_extent_buffer(root->node);
3396        free_extent_buffer(root->commit_root);
3397        kfree(root->free_ino_ctl);
3398        kfree(root->free_ino_pinned);
3399        kfree(root->name);
3400        kfree(root);
3401}
3402
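    /*
     * walk every fs root currently cached in the radix tree and run
     * orphan cleanup on it; called for writeable mounts once log replay
     * has finished.
     */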
3403int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3404{
3405        u64 root_objectid = 0;
3406        struct btrfs_root *gang[8];
3407        int i;
3408        int ret;
3409
3410        while (1) {
3411                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3412                                             (void **)gang, root_objectid,
3413                                             ARRAY_SIZE(gang));
3414                if (!ret)
3415                        break;
3416
3417                root_objectid = gang[ret - 1]->root_key.objectid + 1;
3418                for (i = 0; i < ret; i++) {
3419                        int err;
3420
3421                        root_objectid = gang[i]->root_key.objectid;
3422                        err = btrfs_orphan_cleanup(gang[i]);
3423                        if (err)
3424                                return err;
3425                }
3426                root_objectid++;
3427        }
3428        return 0;
3429}
3430
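    /*
     * flush everything out for a clean shutdown: run the delayed iputs,
     * commit the running transaction twice (the second run drops the
     * original snapshot, see below) and finally write the super blocks.
     */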
3431int btrfs_commit_super(struct btrfs_root *root)
3432{
3433        struct btrfs_trans_handle *trans;
3434        int ret;
3435
3436        mutex_lock(&root->fs_info->cleaner_mutex);
3437        btrfs_run_delayed_iputs(root);
3438        mutex_unlock(&root->fs_info->cleaner_mutex);
3439        wake_up_process(root->fs_info->cleaner_kthread);
3440
3441        /* wait until ongoing cleanup work is done */
3442        down_write(&root->fs_info->cleanup_work_sem);
3443        up_write(&root->fs_info->cleanup_work_sem);
3444
3445        trans = btrfs_join_transaction(root);
3446        if (IS_ERR(trans))
3447                return PTR_ERR(trans);
3448        ret = btrfs_commit_transaction(trans, root);
3449        if (ret)
3450                return ret;
3451        /* run commit again to drop the original snapshot */
3452        trans = btrfs_join_transaction(root);
3453        if (IS_ERR(trans))
3454                return PTR_ERR(trans);
3455        ret = btrfs_commit_transaction(trans, root);
3456        if (ret)
3457                return ret;
3458        ret = btrfs_write_and_wait_transaction(NULL, root);
3459        if (ret) {
3460                btrfs_error(root->fs_info, ret,
3461                            "Failed to sync btree inode to disk.");
3462                return ret;
3463        }
3464
3465        ret = write_ctree_super(NULL, root, 0);
3466        return ret;
3467}
3468
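    /*
     * tear down a mounted filesystem: pause balance and dev-replace so
     * they can resume on the next mount, stop the worker kthreads, commit
     * (or error-commit) the final transaction and release everything
     * open_ctree() set up.
     */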
3469int close_ctree(struct btrfs_root *root)
3470{
3471        struct btrfs_fs_info *fs_info = root->fs_info;
3472        int ret;
3473
3474        fs_info->closing = 1;
3475        smp_mb();
3476
3477        /* pause restriper - we want to resume on mount */
3478        btrfs_pause_balance(fs_info);
3479
3480        btrfs_dev_replace_suspend_for_unmount(fs_info);
3481
3482        btrfs_scrub_cancel(fs_info);
3483
3484        /* wait for any defraggers to finish */
3485        wait_event(fs_info->transaction_wait,
3486                   (atomic_read(&fs_info->defrag_running) == 0));
3487
3488        /* clear out the rbtree of defraggable inodes */
3489        btrfs_cleanup_defrag_inodes(fs_info);
3490
3491        if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3492                ret = btrfs_commit_super(root);
3493                if (ret)
3494                        printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3495        }
3496
3497        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3498                btrfs_error_commit_super(root);
3499
3500        btrfs_put_block_group_cache(fs_info);
3501
3502        kthread_stop(fs_info->transaction_kthread);
3503        kthread_stop(fs_info->cleaner_kthread);
3504
3505        fs_info->closing = 2;
3506        smp_mb();
3507
3508        btrfs_free_qgroup_config(root->fs_info);
3509
3510        if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3511                printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
3512                       percpu_counter_sum(&fs_info->delalloc_bytes));
3513        }
3514
3515        btrfs_free_block_groups(fs_info);
3516
3517        btrfs_stop_all_workers(fs_info);
3518
3519        del_fs_roots(fs_info);
3520
3521        free_root_pointers(fs_info, 1);
3522
3523        iput(fs_info->btree_inode);
3524
3525#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3526        if (btrfs_test_opt(root, CHECK_INTEGRITY))
3527                btrfsic_unmount(root, fs_info->fs_devices);
3528#endif
3529
3530        btrfs_close_devices(fs_info->fs_devices);
3531        btrfs_mapping_tree_free(&fs_info->mapping_tree);
3532
3533        percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3534        percpu_counter_destroy(&fs_info->delalloc_bytes);
3535        bdi_destroy(&fs_info->bdi);
3536        cleanup_srcu_struct(&fs_info->subvol_srcu);
3537
3538        btrfs_free_stripe_hash_table(fs_info);
3539
3540        return 0;
3541}
3542
3543int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3544                          int atomic)
3545{
3546        int ret;
3547        struct inode *btree_inode = buf->pages[0]->mapping->host;
3548
3549        ret = extent_buffer_uptodate(buf);
3550        if (!ret)
3551                return ret;
3552
3553        ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3554                                    parent_transid, atomic);
3555        if (ret == -EAGAIN)
3556                return ret;
3557        return !ret;
3558}
3559
3560int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3561{
3562        return set_extent_buffer_uptodate(buf);
3563}
3564
3565void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3566{
3567        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3568        u64 transid = btrfs_header_generation(buf);
3569        int was_dirty;
3570
3571        btrfs_assert_tree_locked(buf);
3572        if (transid != root->fs_info->generation)
3573                WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
3574                       "found %llu running %llu\n",
3575                        (unsigned long long)buf->start,
3576                        (unsigned long long)transid,
3577                        (unsigned long long)root->fs_info->generation);
3578        was_dirty = set_extent_buffer_dirty(buf);
3579        if (!was_dirty)
3580                __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
3581                                     buf->len,
3582                                     root->fs_info->dirty_metadata_batch);
3583}
3584
3585static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3586                                        int flush_delayed)
3587{
3588        /*
3589         * looks as though older kernels can get into trouble with
3590         * this code; they end up stuck in balance_dirty_pages forever
3591         */
3592        int ret;
3593
3594        if (current->flags & PF_MEMALLOC)
3595                return;
3596
3597        if (flush_delayed)
3598                btrfs_balance_delayed_items(root);
3599
3600        ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
3601                                     BTRFS_DIRTY_METADATA_THRESH);
3602        if (ret > 0) {
3603                balance_dirty_pages_ratelimited(
3604                                   root->fs_info->btree_inode->i_mapping);
3605        }
3606        return;
3607}
3608
3609void btrfs_btree_balance_dirty(struct btrfs_root *root)
3610{
3611        __btrfs_btree_balance_dirty(root, 1);
3612}
3613
3614void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
3615{
3616        __btrfs_btree_balance_dirty(root, 0);
3617}
3618
3619int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3620{
3621        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3622        return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3623}
3624
3625static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3626                              int read_only)
3627{
3628        /*
3629         * Placeholder for checks
3630         */
3631        return 0;
3632}
3633
3634static void btrfs_error_commit_super(struct btrfs_root *root)
3635{
3636        mutex_lock(&root->fs_info->cleaner_mutex);
3637        btrfs_run_delayed_iputs(root);
3638        mutex_unlock(&root->fs_info->cleaner_mutex);
3639
3640        down_write(&root->fs_info->cleanup_work_sem);
3641        up_write(&root->fs_info->cleanup_work_sem);
3642
3643        /* cleanup FS via transaction */
3644        btrfs_cleanup_transaction(root);
3645}
3646
3647static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
3648                                             struct btrfs_root *root)
3649{
3650        struct btrfs_inode *btrfs_inode;
3651        struct list_head splice;
3652
3653        INIT_LIST_HEAD(&splice);
3654
3655        mutex_lock(&root->fs_info->ordered_operations_mutex);
3656        spin_lock(&root->fs_info->ordered_extent_lock);
3657
3658        list_splice_init(&t->ordered_operations, &splice);
3659        while (!list_empty(&splice)) {
3660                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3661                                         ordered_operations);
3662
3663                list_del_init(&btrfs_inode->ordered_operations);
3664                spin_unlock(&root->fs_info->ordered_extent_lock);
3665
3666                btrfs_invalidate_inodes(btrfs_inode->root);
3667
3668                spin_lock(&root->fs_info->ordered_extent_lock);
3669        }
3670
3671        spin_unlock(&root->fs_info->ordered_extent_lock);
3672        mutex_unlock(&root->fs_info->ordered_operations_mutex);
3673}
3674
3675static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3676{
3677        struct btrfs_ordered_extent *ordered;
3678
3679        spin_lock(&root->fs_info->ordered_extent_lock);
3680        /*
3681         * This will just short circuit the ordered completion stuff which will
3682         * make sure the ordered extent gets properly cleaned up.
3683         */
3684        list_for_each_entry(ordered, &root->fs_info->ordered_extents,
3685                            root_extent_list)
3686                set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
3687        spin_unlock(&root->fs_info->ordered_extent_lock);
3688}
3689
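    /*
     * throw away every delayed ref still queued on a transaction that is
     * being torn down: pin any extent whose space was already reserved,
     * then drop each ref without processing it.
     */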
3690int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3691                               struct btrfs_root *root)
3692{
3693        struct rb_node *node;
3694        struct btrfs_delayed_ref_root *delayed_refs;
3695        struct btrfs_delayed_ref_node *ref;
3696        int ret = 0;
3697
3698        delayed_refs = &trans->delayed_refs;
3699
3700        spin_lock(&delayed_refs->lock);
3701        if (delayed_refs->num_entries == 0) {
3702                spin_unlock(&delayed_refs->lock);
3703                printk(KERN_INFO "delayed_refs has NO entry\n");
3704                return ret;
3705        }
3706
3707        while ((node = rb_first(&delayed_refs->root)) != NULL) {
3708                struct btrfs_delayed_ref_head *head = NULL;
3709
3710                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3711                atomic_set(&ref->refs, 1);
3712                if (btrfs_delayed_ref_is_head(ref)) {
3713
3714                        head = btrfs_delayed_node_to_head(ref);
3715                        if (!mutex_trylock(&head->mutex)) {
3716                                atomic_inc(&ref->refs);
3717                                spin_unlock(&delayed_refs->lock);
3718
3719                                /* Need to wait for the delayed ref to run */
3720                                mutex_lock(&head->mutex);
3721                                mutex_unlock(&head->mutex);
3722                                btrfs_put_delayed_ref(ref);
3723
3724                                spin_lock(&delayed_refs->lock);
3725                                continue;
3726                        }
3727
3728                        if (head->must_insert_reserved)
3729                                btrfs_pin_extent(root, ref->bytenr,
3730                                                 ref->num_bytes, 1);
3731                        btrfs_free_delayed_extent_op(head->extent_op);
3732                        delayed_refs->num_heads--;
3733                        if (list_empty(&head->cluster))
3734                                delayed_refs->num_heads_ready--;
3735                        list_del_init(&head->cluster);
3736                }
3737
3738                ref->in_tree = 0;
3739                rb_erase(&ref->rb_node, &delayed_refs->root);
3740                delayed_refs->num_entries--;
3741                if (head)
3742                        mutex_unlock(&head->mutex);
3743                spin_unlock(&delayed_refs->lock);
3744                btrfs_put_delayed_ref(ref);
3745
3746                cond_resched();
3747                spin_lock(&delayed_refs->lock);
3748        }
3749
3750        spin_unlock(&delayed_refs->lock);
3751
3752        return ret;
3753}
3754
3755static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
3756{
3757        struct btrfs_pending_snapshot *snapshot;
3758        struct list_head splice;
3759
3760        INIT_LIST_HEAD(&splice);
3761
3762        list_splice_init(&t->pending_snapshots, &splice);
3763
3764        while (!list_empty(&splice)) {
3765                snapshot = list_entry(splice.next,
3766                                      struct btrfs_pending_snapshot,
3767                                      list);
3768                snapshot->error = -ECANCELED;
3769                list_del_init(&snapshot->list);
3770        }
3771}
3772
3773static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3774{
3775        struct btrfs_inode *btrfs_inode;
3776        struct list_head splice;
3777
3778        INIT_LIST_HEAD(&splice);
3779
3780        spin_lock(&root->fs_info->delalloc_lock);
3781        list_splice_init(&root->fs_info->delalloc_inodes, &splice);
3782
3783        while (!list_empty(&splice)) {
3784                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3785                                    delalloc_inodes);
3786
3787                list_del_init(&btrfs_inode->delalloc_inodes);
3788                clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3789                          &btrfs_inode->runtime_flags);
3790                spin_unlock(&root->fs_info->delalloc_lock);
3791
3792                btrfs_invalidate_inodes(btrfs_inode->root);
3793
3794                spin_lock(&root->fs_info->delalloc_lock);
3795        }
3796
3797        spin_unlock(&root->fs_info->delalloc_lock);
3798}
3799
3800static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3801                                        struct extent_io_tree *dirty_pages,
3802                                        int mark)
3803{
3804        int ret;
3805        struct extent_buffer *eb;
3806        u64 start = 0;
3807        u64 end;
3808
3809        while (1) {
3810                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3811                                            mark, NULL);
3812                if (ret)
3813                        break;
3814
3815                clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3816                while (start <= end) {
3817                        eb = btrfs_find_tree_block(root, start,
3818                                                   root->leafsize);
3819                        start += root->leafsize;
3820                        if (!eb)
3821                                continue;
3822                        wait_on_extent_buffer_writeback(eb);
3823
3824                        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3825                                               &eb->bflags))
3826                                clear_extent_buffer_dirty(eb);
3827                        free_extent_buffer_stale(eb);
3828                }
3829        }
3830
3831        return ret;
3832}
3833
3834static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3835                                       struct extent_io_tree *pinned_extents)
3836{
3837        struct extent_io_tree *unpin;
3838        u64 start;
3839        u64 end;
3840        int ret;
3841        bool loop = true;
3842
3843        unpin = pinned_extents;
3844again:
3845        while (1) {
3846                ret = find_first_extent_bit(unpin, 0, &start, &end,
3847                                            EXTENT_DIRTY, NULL);
3848                if (ret)
3849                        break;
3850
3851                /* discard the freed range if the discard mount option is set */
3852                if (btrfs_test_opt(root, DISCARD))
3853                        ret = btrfs_error_discard_extent(root, start,
3854                                                         end + 1 - start,
3855                                                         NULL);
3856
3857                clear_extent_dirty(unpin, start, end, GFP_NOFS);
3858                btrfs_error_unpin_extent_range(root, start, end);
3859                cond_resched();
3860        }
3861
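            /*
             * pinned extents are tracked in two trees that swap roles at
             * each commit; run the loop a second time to clear whichever
             * tree we did not start with.
             */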
3862        if (loop) {
3863                if (unpin == &root->fs_info->freed_extents[0])
3864                        unpin = &root->fs_info->freed_extents[1];
3865                else
3866                        unpin = &root->fs_info->freed_extents[0];
3867                loop = false;
3868                goto again;
3869        }
3870
3871        return 0;
3872}
3873
3874void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3875                                   struct btrfs_root *root)
3876{
3877        btrfs_destroy_delayed_refs(cur_trans, root);
3878        btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
3879                                cur_trans->dirty_pages.dirty_bytes);
3880
3881        /* FIXME: cleanup wait for commit */
3882        cur_trans->in_commit = 1;
3883        cur_trans->blocked = 1;
3884        wake_up(&root->fs_info->transaction_blocked_wait);
3885
3886        btrfs_evict_pending_snapshots(cur_trans);
3887
3888        cur_trans->blocked = 0;
3889        wake_up(&root->fs_info->transaction_wait);
3890
3891        cur_trans->commit_done = 1;
3892        wake_up(&cur_trans->commit_wait);
3893
3894        btrfs_destroy_delayed_inodes(root);
3895        btrfs_assert_delayed_root_empty(root);
3896
3897        btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3898                                     EXTENT_DIRTY);
3899        btrfs_destroy_pinned_extent(root,
3900                                    root->fs_info->pinned_extents);
3901
3902        /*
3903        memset(cur_trans, 0, sizeof(*cur_trans));
3904        kmem_cache_free(btrfs_transaction_cachep, cur_trans);
3905        */
3906}
3907
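    /*
     * used on the error and failed-mount paths: walk every transaction
     * still on the trans_list and tear it down by hand, waking anything
     * blocked on a commit so the unmount can make progress.
     */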
3908static int btrfs_cleanup_transaction(struct btrfs_root *root)
3909{
3910        struct btrfs_transaction *t;
3911        LIST_HEAD(list);
3912
3913        mutex_lock(&root->fs_info->transaction_kthread_mutex);
3914
3915        spin_lock(&root->fs_info->trans_lock);
3916        list_splice_init(&root->fs_info->trans_list, &list);
3917        root->fs_info->trans_no_join = 1;
3918        spin_unlock(&root->fs_info->trans_lock);
3919
3920        while (!list_empty(&list)) {
3921                t = list_entry(list.next, struct btrfs_transaction, list);
3922
3923                btrfs_destroy_ordered_operations(t, root);
3924
3925                btrfs_destroy_ordered_extents(root);
3926
3927                btrfs_destroy_delayed_refs(t, root);
3928
3929                /* FIXME: cleanup wait for commit */
3930                t->in_commit = 1;
3931                t->blocked = 1;
3932                smp_mb();
3933                if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3934                        wake_up(&root->fs_info->transaction_blocked_wait);
3935
3936                btrfs_evict_pending_snapshots(t);
3937
3938                t->blocked = 0;
3939                smp_mb();
3940                if (waitqueue_active(&root->fs_info->transaction_wait))
3941                        wake_up(&root->fs_info->transaction_wait);
3942
3943                t->commit_done = 1;
3944                smp_mb();
3945                if (waitqueue_active(&t->commit_wait))
3946                        wake_up(&t->commit_wait);
3947
3948                btrfs_destroy_delayed_inodes(root);
3949                btrfs_assert_delayed_root_empty(root);
3950
3951                btrfs_destroy_delalloc_inodes(root);
3952
3953                spin_lock(&root->fs_info->trans_lock);
3954                root->fs_info->running_transaction = NULL;
3955                spin_unlock(&root->fs_info->trans_lock);
3956
3957                btrfs_destroy_marked_extents(root, &t->dirty_pages,
3958                                             EXTENT_DIRTY);
3959
3960                btrfs_destroy_pinned_extent(root,
3961                                            root->fs_info->pinned_extents);
3962
3963                atomic_set(&t->use_count, 0);
3964                list_del_init(&t->list);
3965                memset(t, 0, sizeof(*t));
3966                kmem_cache_free(btrfs_transaction_cachep, t);
3967        }
3968
3969        spin_lock(&root->fs_info->trans_lock);
3970        root->fs_info->trans_no_join = 0;
3971        spin_unlock(&root->fs_info->trans_lock);
3972        mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3973
3974        return 0;
3975}
3976
3977static struct extent_io_ops btree_extent_io_ops = {
3978        .readpage_end_io_hook = btree_readpage_end_io_hook,
3979        .readpage_io_failed_hook = btree_io_failed_hook,
3980        .submit_bio_hook = btree_submit_bio_hook,
3981        /* note we're sharing with inode.c for the merge bio hook */
3982        .merge_bio_hook = btrfs_merge_bio_hook,
3983};
3984