linux/fs/btrfs/relocation.c
   1/*
   2 * Copyright (C) 2009 Oracle.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 021110-1307, USA.
  17 */
  18
  19#include <linux/sched.h>
  20#include <linux/pagemap.h>
  21#include <linux/writeback.h>
  22#include <linux/blkdev.h>
  23#include <linux/rbtree.h>
  24#include <linux/slab.h>
  25#include "ctree.h"
  26#include "disk-io.h"
  27#include "transaction.h"
  28#include "volumes.h"
  29#include "locking.h"
  30#include "btrfs_inode.h"
  31#include "async-thread.h"
  32#include "free-space-cache.h"
  33#include "inode-map.h"
  34
  35/*
  36 * backref_node, mapping_node and tree_block start with this
  37 */
  38struct tree_entry {
  39        struct rb_node rb_node;
  40        u64 bytenr;
  41};
  42
  43/*
   44 * represents a tree block in the backref cache
  45 */
  46struct backref_node {
  47        struct rb_node rb_node;
  48        u64 bytenr;
  49
  50        u64 new_bytenr;
   51        /* objectid of tree block owner, may not be up to date */
  52        u64 owner;
  53        /* link to pending, changed or detached list */
  54        struct list_head list;
   55        /* list of upper level blocks that reference this block */
  56        struct list_head upper;
  57        /* list of child blocks in the cache */
  58        struct list_head lower;
  59        /* NULL if this node is not tree root */
  60        struct btrfs_root *root;
   61        /* extent buffer obtained by COWing the block */
  62        struct extent_buffer *eb;
  63        /* level of tree block */
  64        unsigned int level:8;
   65        /* is the block in a non-reference-counted tree */
  66        unsigned int cowonly:1;
  67        /* 1 if no child node in the cache */
  68        unsigned int lowest:1;
  69        /* is the extent buffer locked */
  70        unsigned int locked:1;
  71        /* has the block been processed */
  72        unsigned int processed:1;
  73        /* have backrefs of this block been checked */
  74        unsigned int checked:1;
  75        /*
   76         * 1 if the corresponding block has been cowed but some upper
   77         * level block pointers may not yet point to the new location
  78         */
  79        unsigned int pending:1;
  80        /*
  81         * 1 if the backref node isn't connected to any other
  82         * backref node.
  83         */
  84        unsigned int detached:1;
  85};
  86
  87/*
   88 * represents a block pointer in the backref cache
  89 */
  90struct backref_edge {
  91        struct list_head list[2];
  92        struct backref_node *node[2];
  93};
  94
  95#define LOWER   0
  96#define UPPER   1
  97
  98struct backref_cache {
  99        /* red black tree of all backref nodes in the cache */
 100        struct rb_root rb_root;
 101        /* for passing backref nodes to btrfs_reloc_cow_block */
 102        struct backref_node *path[BTRFS_MAX_LEVEL];
 103        /*
 104         * list of blocks that have been cowed but some block
 105         * pointers in upper level blocks may not reflect the
 106         * new location
 107         */
 108        struct list_head pending[BTRFS_MAX_LEVEL];
 109        /* list of backref nodes with no child node */
 110        struct list_head leaves;
 111        /* list of blocks that have been cowed in current transaction */
 112        struct list_head changed;
 113        /* list of detached backref node. */
 114        struct list_head detached;
 115
 116        u64 last_trans;
 117
 118        int nr_nodes;
 119        int nr_edges;
 120};
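
/*
 * How the three structures above fit together (illustrative sketch only;
 * 'lower', 'upper' and 'edge' are hypothetical locals, following the
 * pattern used in build_backref_tree() below):
 *
 *	edge->node[LOWER] = lower;
 *	edge->node[UPPER] = upper;
 *	list_add_tail(&edge->list[LOWER], &lower->upper);
 *	list_add_tail(&edge->list[UPPER], &upper->lower);
 *
 * an edge hangs off the lower node's 'upper' list via list[LOWER] and off
 * the upper node's 'lower' list via list[UPPER], while backref_cache.rb_root
 * indexes every node by bytenr.
 */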
 121
 122/*
 123 * map address of tree root to tree
 124 */
 125struct mapping_node {
 126        struct rb_node rb_node;
 127        u64 bytenr;
 128        void *data;
 129};
 130
 131struct mapping_tree {
 132        struct rb_root rb_root;
 133        spinlock_t lock;
 134};
 135
 136/*
  137 * represents a tree block to process
 138 */
 139struct tree_block {
 140        struct rb_node rb_node;
 141        u64 bytenr;
 142        struct btrfs_key key;
 143        unsigned int level:8;
 144        unsigned int key_ready:1;
 145};
 146
 147#define MAX_EXTENTS 128
 148
 149struct file_extent_cluster {
 150        u64 start;
 151        u64 end;
 152        u64 boundary[MAX_EXTENTS];
 153        unsigned int nr;
 154};
 155
 156struct reloc_control {
 157        /* block group to relocate */
 158        struct btrfs_block_group_cache *block_group;
 159        /* extent tree */
 160        struct btrfs_root *extent_root;
 161        /* inode for moving data */
 162        struct inode *data_inode;
 163
 164        struct btrfs_block_rsv *block_rsv;
 165
 166        struct backref_cache backref_cache;
 167
 168        struct file_extent_cluster cluster;
 169        /* tree blocks have been processed */
 170        struct extent_io_tree processed_blocks;
 171        /* map start of tree root to corresponding reloc tree */
 172        struct mapping_tree reloc_root_tree;
 173        /* list of reloc trees */
 174        struct list_head reloc_roots;
 175        /* size of metadata reservation for merging reloc trees */
 176        u64 merging_rsv_size;
 177        /* size of relocated tree nodes */
 178        u64 nodes_relocated;
 179
 180        u64 search_start;
 181        u64 extents_found;
 182
 183        unsigned int stage:8;
 184        unsigned int create_reloc_tree:1;
 185        unsigned int merge_reloc_tree:1;
 186        unsigned int found_file_extent:1;
 187        unsigned int commit_transaction:1;
 188};
 189
 190/* stages of data relocation */
 191#define MOVE_DATA_EXTENTS       0
 192#define UPDATE_DATA_PTRS        1
 193
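/*
 * data relocation is done in two passes over a block group: in the
 * MOVE_DATA_EXTENTS stage the data is copied into rc->data_inode at its
 * new location, and in the UPDATE_DATA_PTRS stage the file extent items
 * in the referencing trees are rewritten to point at the copies.
 */
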
 194static void remove_backref_node(struct backref_cache *cache,
 195                                struct backref_node *node);
 196static void __mark_block_processed(struct reloc_control *rc,
 197                                   struct backref_node *node);
 198
 199static void mapping_tree_init(struct mapping_tree *tree)
 200{
 201        tree->rb_root = RB_ROOT;
 202        spin_lock_init(&tree->lock);
 203}
 204
 205static void backref_cache_init(struct backref_cache *cache)
 206{
 207        int i;
 208        cache->rb_root = RB_ROOT;
 209        for (i = 0; i < BTRFS_MAX_LEVEL; i++)
 210                INIT_LIST_HEAD(&cache->pending[i]);
 211        INIT_LIST_HEAD(&cache->changed);
 212        INIT_LIST_HEAD(&cache->detached);
 213        INIT_LIST_HEAD(&cache->leaves);
 214}
 215
 216static void backref_cache_cleanup(struct backref_cache *cache)
 217{
 218        struct backref_node *node;
 219        int i;
 220
 221        while (!list_empty(&cache->detached)) {
 222                node = list_entry(cache->detached.next,
 223                                  struct backref_node, list);
 224                remove_backref_node(cache, node);
 225        }
 226
 227        while (!list_empty(&cache->leaves)) {
 228                node = list_entry(cache->leaves.next,
 229                                  struct backref_node, lower);
 230                remove_backref_node(cache, node);
 231        }
 232
 233        cache->last_trans = 0;
 234
 235        for (i = 0; i < BTRFS_MAX_LEVEL; i++)
 236                BUG_ON(!list_empty(&cache->pending[i]));
 237        BUG_ON(!list_empty(&cache->changed));
 238        BUG_ON(!list_empty(&cache->detached));
 239        BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
 240        BUG_ON(cache->nr_nodes);
 241        BUG_ON(cache->nr_edges);
 242}
 243
 244static struct backref_node *alloc_backref_node(struct backref_cache *cache)
 245{
 246        struct backref_node *node;
 247
 248        node = kzalloc(sizeof(*node), GFP_NOFS);
 249        if (node) {
 250                INIT_LIST_HEAD(&node->list);
 251                INIT_LIST_HEAD(&node->upper);
 252                INIT_LIST_HEAD(&node->lower);
 253                RB_CLEAR_NODE(&node->rb_node);
 254                cache->nr_nodes++;
 255        }
 256        return node;
 257}
 258
 259static void free_backref_node(struct backref_cache *cache,
 260                              struct backref_node *node)
 261{
 262        if (node) {
 263                cache->nr_nodes--;
 264                kfree(node);
 265        }
 266}
 267
 268static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
 269{
 270        struct backref_edge *edge;
 271
 272        edge = kzalloc(sizeof(*edge), GFP_NOFS);
 273        if (edge)
 274                cache->nr_edges++;
 275        return edge;
 276}
 277
 278static void free_backref_edge(struct backref_cache *cache,
 279                              struct backref_edge *edge)
 280{
 281        if (edge) {
 282                cache->nr_edges--;
 283                kfree(edge);
 284        }
 285}
 286
 287static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
 288                                   struct rb_node *node)
 289{
 290        struct rb_node **p = &root->rb_node;
 291        struct rb_node *parent = NULL;
 292        struct tree_entry *entry;
 293
 294        while (*p) {
 295                parent = *p;
 296                entry = rb_entry(parent, struct tree_entry, rb_node);
 297
 298                if (bytenr < entry->bytenr)
 299                        p = &(*p)->rb_left;
 300                else if (bytenr > entry->bytenr)
 301                        p = &(*p)->rb_right;
 302                else
 303                        return parent;
 304        }
 305
 306        rb_link_node(node, parent, p);
 307        rb_insert_color(node, root);
 308        return NULL;
 309}
 310
 311static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
 312{
 313        struct rb_node *n = root->rb_node;
 314        struct tree_entry *entry;
 315
 316        while (n) {
 317                entry = rb_entry(n, struct tree_entry, rb_node);
 318
 319                if (bytenr < entry->bytenr)
 320                        n = n->rb_left;
 321                else if (bytenr > entry->bytenr)
 322                        n = n->rb_right;
 323                else
 324                        return n;
 325        }
 326        return NULL;
 327}
 328
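/*
 * tree_insert()/tree_search() work on any structure that starts with a
 * struct tree_entry (rb_node + bytenr).  A minimal lookup sketch for the
 * backref cache ('cache_lookup' is a hypothetical helper, shown only to
 * illustrate the rb_entry() conversion):
 *
 *	static struct backref_node *cache_lookup(struct backref_cache *cache,
 *						 u64 bytenr)
 *	{
 *		struct rb_node *rb_node;
 *
 *		rb_node = tree_search(&cache->rb_root, bytenr);
 *		if (!rb_node)
 *			return NULL;
 *		return rb_entry(rb_node, struct backref_node, rb_node);
 *	}
 */
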
 329static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
 330{
 331
 332        struct btrfs_fs_info *fs_info = NULL;
 333        struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
 334                                              rb_node);
 335        if (bnode->root)
 336                fs_info = bnode->root->fs_info;
 337        btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
 338                    "found at offset %llu\n", (unsigned long long)bytenr);
 339}
 340
 341/*
  342 * walk up backref nodes until reaching the node that represents a tree root
 343 */
 344static struct backref_node *walk_up_backref(struct backref_node *node,
 345                                            struct backref_edge *edges[],
 346                                            int *index)
 347{
 348        struct backref_edge *edge;
 349        int idx = *index;
 350
 351        while (!list_empty(&node->upper)) {
 352                edge = list_entry(node->upper.next,
 353                                  struct backref_edge, list[LOWER]);
 354                edges[idx++] = edge;
 355                node = edge->node[UPPER];
 356        }
 357        BUG_ON(node->detached);
 358        *index = idx;
 359        return node;
 360}
 361
 362/*
 363 * walk down backref nodes to find start of next reference path
 364 */
 365static struct backref_node *walk_down_backref(struct backref_edge *edges[],
 366                                              int *index)
 367{
 368        struct backref_edge *edge;
 369        struct backref_node *lower;
 370        int idx = *index;
 371
 372        while (idx > 0) {
 373                edge = edges[idx - 1];
 374                lower = edge->node[LOWER];
 375                if (list_is_last(&edge->list[LOWER], &lower->upper)) {
 376                        idx--;
 377                        continue;
 378                }
 379                edge = list_entry(edge->list[LOWER].next,
 380                                  struct backref_edge, list[LOWER]);
 381                edges[idx - 1] = edge;
 382                *index = idx;
 383                return edge->node[UPPER];
 384        }
 385        *index = 0;
 386        return NULL;
 387}
 388
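/*
 * walk_up_backref() and walk_down_backref() are used as a pair to visit
 * every reference path from a block up to the tree roots.  A rough usage
 * sketch (callers later in this file follow this shape; 'process_root' is
 * a hypothetical callback):
 *
 *	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
 *	struct backref_node *next = node;
 *	int index = 0;
 *
 *	while (next) {
 *		next = walk_up_backref(next, edges, &index);
 *		process_root(next->root);
 *		next = walk_down_backref(edges, &index);
 *	}
 */
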
 389static void unlock_node_buffer(struct backref_node *node)
 390{
 391        if (node->locked) {
 392                btrfs_tree_unlock(node->eb);
 393                node->locked = 0;
 394        }
 395}
 396
 397static void drop_node_buffer(struct backref_node *node)
 398{
 399        if (node->eb) {
 400                unlock_node_buffer(node);
 401                free_extent_buffer(node->eb);
 402                node->eb = NULL;
 403        }
 404}
 405
 406static void drop_backref_node(struct backref_cache *tree,
 407                              struct backref_node *node)
 408{
 409        BUG_ON(!list_empty(&node->upper));
 410
 411        drop_node_buffer(node);
 412        list_del(&node->list);
 413        list_del(&node->lower);
 414        if (!RB_EMPTY_NODE(&node->rb_node))
 415                rb_erase(&node->rb_node, &tree->rb_root);
 416        free_backref_node(tree, node);
 417}
 418
 419/*
 420 * remove a backref node from the backref cache
 421 */
 422static void remove_backref_node(struct backref_cache *cache,
 423                                struct backref_node *node)
 424{
 425        struct backref_node *upper;
 426        struct backref_edge *edge;
 427
 428        if (!node)
 429                return;
 430
 431        BUG_ON(!node->lowest && !node->detached);
 432        while (!list_empty(&node->upper)) {
 433                edge = list_entry(node->upper.next, struct backref_edge,
 434                                  list[LOWER]);
 435                upper = edge->node[UPPER];
 436                list_del(&edge->list[LOWER]);
 437                list_del(&edge->list[UPPER]);
 438                free_backref_edge(cache, edge);
 439
 440                if (RB_EMPTY_NODE(&upper->rb_node)) {
 441                        BUG_ON(!list_empty(&node->upper));
 442                        drop_backref_node(cache, node);
 443                        node = upper;
 444                        node->lowest = 1;
 445                        continue;
 446                }
 447                /*
  448                 * add the node to the leaf list if no other
  449                 * child blocks are cached.
 450                 */
 451                if (list_empty(&upper->lower)) {
 452                        list_add_tail(&upper->lower, &cache->leaves);
 453                        upper->lowest = 1;
 454                }
 455        }
 456
 457        drop_backref_node(cache, node);
 458}
 459
 460static void update_backref_node(struct backref_cache *cache,
 461                                struct backref_node *node, u64 bytenr)
 462{
 463        struct rb_node *rb_node;
 464        rb_erase(&node->rb_node, &cache->rb_root);
 465        node->bytenr = bytenr;
 466        rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
 467        if (rb_node)
 468                backref_tree_panic(rb_node, -EEXIST, bytenr);
 469}
 470
 471/*
 472 * update backref cache after a transaction commit
 473 */
 474static int update_backref_cache(struct btrfs_trans_handle *trans,
 475                                struct backref_cache *cache)
 476{
 477        struct backref_node *node;
 478        int level = 0;
 479
 480        if (cache->last_trans == 0) {
 481                cache->last_trans = trans->transid;
 482                return 0;
 483        }
 484
 485        if (cache->last_trans == trans->transid)
 486                return 0;
 487
 488        /*
 489         * detached nodes are used to avoid unnecessary backref
  490         * lookups. a transaction commit changes the extent tree,
 491         * so the detached nodes are no longer useful.
 492         */
 493        while (!list_empty(&cache->detached)) {
 494                node = list_entry(cache->detached.next,
 495                                  struct backref_node, list);
 496                remove_backref_node(cache, node);
 497        }
 498
 499        while (!list_empty(&cache->changed)) {
 500                node = list_entry(cache->changed.next,
 501                                  struct backref_node, list);
 502                list_del_init(&node->list);
 503                BUG_ON(node->pending);
 504                update_backref_node(cache, node, node->new_bytenr);
 505        }
 506
 507        /*
 508         * some nodes can be left in the pending list if there were
  509         * errors while processing the pending nodes.
 510         */
 511        for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
 512                list_for_each_entry(node, &cache->pending[level], list) {
 513                        BUG_ON(!node->pending);
 514                        if (node->bytenr == node->new_bytenr)
 515                                continue;
 516                        update_backref_node(cache, node, node->new_bytenr);
 517                }
 518        }
 519
 520        cache->last_trans = 0;
 521        return 1;
 522}
 523
 524
 525static int should_ignore_root(struct btrfs_root *root)
 526{
 527        struct btrfs_root *reloc_root;
 528
 529        if (!root->ref_cows)
 530                return 0;
 531
 532        reloc_root = root->reloc_root;
 533        if (!reloc_root)
 534                return 0;
 535
 536        if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
 537            root->fs_info->running_transaction->transid - 1)
 538                return 0;
 539        /*
  540         * if the reloc tree exists and was created in a previous
  541         * transaction, backref lookup can find the reloc tree, so
  542         * the backref node for the fs tree root is useless for
  543         * relocation.
 544         */
 545        return 1;
 546}
 547/*
 548 * find reloc tree by address of tree root
 549 */
 550static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
 551                                          u64 bytenr)
 552{
 553        struct rb_node *rb_node;
 554        struct mapping_node *node;
 555        struct btrfs_root *root = NULL;
 556
 557        spin_lock(&rc->reloc_root_tree.lock);
 558        rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
 559        if (rb_node) {
 560                node = rb_entry(rb_node, struct mapping_node, rb_node);
 561                root = (struct btrfs_root *)node->data;
 562        }
 563        spin_unlock(&rc->reloc_root_tree.lock);
 564        return root;
 565}
 566
 567static int is_cowonly_root(u64 root_objectid)
 568{
 569        if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
 570            root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
 571            root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
 572            root_objectid == BTRFS_DEV_TREE_OBJECTID ||
 573            root_objectid == BTRFS_TREE_LOG_OBJECTID ||
 574            root_objectid == BTRFS_CSUM_TREE_OBJECTID)
 575                return 1;
 576        return 0;
 577}
 578
 579static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
 580                                        u64 root_objectid)
 581{
 582        struct btrfs_key key;
 583
 584        key.objectid = root_objectid;
 585        key.type = BTRFS_ROOT_ITEM_KEY;
 586        if (is_cowonly_root(root_objectid))
 587                key.offset = 0;
 588        else
 589                key.offset = (u64)-1;
 590
 591        return btrfs_read_fs_root_no_name(fs_info, &key);
 592}
 593
 594#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
 595static noinline_for_stack
 596struct btrfs_root *find_tree_root(struct reloc_control *rc,
 597                                  struct extent_buffer *leaf,
 598                                  struct btrfs_extent_ref_v0 *ref0)
 599{
 600        struct btrfs_root *root;
 601        u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
 602        u64 generation = btrfs_ref_generation_v0(leaf, ref0);
 603
 604        BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);
 605
 606        root = read_fs_root(rc->extent_root->fs_info, root_objectid);
 607        BUG_ON(IS_ERR(root));
 608
 609        if (root->ref_cows &&
 610            generation != btrfs_root_generation(&root->root_item))
 611                return NULL;
 612
 613        return root;
 614}
 615#endif
 616
 617static noinline_for_stack
 618int find_inline_backref(struct extent_buffer *leaf, int slot,
 619                        unsigned long *ptr, unsigned long *end)
 620{
 621        struct btrfs_key key;
 622        struct btrfs_extent_item *ei;
 623        struct btrfs_tree_block_info *bi;
 624        u32 item_size;
 625
 626        btrfs_item_key_to_cpu(leaf, &key, slot);
 627
 628        item_size = btrfs_item_size_nr(leaf, slot);
 629#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
 630        if (item_size < sizeof(*ei)) {
 631                WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
 632                return 1;
 633        }
 634#endif
 635        ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
 636        WARN_ON(!(btrfs_extent_flags(leaf, ei) &
 637                  BTRFS_EXTENT_FLAG_TREE_BLOCK));
 638
 639        if (key.type == BTRFS_EXTENT_ITEM_KEY &&
 640            item_size <= sizeof(*ei) + sizeof(*bi)) {
 641                WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
 642                return 1;
 643        }
 644
 645        if (key.type == BTRFS_EXTENT_ITEM_KEY) {
 646                bi = (struct btrfs_tree_block_info *)(ei + 1);
 647                *ptr = (unsigned long)(bi + 1);
 648        } else {
 649                *ptr = (unsigned long)(ei + 1);
 650        }
 651        *end = (unsigned long)ei + item_size;
 652        return 0;
 653}
 654
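/*
 * find_inline_backref() returns the [*ptr, *end) byte range covering the
 * inline references of the extent item.  The caller walks that range
 * roughly like this (the loop in build_backref_tree() below follows the
 * same pattern):
 *
 *	while (ptr < end) {
 *		iref = (struct btrfs_extent_inline_ref *)ptr;
 *		type = btrfs_extent_inline_ref_type(leaf, iref);
 *		offset = btrfs_extent_inline_ref_offset(leaf, iref);
 *		... handle one reference ...
 *		ptr += btrfs_extent_inline_ref_size(type);
 *	}
 */
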
 655/*
  656 * build a backref tree for a given tree block. the root of the backref
  657 * tree corresponds to the tree block, and the leaves of the backref tree
  658 * correspond to the roots of the b-trees that reference the tree block.
  659 *
  660 * the basic idea of this function is to check backrefs of a given block
  661 * to find upper level blocks that reference the block, and then check
  662 * backrefs of these upper level blocks recursively. the recursion stops
  663 * when a tree root is reached or backrefs for the block are cached.
  664 *
  665 * NOTE: if we find that backrefs for a block are cached, we know backrefs
  666 * for all upper level blocks that directly/indirectly reference the
  667 * block are also cached.
 668 */
 669static noinline_for_stack
 670struct backref_node *build_backref_tree(struct reloc_control *rc,
 671                                        struct btrfs_key *node_key,
 672                                        int level, u64 bytenr)
 673{
 674        struct backref_cache *cache = &rc->backref_cache;
 675        struct btrfs_path *path1;
 676        struct btrfs_path *path2;
 677        struct extent_buffer *eb;
 678        struct btrfs_root *root;
 679        struct backref_node *cur;
 680        struct backref_node *upper;
 681        struct backref_node *lower;
 682        struct backref_node *node = NULL;
 683        struct backref_node *exist = NULL;
 684        struct backref_edge *edge;
 685        struct rb_node *rb_node;
 686        struct btrfs_key key;
 687        unsigned long end;
 688        unsigned long ptr;
 689        LIST_HEAD(list);
 690        LIST_HEAD(useless);
 691        int cowonly;
 692        int ret;
 693        int err = 0;
 694
 695        path1 = btrfs_alloc_path();
 696        path2 = btrfs_alloc_path();
 697        if (!path1 || !path2) {
 698                err = -ENOMEM;
 699                goto out;
 700        }
 701        path1->reada = 1;
 702        path2->reada = 2;
 703
 704        node = alloc_backref_node(cache);
 705        if (!node) {
 706                err = -ENOMEM;
 707                goto out;
 708        }
 709
 710        node->bytenr = bytenr;
 711        node->level = level;
 712        node->lowest = 1;
 713        cur = node;
 714again:
 715        end = 0;
 716        ptr = 0;
 717        key.objectid = cur->bytenr;
 718        key.type = BTRFS_METADATA_ITEM_KEY;
 719        key.offset = (u64)-1;
 720
 721        path1->search_commit_root = 1;
 722        path1->skip_locking = 1;
 723        ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
 724                                0, 0);
 725        if (ret < 0) {
 726                err = ret;
 727                goto out;
 728        }
 729        BUG_ON(!ret || !path1->slots[0]);
 730
 731        path1->slots[0]--;
 732
 733        WARN_ON(cur->checked);
 734        if (!list_empty(&cur->upper)) {
 735                /*
 736                 * the backref was added previously when processing
 737                 * backref of type BTRFS_TREE_BLOCK_REF_KEY
 738                 */
 739                BUG_ON(!list_is_singular(&cur->upper));
 740                edge = list_entry(cur->upper.next, struct backref_edge,
 741                                  list[LOWER]);
 742                BUG_ON(!list_empty(&edge->list[UPPER]));
 743                exist = edge->node[UPPER];
 744                /*
  745                 * add the upper level block to the pending list if we
  746                 * need to check its backrefs
 747                 */
 748                if (!exist->checked)
 749                        list_add_tail(&edge->list[UPPER], &list);
 750        } else {
 751                exist = NULL;
 752        }
 753
 754        while (1) {
 755                cond_resched();
 756                eb = path1->nodes[0];
 757
 758                if (ptr >= end) {
 759                        if (path1->slots[0] >= btrfs_header_nritems(eb)) {
 760                                ret = btrfs_next_leaf(rc->extent_root, path1);
 761                                if (ret < 0) {
 762                                        err = ret;
 763                                        goto out;
 764                                }
 765                                if (ret > 0)
 766                                        break;
 767                                eb = path1->nodes[0];
 768                        }
 769
 770                        btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
 771                        if (key.objectid != cur->bytenr) {
 772                                WARN_ON(exist);
 773                                break;
 774                        }
 775
 776                        if (key.type == BTRFS_EXTENT_ITEM_KEY ||
 777                            key.type == BTRFS_METADATA_ITEM_KEY) {
 778                                ret = find_inline_backref(eb, path1->slots[0],
 779                                                          &ptr, &end);
 780                                if (ret)
 781                                        goto next;
 782                        }
 783                }
 784
 785                if (ptr < end) {
 786                        /* update key for inline back ref */
 787                        struct btrfs_extent_inline_ref *iref;
 788                        iref = (struct btrfs_extent_inline_ref *)ptr;
 789                        key.type = btrfs_extent_inline_ref_type(eb, iref);
 790                        key.offset = btrfs_extent_inline_ref_offset(eb, iref);
 791                        WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
 792                                key.type != BTRFS_SHARED_BLOCK_REF_KEY);
 793                }
 794
 795                if (exist &&
 796                    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
 797                      exist->owner == key.offset) ||
 798                     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
 799                      exist->bytenr == key.offset))) {
 800                        exist = NULL;
 801                        goto next;
 802                }
 803
 804#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
 805                if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
 806                    key.type == BTRFS_EXTENT_REF_V0_KEY) {
 807                        if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
 808                                struct btrfs_extent_ref_v0 *ref0;
 809                                ref0 = btrfs_item_ptr(eb, path1->slots[0],
 810                                                struct btrfs_extent_ref_v0);
 811                                if (key.objectid == key.offset) {
 812                                        root = find_tree_root(rc, eb, ref0);
 813                                        if (root && !should_ignore_root(root))
 814                                                cur->root = root;
 815                                        else
 816                                                list_add(&cur->list, &useless);
 817                                        break;
 818                                }
 819                                if (is_cowonly_root(btrfs_ref_root_v0(eb,
 820                                                                      ref0)))
 821                                        cur->cowonly = 1;
 822                        }
 823#else
 824                BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
 825                if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
 826#endif
 827                        if (key.objectid == key.offset) {
 828                                /*
 829                                 * only root blocks of reloc trees use
  830                                 * backrefs of this type.
 831                                 */
 832                                root = find_reloc_root(rc, cur->bytenr);
 833                                BUG_ON(!root);
 834                                cur->root = root;
 835                                break;
 836                        }
 837
 838                        edge = alloc_backref_edge(cache);
 839                        if (!edge) {
 840                                err = -ENOMEM;
 841                                goto out;
 842                        }
 843                        rb_node = tree_search(&cache->rb_root, key.offset);
 844                        if (!rb_node) {
 845                                upper = alloc_backref_node(cache);
 846                                if (!upper) {
 847                                        free_backref_edge(cache, edge);
 848                                        err = -ENOMEM;
 849                                        goto out;
 850                                }
 851                                upper->bytenr = key.offset;
 852                                upper->level = cur->level + 1;
 853                                /*
  854                                 * backrefs for the upper level block aren't
  855                                 * cached, add the block to the pending list
 856                                 */
 857                                list_add_tail(&edge->list[UPPER], &list);
 858                        } else {
 859                                upper = rb_entry(rb_node, struct backref_node,
 860                                                 rb_node);
 861                                BUG_ON(!upper->checked);
 862                                INIT_LIST_HEAD(&edge->list[UPPER]);
 863                        }
 864                        list_add_tail(&edge->list[LOWER], &cur->upper);
 865                        edge->node[LOWER] = cur;
 866                        edge->node[UPPER] = upper;
 867
 868                        goto next;
 869                } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
 870                        goto next;
 871                }
 872
 873                /* key.type == BTRFS_TREE_BLOCK_REF_KEY */
 874                root = read_fs_root(rc->extent_root->fs_info, key.offset);
 875                if (IS_ERR(root)) {
 876                        err = PTR_ERR(root);
 877                        goto out;
 878                }
 879
 880                if (!root->ref_cows)
 881                        cur->cowonly = 1;
 882
 883                if (btrfs_root_level(&root->root_item) == cur->level) {
 884                        /* tree root */
 885                        BUG_ON(btrfs_root_bytenr(&root->root_item) !=
 886                               cur->bytenr);
 887                        if (should_ignore_root(root))
 888                                list_add(&cur->list, &useless);
 889                        else
 890                                cur->root = root;
 891                        break;
 892                }
 893
 894                level = cur->level + 1;
 895
 896                /*
  897                 * search the tree to find upper level blocks that
  898                 * reference the block.
 899                 */
 900                path2->search_commit_root = 1;
 901                path2->skip_locking = 1;
 902                path2->lowest_level = level;
 903                ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
 904                path2->lowest_level = 0;
 905                if (ret < 0) {
 906                        err = ret;
 907                        goto out;
 908                }
 909                if (ret > 0 && path2->slots[level] > 0)
 910                        path2->slots[level]--;
 911
 912                eb = path2->nodes[level];
 913                WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
 914                        cur->bytenr);
 915
 916                lower = cur;
 917                for (; level < BTRFS_MAX_LEVEL; level++) {
 918                        if (!path2->nodes[level]) {
 919                                BUG_ON(btrfs_root_bytenr(&root->root_item) !=
 920                                       lower->bytenr);
 921                                if (should_ignore_root(root))
 922                                        list_add(&lower->list, &useless);
 923                                else
 924                                        lower->root = root;
 925                                break;
 926                        }
 927
 928                        edge = alloc_backref_edge(cache);
 929                        if (!edge) {
 930                                err = -ENOMEM;
 931                                goto out;
 932                        }
 933
 934                        eb = path2->nodes[level];
 935                        rb_node = tree_search(&cache->rb_root, eb->start);
 936                        if (!rb_node) {
 937                                upper = alloc_backref_node(cache);
 938                                if (!upper) {
 939                                        free_backref_edge(cache, edge);
 940                                        err = -ENOMEM;
 941                                        goto out;
 942                                }
 943                                upper->bytenr = eb->start;
 944                                upper->owner = btrfs_header_owner(eb);
 945                                upper->level = lower->level + 1;
 946                                if (!root->ref_cows)
 947                                        upper->cowonly = 1;
 948
 949                                /*
 950                                 * if we know the block isn't shared
  951                                 * we can avoid checking its backrefs.
 952                                 */
 953                                if (btrfs_block_can_be_shared(root, eb))
 954                                        upper->checked = 0;
 955                                else
 956                                        upper->checked = 1;
 957
 958                                /*
  959                                 * add the block to the pending list if we
  960                                 * need to check its backrefs. only blocks
  961                                 * at 'cur->level + 1' are added to the
  962                                 * tail of the pending list. this guarantees
 963                                 * we check backrefs from lower level
 964                                 * blocks to upper level blocks.
 965                                 */
 966                                if (!upper->checked &&
 967                                    level == cur->level + 1) {
 968                                        list_add_tail(&edge->list[UPPER],
 969                                                      &list);
 970                                } else
 971                                        INIT_LIST_HEAD(&edge->list[UPPER]);
 972                        } else {
 973                                upper = rb_entry(rb_node, struct backref_node,
 974                                                 rb_node);
 975                                BUG_ON(!upper->checked);
 976                                INIT_LIST_HEAD(&edge->list[UPPER]);
 977                                if (!upper->owner)
 978                                        upper->owner = btrfs_header_owner(eb);
 979                        }
 980                        list_add_tail(&edge->list[LOWER], &lower->upper);
 981                        edge->node[LOWER] = lower;
 982                        edge->node[UPPER] = upper;
 983
 984                        if (rb_node)
 985                                break;
 986                        lower = upper;
 987                        upper = NULL;
 988                }
 989                btrfs_release_path(path2);
 990next:
 991                if (ptr < end) {
 992                        ptr += btrfs_extent_inline_ref_size(key.type);
 993                        if (ptr >= end) {
 994                                WARN_ON(ptr > end);
 995                                ptr = 0;
 996                                end = 0;
 997                        }
 998                }
 999                if (ptr >= end)
1000                        path1->slots[0]++;
1001        }
1002        btrfs_release_path(path1);
1003
1004        cur->checked = 1;
1005        WARN_ON(exist);
1006
1007        /* the pending list isn't empty, take the first block to process */
1008        if (!list_empty(&list)) {
1009                edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1010                list_del_init(&edge->list[UPPER]);
1011                cur = edge->node[UPPER];
1012                goto again;
1013        }
1014
1015        /*
 1016         * everything went well; connect the backref nodes and insert them
 1017         * into the cache.
1018         */
1019        BUG_ON(!node->checked);
1020        cowonly = node->cowonly;
1021        if (!cowonly) {
1022                rb_node = tree_insert(&cache->rb_root, node->bytenr,
1023                                      &node->rb_node);
1024                if (rb_node)
1025                        backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1026                list_add_tail(&node->lower, &cache->leaves);
1027        }
1028
1029        list_for_each_entry(edge, &node->upper, list[LOWER])
1030                list_add_tail(&edge->list[UPPER], &list);
1031
1032        while (!list_empty(&list)) {
1033                edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1034                list_del_init(&edge->list[UPPER]);
1035                upper = edge->node[UPPER];
1036                if (upper->detached) {
1037                        list_del(&edge->list[LOWER]);
1038                        lower = edge->node[LOWER];
1039                        free_backref_edge(cache, edge);
1040                        if (list_empty(&lower->upper))
1041                                list_add(&lower->list, &useless);
1042                        continue;
1043                }
1044
1045                if (!RB_EMPTY_NODE(&upper->rb_node)) {
1046                        if (upper->lowest) {
1047                                list_del_init(&upper->lower);
1048                                upper->lowest = 0;
1049                        }
1050
1051                        list_add_tail(&edge->list[UPPER], &upper->lower);
1052                        continue;
1053                }
1054
1055                BUG_ON(!upper->checked);
1056                BUG_ON(cowonly != upper->cowonly);
1057                if (!cowonly) {
1058                        rb_node = tree_insert(&cache->rb_root, upper->bytenr,
1059                                              &upper->rb_node);
1060                        if (rb_node)
1061                                backref_tree_panic(rb_node, -EEXIST,
1062                                                   upper->bytenr);
1063                }
1064
1065                list_add_tail(&edge->list[UPPER], &upper->lower);
1066
1067                list_for_each_entry(edge, &upper->upper, list[LOWER])
1068                        list_add_tail(&edge->list[UPPER], &list);
1069        }
1070        /*
1071         * process useless backref nodes. backref nodes for tree leaves
1072         * are deleted from the cache. backref nodes for upper level
1073         * tree blocks are left in the cache to avoid unnecessary backref
1074         * lookup.
1075         */
1076        while (!list_empty(&useless)) {
1077                upper = list_entry(useless.next, struct backref_node, list);
1078                list_del_init(&upper->list);
1079                BUG_ON(!list_empty(&upper->upper));
1080                if (upper == node)
1081                        node = NULL;
1082                if (upper->lowest) {
1083                        list_del_init(&upper->lower);
1084                        upper->lowest = 0;
1085                }
1086                while (!list_empty(&upper->lower)) {
1087                        edge = list_entry(upper->lower.next,
1088                                          struct backref_edge, list[UPPER]);
1089                        list_del(&edge->list[UPPER]);
1090                        list_del(&edge->list[LOWER]);
1091                        lower = edge->node[LOWER];
1092                        free_backref_edge(cache, edge);
1093
1094                        if (list_empty(&lower->upper))
1095                                list_add(&lower->list, &useless);
1096                }
1097                __mark_block_processed(rc, upper);
1098                if (upper->level > 0) {
1099                        list_add(&upper->list, &cache->detached);
1100                        upper->detached = 1;
1101                } else {
1102                        rb_erase(&upper->rb_node, &cache->rb_root);
1103                        free_backref_node(cache, upper);
1104                }
1105        }
1106out:
1107        btrfs_free_path(path1);
1108        btrfs_free_path(path2);
1109        if (err) {
1110                while (!list_empty(&useless)) {
1111                        lower = list_entry(useless.next,
1112                                           struct backref_node, upper);
1113                        list_del_init(&lower->upper);
1114                }
1115                upper = node;
1116                INIT_LIST_HEAD(&list);
1117                while (upper) {
1118                        if (RB_EMPTY_NODE(&upper->rb_node)) {
1119                                list_splice_tail(&upper->upper, &list);
1120                                free_backref_node(cache, upper);
1121                        }
1122
1123                        if (list_empty(&list))
1124                                break;
1125
1126                        edge = list_entry(list.next, struct backref_edge,
1127                                          list[LOWER]);
1128                        list_del(&edge->list[LOWER]);
1129                        upper = edge->node[UPPER];
1130                        free_backref_edge(cache, edge);
1131                }
1132                return ERR_PTR(err);
1133        }
1134        BUG_ON(node && node->detached);
1135        return node;
1136}
1137
1138/*
 1139 * helper to add a backref node for the newly created snapshot.
 1140 * the backref node is created by cloning the backref node that
 1141 * corresponds to the root of the source tree
1142 */
1143static int clone_backref_node(struct btrfs_trans_handle *trans,
1144                              struct reloc_control *rc,
1145                              struct btrfs_root *src,
1146                              struct btrfs_root *dest)
1147{
1148        struct btrfs_root *reloc_root = src->reloc_root;
1149        struct backref_cache *cache = &rc->backref_cache;
1150        struct backref_node *node = NULL;
1151        struct backref_node *new_node;
1152        struct backref_edge *edge;
1153        struct backref_edge *new_edge;
1154        struct rb_node *rb_node;
1155
1156        if (cache->last_trans > 0)
1157                update_backref_cache(trans, cache);
1158
1159        rb_node = tree_search(&cache->rb_root, src->commit_root->start);
1160        if (rb_node) {
1161                node = rb_entry(rb_node, struct backref_node, rb_node);
1162                if (node->detached)
1163                        node = NULL;
1164                else
1165                        BUG_ON(node->new_bytenr != reloc_root->node->start);
1166        }
1167
1168        if (!node) {
1169                rb_node = tree_search(&cache->rb_root,
1170                                      reloc_root->commit_root->start);
1171                if (rb_node) {
1172                        node = rb_entry(rb_node, struct backref_node,
1173                                        rb_node);
1174                        BUG_ON(node->detached);
1175                }
1176        }
1177
1178        if (!node)
1179                return 0;
1180
1181        new_node = alloc_backref_node(cache);
1182        if (!new_node)
1183                return -ENOMEM;
1184
1185        new_node->bytenr = dest->node->start;
1186        new_node->level = node->level;
1187        new_node->lowest = node->lowest;
1188        new_node->checked = 1;
1189        new_node->root = dest;
1190
1191        if (!node->lowest) {
1192                list_for_each_entry(edge, &node->lower, list[UPPER]) {
1193                        new_edge = alloc_backref_edge(cache);
1194                        if (!new_edge)
1195                                goto fail;
1196
1197                        new_edge->node[UPPER] = new_node;
1198                        new_edge->node[LOWER] = edge->node[LOWER];
1199                        list_add_tail(&new_edge->list[UPPER],
1200                                      &new_node->lower);
1201                }
1202        } else {
1203                list_add_tail(&new_node->lower, &cache->leaves);
1204        }
1205
1206        rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
1207                              &new_node->rb_node);
1208        if (rb_node)
1209                backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
1210
1211        if (!new_node->lowest) {
1212                list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
1213                        list_add_tail(&new_edge->list[LOWER],
1214                                      &new_edge->node[LOWER]->upper);
1215                }
1216        }
1217        return 0;
1218fail:
1219        while (!list_empty(&new_node->lower)) {
1220                new_edge = list_entry(new_node->lower.next,
1221                                      struct backref_edge, list[UPPER]);
1222                list_del(&new_edge->list[UPPER]);
1223                free_backref_edge(cache, new_edge);
1224        }
1225        free_backref_node(cache, new_node);
1226        return -ENOMEM;
1227}
1228
1229/*
1230 * helper to add 'address of tree root -> reloc tree' mapping
1231 */
1232static int __must_check __add_reloc_root(struct btrfs_root *root)
1233{
1234        struct rb_node *rb_node;
1235        struct mapping_node *node;
1236        struct reloc_control *rc = root->fs_info->reloc_ctl;
1237
1238        node = kmalloc(sizeof(*node), GFP_NOFS);
1239        if (!node)
1240                return -ENOMEM;
1241
1242        node->bytenr = root->node->start;
1243        node->data = root;
1244
1245        spin_lock(&rc->reloc_root_tree.lock);
1246        rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1247                              node->bytenr, &node->rb_node);
1248        spin_unlock(&rc->reloc_root_tree.lock);
1249        if (rb_node) {
1250                btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
1251                            "for start=%llu while inserting into relocation "
1252                            "tree\n", node->bytenr);
1253                kfree(node);
1254                return -EEXIST;
1255        }
1256
1257        list_add_tail(&root->root_list, &rc->reloc_roots);
1258        return 0;
1259}
1260
1261/*
1262 * helper to update/delete the 'address of tree root -> reloc tree'
1263 * mapping
1264 */
1265static int __update_reloc_root(struct btrfs_root *root, int del)
1266{
1267        struct rb_node *rb_node;
1268        struct mapping_node *node = NULL;
1269        struct reloc_control *rc = root->fs_info->reloc_ctl;
1270
1271        spin_lock(&rc->reloc_root_tree.lock);
1272        rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1273                              root->commit_root->start);
1274        if (rb_node) {
1275                node = rb_entry(rb_node, struct mapping_node, rb_node);
1276                rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1277        }
1278        spin_unlock(&rc->reloc_root_tree.lock);
1279
1280        if (!node)
1281                return 0;
1282        BUG_ON((struct btrfs_root *)node->data != root);
1283
1284        if (!del) {
1285                spin_lock(&rc->reloc_root_tree.lock);
1286                node->bytenr = root->node->start;
1287                rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1288                                      node->bytenr, &node->rb_node);
1289                spin_unlock(&rc->reloc_root_tree.lock);
1290                if (rb_node)
1291                        backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1292        } else {
1293                spin_lock(&root->fs_info->trans_lock);
1294                list_del_init(&root->root_list);
1295                spin_unlock(&root->fs_info->trans_lock);
1296                kfree(node);
1297        }
1298        return 0;
1299}
1300
1301static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
1302                                        struct btrfs_root *root, u64 objectid)
1303{
1304        struct btrfs_root *reloc_root;
1305        struct extent_buffer *eb;
1306        struct btrfs_root_item *root_item;
1307        struct btrfs_key root_key;
1308        int ret;
1309
1310        root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
1311        BUG_ON(!root_item);
1312
1313        root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
1314        root_key.type = BTRFS_ROOT_ITEM_KEY;
1315        root_key.offset = objectid;
1316
1317        if (root->root_key.objectid == objectid) {
1318                /* called by btrfs_init_reloc_root */
1319                ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
1320                                      BTRFS_TREE_RELOC_OBJECTID);
1321                BUG_ON(ret);
1322
1323                btrfs_set_root_last_snapshot(&root->root_item,
1324                                             trans->transid - 1);
1325        } else {
1326                /*
1327                 * called by btrfs_reloc_post_snapshot_hook.
 1328                 * the source tree is a reloc tree; all tree blocks
 1329                 * modified after it was created have the RELOC flag
 1330                 * set in their headers, so it's OK not to update
 1331                 * 'last_snapshot'.
1332                 */
1333                ret = btrfs_copy_root(trans, root, root->node, &eb,
1334                                      BTRFS_TREE_RELOC_OBJECTID);
1335                BUG_ON(ret);
1336        }
1337
1338        memcpy(root_item, &root->root_item, sizeof(*root_item));
1339        btrfs_set_root_bytenr(root_item, eb->start);
1340        btrfs_set_root_level(root_item, btrfs_header_level(eb));
1341        btrfs_set_root_generation(root_item, trans->transid);
1342
1343        if (root->root_key.objectid == objectid) {
1344                btrfs_set_root_refs(root_item, 0);
1345                memset(&root_item->drop_progress, 0,
1346                       sizeof(struct btrfs_disk_key));
1347                root_item->drop_level = 0;
1348        }
1349
1350        btrfs_tree_unlock(eb);
1351        free_extent_buffer(eb);
1352
1353        ret = btrfs_insert_root(trans, root->fs_info->tree_root,
1354                                &root_key, root_item);
1355        BUG_ON(ret);
1356        kfree(root_item);
1357
1358        reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
1359                                                 &root_key);
1360        BUG_ON(IS_ERR(reloc_root));
1361        reloc_root->last_trans = trans->transid;
1362        return reloc_root;
1363}
1364
1365/*
 1366 * create a reloc tree for a given fs tree. a reloc tree is just a
 1367 * snapshot of the fs tree with a special root objectid.
1368 */
1369int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1370                          struct btrfs_root *root)
1371{
1372        struct btrfs_root *reloc_root;
1373        struct reloc_control *rc = root->fs_info->reloc_ctl;
1374        int clear_rsv = 0;
1375        int ret;
1376
1377        if (root->reloc_root) {
1378                reloc_root = root->reloc_root;
1379                reloc_root->last_trans = trans->transid;
1380                return 0;
1381        }
1382
1383        if (!rc || !rc->create_reloc_tree ||
1384            root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1385                return 0;
1386
1387        if (!trans->block_rsv) {
1388                trans->block_rsv = rc->block_rsv;
1389                clear_rsv = 1;
1390        }
1391        reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
1392        if (clear_rsv)
1393                trans->block_rsv = NULL;
1394
1395        ret = __add_reloc_root(reloc_root);
1396        BUG_ON(ret < 0);
1397        root->reloc_root = reloc_root;
1398        return 0;
1399}
1400
1401/*
1402 * update root item of reloc tree
1403 */
1404int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1405                            struct btrfs_root *root)
1406{
1407        struct btrfs_root *reloc_root;
1408        struct btrfs_root_item *root_item;
1409        int del = 0;
1410        int ret;
1411
1412        if (!root->reloc_root)
1413                goto out;
1414
1415        reloc_root = root->reloc_root;
1416        root_item = &reloc_root->root_item;
1417
1418        if (root->fs_info->reloc_ctl->merge_reloc_tree &&
1419            btrfs_root_refs(root_item) == 0) {
1420                root->reloc_root = NULL;
1421                del = 1;
1422        }
1423
1424        __update_reloc_root(reloc_root, del);
1425
1426        if (reloc_root->commit_root != reloc_root->node) {
1427                btrfs_set_root_node(root_item, reloc_root->node);
1428                free_extent_buffer(reloc_root->commit_root);
1429                reloc_root->commit_root = btrfs_root_node(reloc_root);
1430        }
1431
1432        ret = btrfs_update_root(trans, root->fs_info->tree_root,
1433                                &reloc_root->root_key, root_item);
1434        BUG_ON(ret);
1435
1436out:
1437        return 0;
1438}
1439
1440/*
 1441 * helper to find the first cached inode with inode number >= objectid
1442 * in a subvolume
1443 */
1444static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
1445{
1446        struct rb_node *node;
1447        struct rb_node *prev;
1448        struct btrfs_inode *entry;
1449        struct inode *inode;
1450
1451        spin_lock(&root->inode_lock);
1452again:
1453        node = root->inode_tree.rb_node;
1454        prev = NULL;
1455        while (node) {
1456                prev = node;
1457                entry = rb_entry(node, struct btrfs_inode, rb_node);
1458
1459                if (objectid < btrfs_ino(&entry->vfs_inode))
1460                        node = node->rb_left;
1461                else if (objectid > btrfs_ino(&entry->vfs_inode))
1462                        node = node->rb_right;
1463                else
1464                        break;
1465        }
1466        if (!node) {
1467                while (prev) {
1468                        entry = rb_entry(prev, struct btrfs_inode, rb_node);
1469                        if (objectid <= btrfs_ino(&entry->vfs_inode)) {
1470                                node = prev;
1471                                break;
1472                        }
1473                        prev = rb_next(prev);
1474                }
1475        }
1476        while (node) {
1477                entry = rb_entry(node, struct btrfs_inode, rb_node);
1478                inode = igrab(&entry->vfs_inode);
1479                if (inode) {
1480                        spin_unlock(&root->inode_lock);
1481                        return inode;
1482                }
1483
1484                objectid = btrfs_ino(&entry->vfs_inode) + 1;
1485                if (cond_resched_lock(&root->inode_lock))
1486                        goto again;
1487
1488                node = rb_next(node);
1489        }
1490        spin_unlock(&root->inode_lock);
1491        return NULL;
1492}
1493
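/*
 * check whether a byte number falls inside the extent range covered
 * by the given block group
 */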
1494static int in_block_group(u64 bytenr,
1495                          struct btrfs_block_group_cache *block_group)
1496{
1497        if (bytenr >= block_group->key.objectid &&
1498            bytenr < block_group->key.objectid + block_group->key.offset)
1499                return 1;
1500        return 0;
1501}
1502
1503/*
1504 * get new location of data
1505 */
1506static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1507                            u64 bytenr, u64 num_bytes)
1508{
1509        struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1510        struct btrfs_path *path;
1511        struct btrfs_file_extent_item *fi;
1512        struct extent_buffer *leaf;
1513        int ret;
1514
1515        path = btrfs_alloc_path();
1516        if (!path)
1517                return -ENOMEM;
1518
1519        bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1520        ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
1521                                       bytenr, 0);
1522        if (ret < 0)
1523                goto out;
1524        if (ret > 0) {
1525                ret = -ENOENT;
1526                goto out;
1527        }
1528
1529        leaf = path->nodes[0];
1530        fi = btrfs_item_ptr(leaf, path->slots[0],
1531                            struct btrfs_file_extent_item);
1532
1533        BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1534               btrfs_file_extent_compression(leaf, fi) ||
1535               btrfs_file_extent_encryption(leaf, fi) ||
1536               btrfs_file_extent_other_encoding(leaf, fi));
1537
1538        if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1539                ret = 1;
1540                goto out;
1541        }
1542
1543        *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1544        ret = 0;
1545out:
1546        btrfs_free_path(path);
1547        return ret;
1548}
1549
1550/*
1551 * update file extent items in the tree leaf to point to
1552 * the new locations.
1553 */
1554static noinline_for_stack
1555int replace_file_extents(struct btrfs_trans_handle *trans,
1556                         struct reloc_control *rc,
1557                         struct btrfs_root *root,
1558                         struct extent_buffer *leaf)
1559{
1560        struct btrfs_key key;
1561        struct btrfs_file_extent_item *fi;
1562        struct inode *inode = NULL;
1563        u64 parent;
1564        u64 bytenr;
1565        u64 new_bytenr = 0;
1566        u64 num_bytes;
1567        u64 end;
1568        u32 nritems;
1569        u32 i;
1570        int ret;
1571        int first = 1;
1572        int dirty = 0;
1573
1574        if (rc->stage != UPDATE_DATA_PTRS)
1575                return 0;
1576
1577        /* reloc trees always use full backref */
1578        if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1579                parent = leaf->start;
1580        else
1581                parent = 0;
1582
1583        nritems = btrfs_header_nritems(leaf);
1584        for (i = 0; i < nritems; i++) {
1585                cond_resched();
1586                btrfs_item_key_to_cpu(leaf, &key, i);
1587                if (key.type != BTRFS_EXTENT_DATA_KEY)
1588                        continue;
1589                fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1590                if (btrfs_file_extent_type(leaf, fi) ==
1591                    BTRFS_FILE_EXTENT_INLINE)
1592                        continue;
1593                bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1594                num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1595                if (bytenr == 0)
1596                        continue;
1597                if (!in_block_group(bytenr, rc->block_group))
1598                        continue;
1599
1600                /*
1601                 * if we are modifying a block in the fs tree, wait for readpage
1602                 * to complete and drop the extent cache
1603                 */
1604                if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1605                        if (first) {
1606                                inode = find_next_inode(root, key.objectid);
1607                                first = 0;
1608                        } else if (inode && btrfs_ino(inode) < key.objectid) {
1609                                btrfs_add_delayed_iput(inode);
1610                                inode = find_next_inode(root, key.objectid);
1611                        }
1612                        if (inode && btrfs_ino(inode) == key.objectid) {
1613                                end = key.offset +
1614                                      btrfs_file_extent_num_bytes(leaf, fi);
1615                                WARN_ON(!IS_ALIGNED(key.offset,
1616                                                    root->sectorsize));
1617                                WARN_ON(!IS_ALIGNED(end, root->sectorsize));
1618                                end--;
1619                                ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1620                                                      key.offset, end);
1621                                if (!ret)
1622                                        continue;
1623
1624                                btrfs_drop_extent_cache(inode, key.offset, end,
1625                                                        1);
1626                                unlock_extent(&BTRFS_I(inode)->io_tree,
1627                                              key.offset, end);
1628                        }
1629                }
1630
1631                ret = get_new_location(rc->data_inode, &new_bytenr,
1632                                       bytenr, num_bytes);
1633                if (ret > 0) {
1634                        WARN_ON(1);
1635                        continue;
1636                }
1637                BUG_ON(ret < 0);
1638
1639                btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1640                dirty = 1;
1641
1642                key.offset -= btrfs_file_extent_offset(leaf, fi);
1643                ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
1644                                           num_bytes, parent,
1645                                           btrfs_header_owner(leaf),
1646                                           key.objectid, key.offset, 1);
1647                BUG_ON(ret);
1648
1649                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1650                                        parent, btrfs_header_owner(leaf),
1651                                        key.objectid, key.offset, 1);
1652                BUG_ON(ret);
1653        }
1654        if (dirty)
1655                btrfs_mark_buffer_dirty(leaf);
1656        if (inode)
1657                btrfs_add_delayed_iput(inode);
1658        return 0;
1659}
1660
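/*
 * compare the node key at the given slot of eb with the node key at
 * path->slots[level] in path->nodes[level]
 */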
1661static noinline_for_stack
1662int memcmp_node_keys(struct extent_buffer *eb, int slot,
1663                     struct btrfs_path *path, int level)
1664{
1665        struct btrfs_disk_key key1;
1666        struct btrfs_disk_key key2;
1667        btrfs_node_key(eb, &key1, slot);
1668        btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1669        return memcmp(&key1, &key2, sizeof(key1));
1670}
1671
1672/*
1673 * try to replace tree blocks in the fs tree with the new blocks
1674 * in the reloc tree. tree blocks that haven't been modified since
1675 * the reloc tree was created can be replaced.
1676 *
1677 * if a block was replaced, level of the block + 1 is returned.
1678 * if no block got replaced, 0 is returned. if there are other
1679 * errors, a negative error number is returned.
1680 */
1681static noinline_for_stack
1682int replace_path(struct btrfs_trans_handle *trans,
1683                 struct btrfs_root *dest, struct btrfs_root *src,
1684                 struct btrfs_path *path, struct btrfs_key *next_key,
1685                 int lowest_level, int max_level)
1686{
1687        struct extent_buffer *eb;
1688        struct extent_buffer *parent;
1689        struct btrfs_key key;
1690        u64 old_bytenr;
1691        u64 new_bytenr;
1692        u64 old_ptr_gen;
1693        u64 new_ptr_gen;
1694        u64 last_snapshot;
1695        u32 blocksize;
1696        int cow = 0;
1697        int level;
1698        int ret;
1699        int slot;
1700
1701        BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1702        BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1703
1704        last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1705again:
1706        slot = path->slots[lowest_level];
1707        btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1708
1709        eb = btrfs_lock_root_node(dest);
1710        btrfs_set_lock_blocking(eb);
1711        level = btrfs_header_level(eb);
1712
1713        if (level < lowest_level) {
1714                btrfs_tree_unlock(eb);
1715                free_extent_buffer(eb);
1716                return 0;
1717        }
1718
1719        if (cow) {
1720                ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
1721                BUG_ON(ret);
1722        }
1723        btrfs_set_lock_blocking(eb);
1724
1725        if (next_key) {
1726                next_key->objectid = (u64)-1;
1727                next_key->type = (u8)-1;
1728                next_key->offset = (u64)-1;
1729        }
1730
1731        parent = eb;
1732        while (1) {
1733                level = btrfs_header_level(parent);
1734                BUG_ON(level < lowest_level);
1735
1736                ret = btrfs_bin_search(parent, &key, level, &slot);
1737                if (ret && slot > 0)
1738                        slot--;
1739
1740                if (next_key && slot + 1 < btrfs_header_nritems(parent))
1741                        btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1742
1743                old_bytenr = btrfs_node_blockptr(parent, slot);
1744                blocksize = btrfs_level_size(dest, level - 1);
1745                old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1746
1747                if (level <= max_level) {
1748                        eb = path->nodes[level];
1749                        new_bytenr = btrfs_node_blockptr(eb,
1750                                                        path->slots[level]);
1751                        new_ptr_gen = btrfs_node_ptr_generation(eb,
1752                                                        path->slots[level]);
1753                } else {
1754                        new_bytenr = 0;
1755                        new_ptr_gen = 0;
1756                }
1757
1758                if (new_bytenr > 0 && new_bytenr == old_bytenr) {
1759                        WARN_ON(1);
1760                        ret = level;
1761                        break;
1762                }
1763
1764                if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1765                    memcmp_node_keys(parent, slot, path, level)) {
1766                        if (level <= lowest_level) {
1767                                ret = 0;
1768                                break;
1769                        }
1770
1771                        eb = read_tree_block(dest, old_bytenr, blocksize,
1772                                             old_ptr_gen);
1773                        if (!eb || !extent_buffer_uptodate(eb)) {
1774                                ret = (!eb) ? -ENOMEM : -EIO;
1775                                free_extent_buffer(eb);
1776                                break;
1777                        }
1778                        btrfs_tree_lock(eb);
1779                        if (cow) {
1780                                ret = btrfs_cow_block(trans, dest, eb, parent,
1781                                                      slot, &eb);
1782                                BUG_ON(ret);
1783                        }
1784                        btrfs_set_lock_blocking(eb);
1785
1786                        btrfs_tree_unlock(parent);
1787                        free_extent_buffer(parent);
1788
1789                        parent = eb;
1790                        continue;
1791                }
1792
1793                if (!cow) {
1794                        btrfs_tree_unlock(parent);
1795                        free_extent_buffer(parent);
1796                        cow = 1;
1797                        goto again;
1798                }
1799
1800                btrfs_node_key_to_cpu(path->nodes[level], &key,
1801                                      path->slots[level]);
1802                btrfs_release_path(path);
1803
1804                path->lowest_level = level;
1805                ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1806                path->lowest_level = 0;
1807                BUG_ON(ret);
1808
1809                /*
1810                 * swap blocks in fs tree and reloc tree.
1811                 */
1812                btrfs_set_node_blockptr(parent, slot, new_bytenr);
1813                btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1814                btrfs_mark_buffer_dirty(parent);
1815
1816                btrfs_set_node_blockptr(path->nodes[level],
1817                                        path->slots[level], old_bytenr);
1818                btrfs_set_node_ptr_generation(path->nodes[level],
1819                                              path->slots[level], old_ptr_gen);
1820                btrfs_mark_buffer_dirty(path->nodes[level]);
1821
1822                ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
1823                                        path->nodes[level]->start,
1824                                        src->root_key.objectid, level - 1, 0,
1825                                        1);
1826                BUG_ON(ret);
1827                ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
1828                                        0, dest->root_key.objectid, level - 1,
1829                                        0, 1);
1830                BUG_ON(ret);
1831
1832                ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
1833                                        path->nodes[level]->start,
1834                                        src->root_key.objectid, level - 1, 0,
1835                                        1);
1836                BUG_ON(ret);
1837
1838                ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
1839                                        0, dest->root_key.objectid, level - 1,
1840                                        0, 1);
1841                BUG_ON(ret);
1842
1843                btrfs_unlock_up_safe(path, 0);
1844
1845                ret = level;
1846                break;
1847        }
1848        btrfs_tree_unlock(parent);
1849        free_extent_buffer(parent);
1850        return ret;
1851}
1852
1853/*
1854 * helper to find next relocated block in reloc tree
1855 */
1856static noinline_for_stack
1857int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1858                       int *level)
1859{
1860        struct extent_buffer *eb;
1861        int i;
1862        u64 last_snapshot;
1863        u32 nritems;
1864
1865        last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1866
1867        for (i = 0; i < *level; i++) {
1868                free_extent_buffer(path->nodes[i]);
1869                path->nodes[i] = NULL;
1870        }
1871
1872        for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1873                eb = path->nodes[i];
1874                nritems = btrfs_header_nritems(eb);
1875                while (path->slots[i] + 1 < nritems) {
1876                        path->slots[i]++;
1877                        if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1878                            last_snapshot)
1879                                continue;
1880
1881                        *level = i;
1882                        return 0;
1883                }
1884                free_extent_buffer(path->nodes[i]);
1885                path->nodes[i] = NULL;
1886        }
1887        return 1;
1888}
1889
1890/*
1891 * walk down reloc tree to find relocated block of lowest level
1892 */
1893static noinline_for_stack
1894int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1895                         int *level)
1896{
1897        struct extent_buffer *eb = NULL;
1898        int i;
1899        u64 bytenr;
1900        u64 ptr_gen = 0;
1901        u64 last_snapshot;
1902        u32 blocksize;
1903        u32 nritems;
1904
1905        last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1906
1907        for (i = *level; i > 0; i--) {
1908                eb = path->nodes[i];
1909                nritems = btrfs_header_nritems(eb);
1910                while (path->slots[i] < nritems) {
1911                        ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1912                        if (ptr_gen > last_snapshot)
1913                                break;
1914                        path->slots[i]++;
1915                }
1916                if (path->slots[i] >= nritems) {
1917                        if (i == *level)
1918                                break;
1919                        *level = i + 1;
1920                        return 0;
1921                }
1922                if (i == 1) {
1923                        *level = i;
1924                        return 0;
1925                }
1926
1927                bytenr = btrfs_node_blockptr(eb, path->slots[i]);
1928                blocksize = btrfs_level_size(root, i - 1);
1929                eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
1930                if (!eb || !extent_buffer_uptodate(eb)) {
1931                        free_extent_buffer(eb);
1932                        return -EIO;
1933                }
1934                BUG_ON(btrfs_header_level(eb) != i - 1);
1935                path->nodes[i - 1] = eb;
1936                path->slots[i - 1] = 0;
1937        }
1938        return 1;
1939}
1940
1941/*
1942 * invalidate extent cache for file extents whose key is in the range
1943 * [min_key, max_key)
1944 */
1945static int invalidate_extent_cache(struct btrfs_root *root,
1946                                   struct btrfs_key *min_key,
1947                                   struct btrfs_key *max_key)
1948{
1949        struct inode *inode = NULL;
1950        u64 objectid;
1951        u64 start, end;
1952        u64 ino;
1953
1954        objectid = min_key->objectid;
1955        while (1) {
1956                cond_resched();
1957                iput(inode);
1958
1959                if (objectid > max_key->objectid)
1960                        break;
1961
1962                inode = find_next_inode(root, objectid);
1963                if (!inode)
1964                        break;
1965                ino = btrfs_ino(inode);
1966
1967                if (ino > max_key->objectid) {
1968                        iput(inode);
1969                        break;
1970                }
1971
1972                objectid = ino + 1;
1973                if (!S_ISREG(inode->i_mode))
1974                        continue;
1975
1976                if (unlikely(min_key->objectid == ino)) {
1977                        if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1978                                continue;
1979                        if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1980                                start = 0;
1981                        else {
1982                                start = min_key->offset;
1983                                WARN_ON(!IS_ALIGNED(start, root->sectorsize));
1984                        }
1985                } else {
1986                        start = 0;
1987                }
1988
1989                if (unlikely(max_key->objectid == ino)) {
1990                        if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1991                                continue;
1992                        if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1993                                end = (u64)-1;
1994                        } else {
1995                                if (max_key->offset == 0)
1996                                        continue;
1997                                end = max_key->offset;
1998                                WARN_ON(!IS_ALIGNED(end, root->sectorsize));
1999                                end--;
2000                        }
2001                } else {
2002                        end = (u64)-1;
2003                }
2004
2005                /* the lock_extent waits for readpage to complete */
2006                lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2007                btrfs_drop_extent_cache(inode, start, end, 1);
2008                unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2009        }
2010        return 0;
2011}
2012
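/*
 * walk up the path from the given level to find the key of the next
 * slot. returns 0 and fills in the key on success, 1 if there is no
 * next key.
 */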
2013static int find_next_key(struct btrfs_path *path, int level,
2014                         struct btrfs_key *key)
2015
2016{
2017        while (level < BTRFS_MAX_LEVEL) {
2018                if (!path->nodes[level])
2019                        break;
2020                if (path->slots[level] + 1 <
2021                    btrfs_header_nritems(path->nodes[level])) {
2022                        btrfs_node_key_to_cpu(path->nodes[level], key,
2023                                              path->slots[level] + 1);
2024                        return 0;
2025                }
2026                level++;
2027        }
2028        return 1;
2029}
2030
2031/*
2032 * merge the relocated tree blocks in reloc tree with corresponding
2033 * fs tree.
2034 */
2035static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
2036                                               struct btrfs_root *root)
2037{
2038        LIST_HEAD(inode_list);
2039        struct btrfs_key key;
2040        struct btrfs_key next_key;
2041        struct btrfs_trans_handle *trans;
2042        struct btrfs_root *reloc_root;
2043        struct btrfs_root_item *root_item;
2044        struct btrfs_path *path;
2045        struct extent_buffer *leaf;
2046        int level;
2047        int max_level;
2048        int replaced = 0;
2049        int ret;
2050        int err = 0;
2051        u32 min_reserved;
2052
2053        path = btrfs_alloc_path();
2054        if (!path)
2055                return -ENOMEM;
2056        path->reada = 1;
2057
2058        reloc_root = root->reloc_root;
2059        root_item = &reloc_root->root_item;
2060
2061        if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2062                level = btrfs_root_level(root_item);
2063                extent_buffer_get(reloc_root->node);
2064                path->nodes[level] = reloc_root->node;
2065                path->slots[level] = 0;
2066        } else {
2067                btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2068
2069                level = root_item->drop_level;
2070                BUG_ON(level == 0);
2071                path->lowest_level = level;
2072                ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
2073                path->lowest_level = 0;
2074                if (ret < 0) {
2075                        btrfs_free_path(path);
2076                        return ret;
2077                }
2078
2079                btrfs_node_key_to_cpu(path->nodes[level], &next_key,
2080                                      path->slots[level]);
2081                WARN_ON(memcmp(&key, &next_key, sizeof(key)));
2082
2083                btrfs_unlock_up_safe(path, 0);
2084        }
2085
2086        min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2087        memset(&next_key, 0, sizeof(next_key));
2088
2089        while (1) {
2090                trans = btrfs_start_transaction(root, 0);
2091                BUG_ON(IS_ERR(trans));
2092                trans->block_rsv = rc->block_rsv;
2093
2094                ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
2095                                             BTRFS_RESERVE_FLUSH_ALL);
2096                if (ret) {
2097                        BUG_ON(ret != -EAGAIN);
2098                        ret = btrfs_commit_transaction(trans, root);
2099                        BUG_ON(ret);
2100                        continue;
2101                }
2102
2103                replaced = 0;
2104                max_level = level;
2105
2106                ret = walk_down_reloc_tree(reloc_root, path, &level);
2107                if (ret < 0) {
2108                        err = ret;
2109                        goto out;
2110                }
2111                if (ret > 0)
2112                        break;
2113
2114                if (!find_next_key(path, level, &key) &&
2115                    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
2116                        ret = 0;
2117                } else {
2118                        ret = replace_path(trans, root, reloc_root, path,
2119                                           &next_key, level, max_level);
2120                }
2121                if (ret < 0) {
2122                        err = ret;
2123                        goto out;
2124                }
2125
2126                if (ret > 0) {
2127                        level = ret;
2128                        btrfs_node_key_to_cpu(path->nodes[level], &key,
2129                                              path->slots[level]);
2130                        replaced = 1;
2131                }
2132
2133                ret = walk_up_reloc_tree(reloc_root, path, &level);
2134                if (ret > 0)
2135                        break;
2136
2137                BUG_ON(level == 0);
2138                /*
2139                 * save the merging progress in the drop_progress.
2140                 * this is OK since root refs == 1 in this case.
2141                 */
2142                btrfs_node_key(path->nodes[level], &root_item->drop_progress,
2143                               path->slots[level]);
2144                root_item->drop_level = level;
2145
2146                btrfs_end_transaction_throttle(trans, root);
2147
2148                btrfs_btree_balance_dirty(root);
2149
2150                if (replaced && rc->stage == UPDATE_DATA_PTRS)
2151                        invalidate_extent_cache(root, &key, &next_key);
2152        }
2153
2154        /*
2155         * handle the case where only one block in the fs tree needs to
2156         * be relocated and the block is the tree root.
2157         */
2158        leaf = btrfs_lock_root_node(root);
2159        ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
2160        btrfs_tree_unlock(leaf);
2161        free_extent_buffer(leaf);
2162        if (ret < 0)
2163                err = ret;
2164out:
2165        btrfs_free_path(path);
2166
2167        if (err == 0) {
2168                memset(&root_item->drop_progress, 0,
2169                       sizeof(root_item->drop_progress));
2170                root_item->drop_level = 0;
2171                btrfs_set_root_refs(root_item, 0);
2172                btrfs_update_reloc_root(trans, root);
2173        }
2174
2175        btrfs_end_transaction_throttle(trans, root);
2176
2177        btrfs_btree_balance_dirty(root);
2178
2179        if (replaced && rc->stage == UPDATE_DATA_PTRS)
2180                invalidate_extent_cache(root, &key, &next_key);
2181
2182        return err;
2183}
2184
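/*
 * reserve space for merging the reloc trees, then set the reference
 * count of each reloc root to 1 and update its root item so that
 * btrfs_recover_relocation can resume the merge after a crash
 */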
2185static noinline_for_stack
2186int prepare_to_merge(struct reloc_control *rc, int err)
2187{
2188        struct btrfs_root *root = rc->extent_root;
2189        struct btrfs_root *reloc_root;
2190        struct btrfs_trans_handle *trans;
2191        LIST_HEAD(reloc_roots);
2192        u64 num_bytes = 0;
2193        int ret;
2194
2195        mutex_lock(&root->fs_info->reloc_mutex);
2196        rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2197        rc->merging_rsv_size += rc->nodes_relocated * 2;
2198        mutex_unlock(&root->fs_info->reloc_mutex);
2199
2200again:
2201        if (!err) {
2202                num_bytes = rc->merging_rsv_size;
2203                ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
2204                                          BTRFS_RESERVE_FLUSH_ALL);
2205                if (ret)
2206                        err = ret;
2207        }
2208
2209        trans = btrfs_join_transaction(rc->extent_root);
2210        if (IS_ERR(trans)) {
2211                if (!err)
2212                        btrfs_block_rsv_release(rc->extent_root,
2213                                                rc->block_rsv, num_bytes);
2214                return PTR_ERR(trans);
2215        }
2216
2217        if (!err) {
2218                if (num_bytes != rc->merging_rsv_size) {
2219                        btrfs_end_transaction(trans, rc->extent_root);
2220                        btrfs_block_rsv_release(rc->extent_root,
2221                                                rc->block_rsv, num_bytes);
2222                        goto again;
2223                }
2224        }
2225
2226        rc->merge_reloc_tree = 1;
2227
2228        while (!list_empty(&rc->reloc_roots)) {
2229                reloc_root = list_entry(rc->reloc_roots.next,
2230                                        struct btrfs_root, root_list);
2231                list_del_init(&reloc_root->root_list);
2232
2233                root = read_fs_root(reloc_root->fs_info,
2234                                    reloc_root->root_key.offset);
2235                BUG_ON(IS_ERR(root));
2236                BUG_ON(root->reloc_root != reloc_root);
2237
2238                /*
2239                 * set reference count to 1, so btrfs_recover_relocation
2240                 * knows it should resume merging
2241                 */
2242                if (!err)
2243                        btrfs_set_root_refs(&reloc_root->root_item, 1);
2244                btrfs_update_reloc_root(trans, root);
2245
2246                list_add(&reloc_root->root_list, &reloc_roots);
2247        }
2248
2249        list_splice(&reloc_roots, &rc->reloc_roots);
2250
2251        if (!err)
2252                btrfs_commit_transaction(trans, rc->extent_root);
2253        else
2254                btrfs_end_transaction(trans, rc->extent_root);
2255        return err;
2256}
2257
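/*
 * tear down the reloc roots left on the list after an error and drop
 * their root and commit_root extent buffers
 */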
2258static noinline_for_stack
2259void free_reloc_roots(struct list_head *list)
2260{
2261        struct btrfs_root *reloc_root;
2262
2263        while (!list_empty(list)) {
2264                reloc_root = list_entry(list->next, struct btrfs_root,
2265                                        root_list);
2266                __update_reloc_root(reloc_root, 1);
2267                free_extent_buffer(reloc_root->node);
2268                free_extent_buffer(reloc_root->commit_root);
2269                kfree(reloc_root);
2270        }
2271}
2272
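/*
 * merge each reloc tree that still has root refs with its fs tree,
 * then drop the reloc trees. reloc trees with zero root refs are
 * dropped without being merged.
 */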
2273static noinline_for_stack
2274int merge_reloc_roots(struct reloc_control *rc)
2275{
2276        struct btrfs_root *root;
2277        struct btrfs_root *reloc_root;
2278        LIST_HEAD(reloc_roots);
2279        int found = 0;
2280        int ret = 0;
2281again:
2282        root = rc->extent_root;
2283
2284        /*
2285         * this serializes us with btrfs_record_root_in_transaction;
2286         * we have to make sure nobody is in the middle of
2287         * adding their roots to the list while we are
2288         * doing this splice
2289         */
2290        mutex_lock(&root->fs_info->reloc_mutex);
2291        list_splice_init(&rc->reloc_roots, &reloc_roots);
2292        mutex_unlock(&root->fs_info->reloc_mutex);
2293
2294        while (!list_empty(&reloc_roots)) {
2295                found = 1;
2296                reloc_root = list_entry(reloc_roots.next,
2297                                        struct btrfs_root, root_list);
2298
2299                if (btrfs_root_refs(&reloc_root->root_item) > 0) {
2300                        root = read_fs_root(reloc_root->fs_info,
2301                                            reloc_root->root_key.offset);
2302                        BUG_ON(IS_ERR(root));
2303                        BUG_ON(root->reloc_root != reloc_root);
2304
2305                        ret = merge_reloc_root(rc, root);
2306                        if (ret)
2307                                goto out;
2308                } else {
2309                        list_del_init(&reloc_root->root_list);
2310                }
2311                ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
2312                if (ret < 0) {
2313                        if (list_empty(&reloc_root->root_list))
2314                                list_add_tail(&reloc_root->root_list,
2315                                              &reloc_roots);
2316                        goto out;
2317                }
2318        }
2319
2320        if (found) {
2321                found = 0;
2322                goto again;
2323        }
2324out:
2325        if (ret) {
2326                btrfs_std_error(root->fs_info, ret);
2327                if (!list_empty(&reloc_roots))
2328                        free_reloc_roots(&reloc_roots);
2329        }
2330
2331        BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
2332        return ret;
2333}
2334
2335static void free_block_list(struct rb_root *blocks)
2336{
2337        struct tree_block *block;
2338        struct rb_node *rb_node;
2339        while ((rb_node = rb_first(blocks))) {
2340                block = rb_entry(rb_node, struct tree_block, rb_node);
2341                rb_erase(rb_node, blocks);
2342                kfree(block);
2343        }
2344}
2345
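/*
 * record the fs root that owns the given reloc root in the current
 * transaction if it hasn't been recorded yet
 */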
2346static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2347                                      struct btrfs_root *reloc_root)
2348{
2349        struct btrfs_root *root;
2350
2351        if (reloc_root->last_trans == trans->transid)
2352                return 0;
2353
2354        root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
2355        BUG_ON(IS_ERR(root));
2356        BUG_ON(root->reloc_root != reloc_root);
2357
2358        return btrfs_record_root_in_trans(trans, root);
2359}
2360
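/*
 * walk up the backref tree from the given node and return the reloc
 * root to COW into. also sets up the backref node path used by
 * btrfs_reloc_cow_block.
 */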
2361static noinline_for_stack
2362struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2363                                     struct reloc_control *rc,
2364                                     struct backref_node *node,
2365                                     struct backref_edge *edges[], int *nr)
2366{
2367        struct backref_node *next;
2368        struct btrfs_root *root;
2369        int index = 0;
2370
2371        next = node;
2372        while (1) {
2373                cond_resched();
2374                next = walk_up_backref(next, edges, &index);
2375                root = next->root;
2376                BUG_ON(!root);
2377                BUG_ON(!root->ref_cows);
2378
2379                if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2380                        record_reloc_root_in_trans(trans, root);
2381                        break;
2382                }
2383
2384                btrfs_record_root_in_trans(trans, root);
2385                root = root->reloc_root;
2386
2387                if (next->new_bytenr != root->node->start) {
2388                        BUG_ON(next->new_bytenr);
2389                        BUG_ON(!list_empty(&next->list));
2390                        next->new_bytenr = root->node->start;
2391                        next->root = root;
2392                        list_add_tail(&next->list,
2393                                      &rc->backref_cache.changed);
2394                        __mark_block_processed(rc, next);
2395                        break;
2396                }
2397
2398                WARN_ON(1);
2399                root = NULL;
2400                next = walk_down_backref(edges, &index);
2401                if (!next || next->level <= node->level)
2402                        break;
2403        }
2404        if (!root)
2405                return NULL;
2406
2407        *nr = index;
2408        next = node;
2409        /* setup backref node path for btrfs_reloc_cow_block */
2410        while (1) {
2411                rc->backref_cache.path[next->level] = next;
2412                if (--index < 0)
2413                        break;
2414                next = edges[index]->node[UPPER];
2415        }
2416        return root;
2417}
2418
2419/*
2420 * select a tree root for relocation. return NULL if the block
2421 * is reference counted. we should use do_relocation() in this
2422 * case. return a tree root pointer if the block isn't reference
2423 * counted. return -ENOENT if the block is the root of a reloc tree.
2424 */
2425static noinline_for_stack
2426struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
2427                                   struct backref_node *node)
2428{
2429        struct backref_node *next;
2430        struct btrfs_root *root;
2431        struct btrfs_root *fs_root = NULL;
2432        struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2433        int index = 0;
2434
2435        next = node;
2436        while (1) {
2437                cond_resched();
2438                next = walk_up_backref(next, edges, &index);
2439                root = next->root;
2440                BUG_ON(!root);
2441
2442                /* no other choice for a non-reference counted tree */
2443                if (!root->ref_cows)
2444                        return root;
2445
2446                if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2447                        fs_root = root;
2448
2449                if (next != node)
2450                        return NULL;
2451
2452                next = walk_down_backref(edges, &index);
2453                if (!next || next->level <= node->level)
2454                        break;
2455        }
2456
2457        if (!fs_root)
2458                return ERR_PTR(-ENOENT);
2459        return fs_root;
2460}
2461
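/*
 * calculate the total size of the given block and the upper level
 * blocks that reference it. used to size the metadata reservation
 * for relocating the block.
 */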
2462static noinline_for_stack
2463u64 calcu_metadata_size(struct reloc_control *rc,
2464                        struct backref_node *node, int reserve)
2465{
2466        struct backref_node *next = node;
2467        struct backref_edge *edge;
2468        struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2469        u64 num_bytes = 0;
2470        int index = 0;
2471
2472        BUG_ON(reserve && node->processed);
2473
2474        while (next) {
2475                cond_resched();
2476                while (1) {
2477                        if (next->processed && (reserve || next != node))
2478                                break;
2479
2480                        num_bytes += btrfs_level_size(rc->extent_root,
2481                                                      next->level);
2482
2483                        if (list_empty(&next->upper))
2484                                break;
2485
2486                        edge = list_entry(next->upper.next,
2487                                          struct backref_edge, list[LOWER]);
2488                        edges[index++] = edge;
2489                        next = edge->node[UPPER];
2490                }
2491                next = walk_down_backref(edges, &index);
2492        }
2493        return num_bytes;
2494}
2495
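/*
 * reserve metadata space for relocating the given block. on -EAGAIN
 * the current transaction needs to be committed before retrying.
 */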
2496static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2497                                  struct reloc_control *rc,
2498                                  struct backref_node *node)
2499{
2500        struct btrfs_root *root = rc->extent_root;
2501        u64 num_bytes;
2502        int ret;
2503
2504        num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2505
2506        trans->block_rsv = rc->block_rsv;
2507        ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
2508                                  BTRFS_RESERVE_FLUSH_ALL);
2509        if (ret) {
2510                if (ret == -EAGAIN)
2511                        rc->commit_transaction = 1;
2512                return ret;
2513        }
2514
2515        return 0;
2516}
2517
2518static void release_metadata_space(struct reloc_control *rc,
2519                                   struct backref_node *node)
2520{
2521        u64 num_bytes = calcu_metadata_size(rc, node, 0) * 2;
2522        btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, num_bytes);
2523}
2524
2525/*
2526 * relocate a tree block, and then update pointers in upper level
2527 * blocks that reference the block to point to the new location.
2528 *
2529 * if called by link_to_upper, the block has already been relocated.
2530 * in that case this function just updates pointers.
2531 */
2532static int do_relocation(struct btrfs_trans_handle *trans,
2533                         struct reloc_control *rc,
2534                         struct backref_node *node,
2535                         struct btrfs_key *key,
2536                         struct btrfs_path *path, int lowest)
2537{
2538        struct backref_node *upper;
2539        struct backref_edge *edge;
2540        struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2541        struct btrfs_root *root;
2542        struct extent_buffer *eb;
2543        u32 blocksize;
2544        u64 bytenr;
2545        u64 generation;
2546        int nr;
2547        int slot;
2548        int ret;
2549        int err = 0;
2550
2551        BUG_ON(lowest && node->eb);
2552
2553        path->lowest_level = node->level + 1;
2554        rc->backref_cache.path[node->level] = node;
2555        list_for_each_entry(edge, &node->upper, list[LOWER]) {
2556                cond_resched();
2557
2558                upper = edge->node[UPPER];
2559                root = select_reloc_root(trans, rc, upper, edges, &nr);
2560                BUG_ON(!root);
2561
2562                if (upper->eb && !upper->locked) {
2563                        if (!lowest) {
2564                                ret = btrfs_bin_search(upper->eb, key,
2565                                                       upper->level, &slot);
2566                                BUG_ON(ret);
2567                                bytenr = btrfs_node_blockptr(upper->eb, slot);
2568                                if (node->eb->start == bytenr)
2569                                        goto next;
2570                        }
2571                        drop_node_buffer(upper);
2572                }
2573
2574                if (!upper->eb) {
2575                        ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2576                        if (ret < 0) {
2577                                err = ret;
2578                                break;
2579                        }
2580                        BUG_ON(ret > 0);
2581
2582                        if (!upper->eb) {
2583                                upper->eb = path->nodes[upper->level];
2584                                path->nodes[upper->level] = NULL;
2585                        } else {
2586                                BUG_ON(upper->eb != path->nodes[upper->level]);
2587                        }
2588
2589                        upper->locked = 1;
2590                        path->locks[upper->level] = 0;
2591
2592                        slot = path->slots[upper->level];
2593                        btrfs_release_path(path);
2594                } else {
2595                        ret = btrfs_bin_search(upper->eb, key, upper->level,
2596                                               &slot);
2597                        BUG_ON(ret);
2598                }
2599
2600                bytenr = btrfs_node_blockptr(upper->eb, slot);
2601                if (lowest) {
2602                        BUG_ON(bytenr != node->bytenr);
2603                } else {
2604                        if (node->eb->start == bytenr)
2605                                goto next;
2606                }
2607
2608                blocksize = btrfs_level_size(root, node->level);
2609                generation = btrfs_node_ptr_generation(upper->eb, slot);
2610                eb = read_tree_block(root, bytenr, blocksize, generation);
2611                if (!eb || !extent_buffer_uptodate(eb)) {
2612                        free_extent_buffer(eb);
2613                        err = -EIO;
2614                        goto next;
2615                }
2616                btrfs_tree_lock(eb);
2617                btrfs_set_lock_blocking(eb);
2618
2619                if (!node->eb) {
2620                        ret = btrfs_cow_block(trans, root, eb, upper->eb,
2621                                              slot, &eb);
2622                        btrfs_tree_unlock(eb);
2623                        free_extent_buffer(eb);
2624                        if (ret < 0) {
2625                                err = ret;
2626                                goto next;
2627                        }
2628                        BUG_ON(node->eb != eb);
2629                } else {
2630                        btrfs_set_node_blockptr(upper->eb, slot,
2631                                                node->eb->start);
2632                        btrfs_set_node_ptr_generation(upper->eb, slot,
2633                                                      trans->transid);
2634                        btrfs_mark_buffer_dirty(upper->eb);
2635
2636                        ret = btrfs_inc_extent_ref(trans, root,
2637                                                node->eb->start, blocksize,
2638                                                upper->eb->start,
2639                                                btrfs_header_owner(upper->eb),
2640                                                node->level, 0, 1);
2641                        BUG_ON(ret);
2642
2643                        ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
2644                        BUG_ON(ret);
2645                }
2646next:
2647                if (!upper->pending)
2648                        drop_node_buffer(upper);
2649                else
2650                        unlock_node_buffer(upper);
2651                if (err)
2652                        break;
2653        }
2654
2655        if (!err && node->pending) {
2656                drop_node_buffer(node);
2657                list_move_tail(&node->list, &rc->backref_cache.changed);
2658                node->pending = 0;
2659        }
2660
2661        path->lowest_level = 0;
2662        BUG_ON(err == -ENOSPC);
2663        return err;
2664}
2665
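/*
 * update pointers in the upper level blocks to point to the new
 * location of an already relocated block
 */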
2666static int link_to_upper(struct btrfs_trans_handle *trans,
2667                         struct reloc_control *rc,
2668                         struct backref_node *node,
2669                         struct btrfs_path *path)
2670{
2671        struct btrfs_key key;
2672
2673        btrfs_node_key_to_cpu(node->eb, &key, 0);
2674        return do_relocation(trans, rc, node, &key, path, 0);
2675}
2676
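/*
 * link the blocks left on the pending lists to their upper level
 * blocks. blocks that could not be linked are put back on the
 * pending lists.
 */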
2677static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2678                                struct reloc_control *rc,
2679                                struct btrfs_path *path, int err)
2680{
2681        LIST_HEAD(list);
2682        struct backref_cache *cache = &rc->backref_cache;
2683        struct backref_node *node;
2684        int level;
2685        int ret;
2686
2687        for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2688                while (!list_empty(&cache->pending[level])) {
2689                        node = list_entry(cache->pending[level].next,
2690                                          struct backref_node, list);
2691                        list_move_tail(&node->list, &list);
2692                        BUG_ON(!node->pending);
2693
2694                        if (!err) {
2695                                ret = link_to_upper(trans, rc, node, path);
2696                                if (ret < 0)
2697                                        err = ret;
2698                        }
2699                }
2700                list_splice_init(&list, &cache->pending[level]);
2701        }
2702        return err;
2703}
2704
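/*
 * mark the extent range covered by a block as processed in
 * rc->processed_blocks
 */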
2705static void mark_block_processed(struct reloc_control *rc,
2706                                 u64 bytenr, u32 blocksize)
2707{
2708        set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
2709                        EXTENT_DIRTY, GFP_NOFS);
2710}
2711
2712static void __mark_block_processed(struct reloc_control *rc,
2713                                   struct backref_node *node)
2714{
2715        u32 blocksize;
2716        if (node->level == 0 ||
2717            in_block_group(node->bytenr, rc->block_group)) {
2718                blocksize = btrfs_level_size(rc->extent_root, node->level);
2719                mark_block_processed(rc, node->bytenr, blocksize);
2720        }
2721        node->processed = 1;
2722}
2723
2724/*
2725 * mark a block and all blocks that directly/indirectly reference the
2726 * block as processed.
2727 */
2728static void update_processed_blocks(struct reloc_control *rc,
2729                                    struct backref_node *node)
2730{
2731        struct backref_node *next = node;
2732        struct backref_edge *edge;
2733        struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2734        int index = 0;
2735
2736        while (next) {
2737                cond_resched();
2738                while (1) {
2739                        if (next->processed)
2740                                break;
2741
2742                        __mark_block_processed(rc, next);
2743
2744                        if (list_empty(&next->upper))
2745                                break;
2746
2747                        edge = list_entry(next->upper.next,
2748                                          struct backref_edge, list[LOWER]);
2749                        edges[index++] = edge;
2750                        next = edge->node[UPPER];
2751                }
2752                next = walk_down_backref(edges, &index);
2753        }
2754}
2755
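/*
 * check whether a block has already been marked as processed
 */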
2756static int tree_block_processed(u64 bytenr, u32 blocksize,
2757                                struct reloc_control *rc)
2758{
2759        if (test_range_bit(&rc->processed_blocks, bytenr,
2760                           bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
2761                return 1;
2762        return 0;
2763}
2764
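/*
 * read a not-yet-keyed tree block and record the key of its first
 * item (or first node pointer) in block->key
 */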
2765static int get_tree_block_key(struct reloc_control *rc,
2766                              struct tree_block *block)
2767{
2768        struct extent_buffer *eb;
2769
2770        BUG_ON(block->key_ready);
2771        eb = read_tree_block(rc->extent_root, block->bytenr,
2772                             block->key.objectid, block->key.offset);
2773        if (!eb || !extent_buffer_uptodate(eb)) {
2774                free_extent_buffer(eb);
2775                return -EIO;
2776        }
2777        WARN_ON(btrfs_header_level(eb) != block->level);
2778        if (block->level == 0)
2779                btrfs_item_key_to_cpu(eb, &block->key, 0);
2780        else
2781                btrfs_node_key_to_cpu(eb, &block->key, 0);
2782        free_extent_buffer(eb);
2783        block->key_ready = 1;
2784        return 0;
2785}
2786
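/*
 * start readahead on a tree block before its key is read
 */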
2787static int reada_tree_block(struct reloc_control *rc,
2788                            struct tree_block *block)
2789{
2790        BUG_ON(block->key_ready);
2791        if (block->key.type == BTRFS_METADATA_ITEM_KEY)
2792                readahead_tree_block(rc->extent_root, block->bytenr,
2793                                     block->key.objectid,
2794                                     rc->extent_root->leafsize);
2795        else
2796                readahead_tree_block(rc->extent_root, block->bytenr,
2797                                     block->key.objectid, block->key.offset);
2798        return 0;
2799}
2800
2801/*
2802 * helper function to relocate a tree block
2803 */
2804static int relocate_tree_block(struct btrfs_trans_handle *trans,
2805                                struct reloc_control *rc,
2806                                struct backref_node *node,
2807                                struct btrfs_key *key,
2808                                struct btrfs_path *path)
2809{
2810        struct btrfs_root *root;
2811        int release = 0;
2812        int ret = 0;
2813
2814        if (!node)
2815                return 0;
2816
2817        BUG_ON(node->processed);
2818        root = select_one_root(trans, node);
2819        if (root == ERR_PTR(-ENOENT)) {
2820                update_processed_blocks(rc, node);
2821                goto out;
2822        }
2823
2824        if (!root || root->ref_cows) {
2825                ret = reserve_metadata_space(trans, rc, node);
2826                if (ret)
2827                        goto out;
2828                release = 1;
2829        }
2830
2831        if (root) {
2832                if (root->ref_cows) {
2833                        BUG_ON(node->new_bytenr);
2834                        BUG_ON(!list_empty(&node->list));
2835                        btrfs_record_root_in_trans(trans, root);
2836                        root = root->reloc_root;
2837                        node->new_bytenr = root->node->start;
2838                        node->root = root;
2839                        list_add_tail(&node->list, &rc->backref_cache.changed);
2840                } else {
2841                        path->lowest_level = node->level;
2842                        ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2843                        btrfs_release_path(path);
2844                        if (ret > 0)
2845                                ret = 0;
2846                }
2847                if (!ret)
2848                        update_processed_blocks(rc, node);
2849        } else {
2850                ret = do_relocation(trans, rc, node, key, path, 1);
2851        }
2852out:
2853        if (ret || node->level == 0 || node->cowonly) {
2854                if (release)
2855                        release_metadata_space(rc, node);
2856                remove_backref_node(&rc->backref_cache, node);
2857        }
2858        return ret;
2859}
2860
2861/*
2862 * relocate a list of blocks
2863 */
2864static noinline_for_stack
2865int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2866                         struct reloc_control *rc, struct rb_root *blocks)
2867{
2868        struct backref_node *node;
2869        struct btrfs_path *path;
2870        struct tree_block *block;
2871        struct rb_node *rb_node;
2872        int ret;
2873        int err = 0;
2874
2875        path = btrfs_alloc_path();
2876        if (!path) {
2877                err = -ENOMEM;
2878                goto out_free_blocks;
2879        }
2880
2881        rb_node = rb_first(blocks);
2882        while (rb_node) {
2883                block = rb_entry(rb_node, struct tree_block, rb_node);
2884                if (!block->key_ready)
2885                        reada_tree_block(rc, block);
2886                rb_node = rb_next(rb_node);
2887        }
2888
2889        rb_node = rb_first(blocks);
2890        while (rb_node) {
2891                block = rb_entry(rb_node, struct tree_block, rb_node);
2892                if (!block->key_ready) {
2893                        err = get_tree_block_key(rc, block);
2894                        if (err)
2895                                goto out_free_path;
2896                }
2897                rb_node = rb_next(rb_node);
2898        }
2899
2900        rb_node = rb_first(blocks);
2901        while (rb_node) {
2902                block = rb_entry(rb_node, struct tree_block, rb_node);
2903
2904                node = build_backref_tree(rc, &block->key,
2905                                          block->level, block->bytenr);
2906                if (IS_ERR(node)) {
2907                        err = PTR_ERR(node);
2908                        goto out;
2909                }
2910
2911                ret = relocate_tree_block(trans, rc, node, &block->key,
2912                                          path);
2913                if (ret < 0) {
2914                        if (ret != -EAGAIN || rb_node == rb_first(blocks))
2915                                err = ret;
2916                        goto out;
2917                }
2918                rb_node = rb_next(rb_node);
2919        }
2920out:
2921        err = finish_pending_nodes(trans, rc, path, err);
2922
2923out_free_path:
2924        btrfs_free_path(path);
2925out_free_blocks:
2926        free_block_list(blocks);
2927        return err;
2928}
2929
2930static noinline_for_stack
2931int prealloc_file_extent_cluster(struct inode *inode,
2932                                 struct file_extent_cluster *cluster)
2933{
2934        u64 alloc_hint = 0;
2935        u64 start;
2936        u64 end;
2937        u64 offset = BTRFS_I(inode)->index_cnt;
2938        u64 num_bytes;
2939        int nr = 0;
2940        int ret = 0;
2941
2942        BUG_ON(cluster->start != cluster->boundary[0]);
2943        mutex_lock(&inode->i_mutex);
2944
2945        ret = btrfs_check_data_free_space(inode, cluster->end +
2946                                          1 - cluster->start);
2947        if (ret)
2948                goto out;
2949
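        /*
         * cluster->start/end and the boundaries are disk bytenrs inside the
         * block group, while index_cnt holds the block group's start bytenr,
         * so "boundary - offset" is the file offset in the relocation inode.
         * For example (hypothetical numbers): a block group starting at 1G
         * with boundaries at 1G and 1G+8M and cluster->end = 1G+12M-1 is
         * preallocated as the file ranges [0, 8M-1] and [8M, 12M-1].
         */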
2950        while (nr < cluster->nr) {
2951                start = cluster->boundary[nr] - offset;
2952                if (nr + 1 < cluster->nr)
2953                        end = cluster->boundary[nr + 1] - 1 - offset;
2954                else
2955                        end = cluster->end - offset;
2956
2957                lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2958                num_bytes = end + 1 - start;
2959                ret = btrfs_prealloc_file_range(inode, 0, start,
2960                                                num_bytes, num_bytes,
2961                                                end + 1, &alloc_hint);
2962                unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2963                if (ret)
2964                        break;
2965                nr++;
2966        }
2967        btrfs_free_reserved_data_space(inode, cluster->end +
2968                                       1 - cluster->start);
2969out:
2970        mutex_unlock(&inode->i_mutex);
2971        return ret;
2972}
2973
2974static noinline_for_stack
2975int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
2976                         u64 block_start)
2977{
2978        struct btrfs_root *root = BTRFS_I(inode)->root;
2979        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2980        struct extent_map *em;
2981        int ret = 0;
2982
2983        em = alloc_extent_map();
2984        if (!em)
2985                return -ENOMEM;
2986
2987        em->start = start;
2988        em->len = end + 1 - start;
2989        em->block_len = em->len;
2990        em->block_start = block_start;
2991        em->bdev = root->fs_info->fs_devices->latest_bdev;
2992        set_bit(EXTENT_FLAG_PINNED, &em->flags);
2993
2994        lock_extent(&BTRFS_I(inode)->io_tree, start, end);
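        /*
         * keep trying to insert the pinned mapping: if a cached extent map
         * already covers part of the range, add_extent_mapping returns
         * -EEXIST, so drop the cached mappings in the range and retry.
         */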
2995        while (1) {
2996                write_lock(&em_tree->lock);
2997                ret = add_extent_mapping(em_tree, em, 0);
2998                write_unlock(&em_tree->lock);
2999                if (ret != -EEXIST) {
3000                        free_extent_map(em);
3001                        break;
3002                }
3003                btrfs_drop_extent_cache(inode, start, end, 0);
3004        }
3005        unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
3006        return ret;
3007}
3008
3009static int relocate_file_extent_cluster(struct inode *inode,
3010                                        struct file_extent_cluster *cluster)
3011{
3012        u64 page_start;
3013        u64 page_end;
3014        u64 offset = BTRFS_I(inode)->index_cnt;
3015        unsigned long index;
3016        unsigned long last_index;
3017        struct page *page;
3018        struct file_ra_state *ra;
3019        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
3020        int nr = 0;
3021        int ret = 0;
3022
3023        if (!cluster->nr)
3024                return 0;
3025
3026        ra = kzalloc(sizeof(*ra), GFP_NOFS);
3027        if (!ra)
3028                return -ENOMEM;
3029
3030        ret = prealloc_file_extent_cluster(inode, cluster);
3031        if (ret)
3032                goto out;
3033
3034        file_ra_state_init(ra, inode->i_mapping);
3035
3036        ret = setup_extent_mapping(inode, cluster->start - offset,
3037                                   cluster->end - offset, cluster->start);
3038        if (ret)
3039                goto out;
3040
3041        index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
3042        last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
3043        while (index <= last_index) {
3044                ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
3045                if (ret)
3046                        goto out;
3047
3048                page = find_lock_page(inode->i_mapping, index);
3049                if (!page) {
3050                        page_cache_sync_readahead(inode->i_mapping,
3051                                                  ra, NULL, index,
3052                                                  last_index + 1 - index);
3053                        page = find_or_create_page(inode->i_mapping, index,
3054                                                   mask);
3055                        if (!page) {
3056                                btrfs_delalloc_release_metadata(inode,
3057                                                        PAGE_CACHE_SIZE);
3058                                ret = -ENOMEM;
3059                                goto out;
3060                        }
3061                }
3062
3063                if (PageReadahead(page)) {
3064                        page_cache_async_readahead(inode->i_mapping,
3065                                                   ra, NULL, page, index,
3066                                                   last_index + 1 - index);
3067                }
3068
3069                if (!PageUptodate(page)) {
3070                        btrfs_readpage(NULL, page);
3071                        lock_page(page);
3072                        if (!PageUptodate(page)) {
3073                                unlock_page(page);
3074                                page_cache_release(page);
3075                                btrfs_delalloc_release_metadata(inode,
3076                                                        PAGE_CACHE_SIZE);
3077                                ret = -EIO;
3078                                goto out;
3079                        }
3080                }
3081
3082                page_start = page_offset(page);
3083                page_end = page_start + PAGE_CACHE_SIZE - 1;
3084
3085                lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
3086
3087                set_page_extent_mapped(page);
3088
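                /*
                 * the first block of each original extent is tagged with
                 * EXTENT_BOUNDARY so that delalloc write-back stops at these
                 * boundaries and allocates a separate extent for each
                 * original extent instead of merging the whole cluster.
                 */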
3089                if (nr < cluster->nr &&
3090                    page_start + offset == cluster->boundary[nr]) {
3091                        set_extent_bits(&BTRFS_I(inode)->io_tree,
3092                                        page_start, page_end,
3093                                        EXTENT_BOUNDARY, GFP_NOFS);
3094                        nr++;
3095                }
3096
3097                btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
3098                set_page_dirty(page);
3099
3100                unlock_extent(&BTRFS_I(inode)->io_tree,
3101                              page_start, page_end);
3102                unlock_page(page);
3103                page_cache_release(page);
3104
3105                index++;
3106                balance_dirty_pages_ratelimited(inode->i_mapping);
3107                btrfs_throttle(BTRFS_I(inode)->root);
3108        }
3109        WARN_ON(nr != cluster->nr);
3110out:
3111        kfree(ra);
3112        return ret;
3113}
3114
3115static noinline_for_stack
3116int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3117                         struct file_extent_cluster *cluster)
3118{
3119        int ret;
3120
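        /*
         * extents are gathered into a cluster as long as each new extent
         * starts right at cluster->end + 1, i.e. the extents are contiguous
         * on disk.  For example (hypothetical bytenrs): extents at 100M/1M
         * and 101M/2M form one cluster with boundaries {100M, 101M} and
         * end = 103M - 1; an extent starting at 110M would first flush that
         * cluster and then start a new one.
         */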
3121        if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3122                ret = relocate_file_extent_cluster(inode, cluster);
3123                if (ret)
3124                        return ret;
3125                cluster->nr = 0;
3126        }
3127
3128        if (!cluster->nr)
3129                cluster->start = extent_key->objectid;
3130        else
3131                BUG_ON(cluster->nr >= MAX_EXTENTS);
3132        cluster->end = extent_key->objectid + extent_key->offset - 1;
3133        cluster->boundary[cluster->nr] = extent_key->objectid;
3134        cluster->nr++;
3135
3136        if (cluster->nr >= MAX_EXTENTS) {
3137                ret = relocate_file_extent_cluster(inode, cluster);
3138                if (ret)
3139                        return ret;
3140                cluster->nr = 0;
3141        }
3142        return 0;
3143}
3144
3145#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3146static int get_ref_objectid_v0(struct reloc_control *rc,
3147                               struct btrfs_path *path,
3148                               struct btrfs_key *extent_key,
3149                               u64 *ref_objectid, int *path_change)
3150{
3151        struct btrfs_key key;
3152        struct extent_buffer *leaf;
3153        struct btrfs_extent_ref_v0 *ref0;
3154        int ret;
3155        int slot;
3156
3157        leaf = path->nodes[0];
3158        slot = path->slots[0];
3159        while (1) {
3160                if (slot >= btrfs_header_nritems(leaf)) {
3161                        ret = btrfs_next_leaf(rc->extent_root, path);
3162                        if (ret < 0)
3163                                return ret;
3164                        BUG_ON(ret > 0);
3165                        leaf = path->nodes[0];
3166                        slot = path->slots[0];
3167                        if (path_change)
3168                                *path_change = 1;
3169                }
3170                btrfs_item_key_to_cpu(leaf, &key, slot);
3171                if (key.objectid != extent_key->objectid)
3172                        return -ENOENT;
3173
3174                if (key.type != BTRFS_EXTENT_REF_V0_KEY) {
3175                        slot++;
3176                        continue;
3177                }
3178                ref0 = btrfs_item_ptr(leaf, slot,
3179                                struct btrfs_extent_ref_v0);
3180                *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0);
3181                break;
3182        }
3183        return 0;
3184}
3185#endif
3186
3187/*
3188 * helper to add a tree block to the list.
3189 * the major work is getting the generation and level of the block
3190 */
3191static int add_tree_block(struct reloc_control *rc,
3192                          struct btrfs_key *extent_key,
3193                          struct btrfs_path *path,
3194                          struct rb_root *blocks)
3195{
3196        struct extent_buffer *eb;
3197        struct btrfs_extent_item *ei;
3198        struct btrfs_tree_block_info *bi;
3199        struct tree_block *block;
3200        struct rb_node *rb_node;
3201        u32 item_size;
3202        int level = -1;
3203        int generation;
3204
3205        eb = path->nodes[0];
3206        item_size = btrfs_item_size_nr(eb, path->slots[0]);
3207
3208        if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3209            item_size >= sizeof(*ei) + sizeof(*bi)) {
3210                ei = btrfs_item_ptr(eb, path->slots[0],
3211                                struct btrfs_extent_item);
3212                if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3213                        bi = (struct btrfs_tree_block_info *)(ei + 1);
3214                        level = btrfs_tree_block_level(eb, bi);
3215                } else {
3216                        level = (int)extent_key->offset;
3217                }
3218                generation = btrfs_extent_generation(eb, ei);
3219        } else {
3220#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3221                u64 ref_owner;
3222                int ret;
3223
3224                BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3225                ret = get_ref_objectid_v0(rc, path, extent_key,
3226                                          &ref_owner, NULL);
3227                if (ret < 0)
3228                        return ret;
3229                BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
3230                level = (int)ref_owner;
3231                /* FIXME: get real generation */
3232                generation = 0;
3233#else
3234                BUG();
3235#endif
3236        }
3237
3238        btrfs_release_path(path);
3239
3240        BUG_ON(level == -1);
3241
3242        block = kmalloc(sizeof(*block), GFP_NOFS);
3243        if (!block)
3244                return -ENOMEM;
3245
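        /*
         * block->key temporarily holds the block size in ->objectid and the
         * generation in ->offset; get_tree_block_key() later reads the block
         * and replaces it with the real first key (key_ready is 0 until then).
         */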
3246        block->bytenr = extent_key->objectid;
3247        block->key.objectid = rc->extent_root->leafsize;
3248        block->key.offset = generation;
3249        block->level = level;
3250        block->key_ready = 0;
3251
3252        rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
3253        if (rb_node)
3254                backref_tree_panic(rb_node, -EEXIST, block->bytenr);
3255
3256        return 0;
3257}
3258
3259/*
3260 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3261 */
3262static int __add_tree_block(struct reloc_control *rc,
3263                            u64 bytenr, u32 blocksize,
3264                            struct rb_root *blocks)
3265{
3266        struct btrfs_path *path;
3267        struct btrfs_key key;
3268        int ret;
3269
3270        if (tree_block_processed(bytenr, blocksize, rc))
3271                return 0;
3272
3273        if (tree_search(blocks, bytenr))
3274                return 0;
3275
3276        path = btrfs_alloc_path();
3277        if (!path)
3278                return -ENOMEM;
3279
3280        key.objectid = bytenr;
3281        key.type = BTRFS_EXTENT_ITEM_KEY;
3282        key.offset = blocksize;
3283
3284        path->search_commit_root = 1;
3285        path->skip_locking = 1;
3286        ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3287        if (ret < 0)
3288                goto out;
3289
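        /*
         * if there is no EXTENT_ITEM at this bytenr the search may land on
         * the skinny METADATA_ITEM for the same block; treat that as a hit.
         */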
3290        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3291        if (ret > 0) {
3292                if (key.objectid == bytenr &&
3293                    key.type == BTRFS_METADATA_ITEM_KEY)
3294                        ret = 0;
3295        }
3296        BUG_ON(ret);
3297
3298        ret = add_tree_block(rc, &key, path, blocks);
3299out:
3300        btrfs_free_path(path);
3301        return ret;
3302}
3303
3304/*
3305 * helper to check if the block uses full backrefs for pointers in it
3306 */
3307static int block_use_full_backref(struct reloc_control *rc,
3308                                  struct extent_buffer *eb)
3309{
3310        u64 flags;
3311        int ret;
3312
3313        if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
3314            btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
3315                return 1;
3316
3317        ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
3318                                       eb->start, btrfs_header_level(eb), 1,
3319                                       NULL, &flags);
3320        BUG_ON(ret);
3321
3322        if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
3323                ret = 1;
3324        else
3325                ret = 0;
3326        return ret;
3327}
3328
3329static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3330                                    struct inode *inode, u64 ino)
3331{
3332        struct btrfs_key key;
3333        struct btrfs_path *path;
3334        struct btrfs_root *root = fs_info->tree_root;
3335        struct btrfs_trans_handle *trans;
3336        int ret = 0;
3337
3338        if (inode)
3339                goto truncate;
3340
3341        key.objectid = ino;
3342        key.type = BTRFS_INODE_ITEM_KEY;
3343        key.offset = 0;
3344
3345        inode = btrfs_iget(fs_info->sb, &key, root, NULL);
3346        if (IS_ERR(inode) || is_bad_inode(inode)) {
3347                if (!IS_ERR(inode))
3348                        iput(inode);
3349                return -ENOENT;
3350        }
3351
3352truncate:
3353        ret = btrfs_check_trunc_cache_free_space(root,
3354                                                 &fs_info->global_block_rsv);
3355        if (ret)
3356                goto out;
3357
3358        path = btrfs_alloc_path();
3359        if (!path) {
3360                ret = -ENOMEM;
3361                goto out;
3362        }
3363
3364        trans = btrfs_join_transaction(root);
3365        if (IS_ERR(trans)) {
3366                btrfs_free_path(path);
3367                ret = PTR_ERR(trans);
3368                goto out;
3369        }
3370
3371        ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
3372
3373        btrfs_free_path(path);
3374        btrfs_end_transaction(trans, root);
3375        btrfs_btree_balance_dirty(root);
3376out:
3377        iput(inode);
3378        return ret;
3379}
3380
3381/*
3382 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
3383 * this function scans the fs tree to find tree blocks that reference the data extent
3384 */
3385static int find_data_references(struct reloc_control *rc,
3386                                struct btrfs_key *extent_key,
3387                                struct extent_buffer *leaf,
3388                                struct btrfs_extent_data_ref *ref,
3389                                struct rb_root *blocks)
3390{
3391        struct btrfs_path *path;
3392        struct tree_block *block;
3393        struct btrfs_root *root;
3394        struct btrfs_file_extent_item *fi;
3395        struct rb_node *rb_node;
3396        struct btrfs_key key;
3397        u64 ref_root;
3398        u64 ref_objectid;
3399        u64 ref_offset;
3400        u32 ref_count;
3401        u32 nritems;
3402        int err = 0;
3403        int added = 0;
3404        int counted;
3405        int ret;
3406
3407        ref_root = btrfs_extent_data_ref_root(leaf, ref);
3408        ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
3409        ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
3410        ref_count = btrfs_extent_data_ref_count(leaf, ref);
3411
3412        /*
3413         * This is an extent belonging to the free space cache, let's just delete
3414         * it and redo the search.
3415         */
3416        if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
3417                ret = delete_block_group_cache(rc->extent_root->fs_info,
3418                                               NULL, ref_objectid);
3419                if (ret != -ENOENT)
3420                        return ret;
3421                ret = 0;
3422        }
3423
3424        path = btrfs_alloc_path();
3425        if (!path)
3426                return -ENOMEM;
3427        path->reada = 1;
3428
3429        root = read_fs_root(rc->extent_root->fs_info, ref_root);
3430        if (IS_ERR(root)) {
3431                err = PTR_ERR(root);
3432                goto out;
3433        }
3434
3435        key.objectid = ref_objectid;
3436        key.type = BTRFS_EXTENT_DATA_KEY;
3437        if (ref_offset > ((u64)-1 << 32))
3438                key.offset = 0;
3439        else
3440                key.offset = ref_offset;
3441
3442        path->search_commit_root = 1;
3443        path->skip_locking = 1;
3444        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3445        if (ret < 0) {
3446                err = ret;
3447                goto out;
3448        }
3449
3450        leaf = path->nodes[0];
3451        nritems = btrfs_header_nritems(leaf);
3452        /*
3453         * references in tree blocks that use full backrefs are not
3454         * counted here
3455         */
3456        if (block_use_full_backref(rc, leaf))
3457                counted = 0;
3458        else
3459                counted = 1;
3460        rb_node = tree_search(blocks, leaf->start);
3461        if (rb_node) {
3462                if (counted)
3463                        added = 1;
3464                else
3465                        path->slots[0] = nritems;
3466        }
3467
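        /*
         * walk the EXTENT_DATA items of this inode and count the ones that
         * point at the extent being relocated, until ref_count matching
         * references have been seen; every leaf that contains such an item
         * is added to the blocks tree so its data pointers get updated.
         * leaves that use full backrefs are skipped here, since they are
         * found through their SHARED_DATA_REF backrefs instead.
         */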
3468        while (ref_count > 0) {
3469                while (path->slots[0] >= nritems) {
3470                        ret = btrfs_next_leaf(root, path);
3471                        if (ret < 0) {
3472                                err = ret;
3473                                goto out;
3474                        }
3475                        if (ret > 0) {
3476                                WARN_ON(1);
3477                                goto out;
3478                        }
3479
3480                        leaf = path->nodes[0];
3481                        nritems = btrfs_header_nritems(leaf);
3482                        added = 0;
3483
3484                        if (block_use_full_backref(rc, leaf))
3485                                counted = 0;
3486                        else
3487                                counted = 1;
3488                        rb_node = tree_search(blocks, leaf->start);
3489                        if (rb_node) {
3490                                if (counted)
3491                                        added = 1;
3492                                else
3493                                        path->slots[0] = nritems;
3494                        }
3495                }
3496
3497                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3498                if (key.objectid != ref_objectid ||
3499                    key.type != BTRFS_EXTENT_DATA_KEY) {
3500                        WARN_ON(1);
3501                        break;
3502                }
3503
3504                fi = btrfs_item_ptr(leaf, path->slots[0],
3505                                    struct btrfs_file_extent_item);
3506
3507                if (btrfs_file_extent_type(leaf, fi) ==
3508                    BTRFS_FILE_EXTENT_INLINE)
3509                        goto next;
3510
3511                if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
3512                    extent_key->objectid)
3513                        goto next;
3514
3515                key.offset -= btrfs_file_extent_offset(leaf, fi);
3516                if (key.offset != ref_offset)
3517                        goto next;
3518
3519                if (counted)
3520                        ref_count--;
3521                if (added)
3522                        goto next;
3523
3524                if (!tree_block_processed(leaf->start, leaf->len, rc)) {
3525                        block = kmalloc(sizeof(*block), GFP_NOFS);
3526                        if (!block) {
3527                                err = -ENOMEM;
3528                                break;
3529                        }
3530                        block->bytenr = leaf->start;
3531                        btrfs_item_key_to_cpu(leaf, &block->key, 0);
3532                        block->level = 0;
3533                        block->key_ready = 1;
3534                        rb_node = tree_insert(blocks, block->bytenr,
3535                                              &block->rb_node);
3536                        if (rb_node)
3537                                backref_tree_panic(rb_node, -EEXIST,
3538                                                   block->bytenr);
3539                }
3540                if (counted)
3541                        added = 1;
3542                else
3543                        path->slots[0] = nritems;
3544next:
3545                path->slots[0]++;
3546
3547        }
3548out:
3549        btrfs_free_path(path);
3550        return err;
3551}
3552
3553/*
3554 * helper to find all tree blocks that reference a given data extent
3555 */
3556static noinline_for_stack
3557int add_data_references(struct reloc_control *rc,
3558                        struct btrfs_key *extent_key,
3559                        struct btrfs_path *path,
3560                        struct rb_root *blocks)
3561{
3562        struct btrfs_key key;
3563        struct extent_buffer *eb;
3564        struct btrfs_extent_data_ref *dref;
3565        struct btrfs_extent_inline_ref *iref;
3566        unsigned long ptr;
3567        unsigned long end;
3568        u32 blocksize = btrfs_level_size(rc->extent_root, 0);
3569        int ret;
3570        int err = 0;
3571
3572        eb = path->nodes[0];
3573        ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
3574        end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
3575#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3576        if (ptr + sizeof(struct btrfs_extent_item_v0) == end)
3577                ptr = end;
3578        else
3579#endif
3580                ptr += sizeof(struct btrfs_extent_item);
3581
3582        while (ptr < end) {
3583                iref = (struct btrfs_extent_inline_ref *)ptr;
3584                key.type = btrfs_extent_inline_ref_type(eb, iref);
3585                if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
3586                        key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3587                        ret = __add_tree_block(rc, key.offset, blocksize,
3588                                               blocks);
3589                } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
3590                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
3591                        ret = find_data_references(rc, extent_key,
3592                                                   eb, dref, blocks);
3593                } else {
3594                        BUG();
3595                }
3596                ptr += btrfs_extent_inline_ref_size(key.type);
3597        }
3598        WARN_ON(ptr > end);
3599
3600        while (1) {
3601                cond_resched();
3602                eb = path->nodes[0];
3603                if (path->slots[0] >= btrfs_header_nritems(eb)) {
3604                        ret = btrfs_next_leaf(rc->extent_root, path);
3605                        if (ret < 0) {
3606                                err = ret;
3607                                break;
3608                        }
3609                        if (ret > 0)
3610                                break;
3611                        eb = path->nodes[0];
3612                }
3613
3614                btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
3615                if (key.objectid != extent_key->objectid)
3616                        break;
3617
3618#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3619                if (key.type == BTRFS_SHARED_DATA_REF_KEY ||
3620                    key.type == BTRFS_EXTENT_REF_V0_KEY) {
3621#else
3622                BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
3623                if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
3624#endif
3625                        ret = __add_tree_block(rc, key.offset, blocksize,
3626                                               blocks);
3627                } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
3628                        dref = btrfs_item_ptr(eb, path->slots[0],
3629                                              struct btrfs_extent_data_ref);
3630                        ret = find_data_references(rc, extent_key,
3631                                                   eb, dref, blocks);
3632                } else {
3633                        ret = 0;
3634                }
3635                if (ret) {
3636                        err = ret;
3637                        break;
3638                }
3639                path->slots[0]++;
3640        }
3641        btrfs_release_path(path);
3642        if (err)
3643                free_block_list(blocks);
3644        return err;
3645}
3646
3647/*
3648 * helper to find next unprocessed extent
3649 */
3650static noinline_for_stack
3651int find_next_extent(struct btrfs_trans_handle *trans,
3652                     struct reloc_control *rc, struct btrfs_path *path,
3653                     struct btrfs_key *extent_key)
3654{
3655        struct btrfs_key key;
3656        struct extent_buffer *leaf;
3657        u64 start, end, last;
3658        int ret;
3659
3660        last = rc->block_group->key.objectid + rc->block_group->key.offset;
3661        while (1) {
3662                cond_resched();
3663                if (rc->search_start >= last) {
3664                        ret = 1;
3665                        break;
3666                }
3667
3668                key.objectid = rc->search_start;
3669                key.type = BTRFS_EXTENT_ITEM_KEY;
3670                key.offset = 0;
3671
3672                path->search_commit_root = 1;
3673                path->skip_locking = 1;
3674                ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3675                                        0, 0);
3676                if (ret < 0)
3677                        break;
3678next:
3679                leaf = path->nodes[0];
3680                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3681                        ret = btrfs_next_leaf(rc->extent_root, path);
3682                        if (ret != 0)
3683                                break;
3684                        leaf = path->nodes[0];
3685                }
3686
3687                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3688                if (key.objectid >= last) {
3689                        ret = 1;
3690                        break;
3691                }
3692
3693                if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3694                    key.type != BTRFS_METADATA_ITEM_KEY) {
3695                        path->slots[0]++;
3696                        goto next;
3697                }
3698
3699                if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3700                    key.objectid + key.offset <= rc->search_start) {
3701                        path->slots[0]++;
3702                        goto next;
3703                }
3704
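                /*
                 * for skinny METADATA_ITEMs the key offset is the level,
                 * not the length, so use leafsize as the block size here.
                 */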
3705                if (key.type == BTRFS_METADATA_ITEM_KEY &&
3706                    key.objectid + rc->extent_root->leafsize <=
3707                    rc->search_start) {
3708                        path->slots[0]++;
3709                        goto next;
3710                }
3711
3712                ret = find_first_extent_bit(&rc->processed_blocks,
3713                                            key.objectid, &start, &end,
3714                                            EXTENT_DIRTY, NULL);
3715
3716                if (ret == 0 && start <= key.objectid) {
3717                        btrfs_release_path(path);
3718                        rc->search_start = end + 1;
3719                } else {
3720                        if (key.type == BTRFS_EXTENT_ITEM_KEY)
3721                                rc->search_start = key.objectid + key.offset;
3722                        else
3723                                rc->search_start = key.objectid +
3724                                        rc->extent_root->leafsize;
3725                        memcpy(extent_key, &key, sizeof(key));
3726                        return 0;
3727                }
3728        }
3729        btrfs_release_path(path);
3730        return ret;
3731}
3732
3733static void set_reloc_control(struct reloc_control *rc)
3734{
3735        struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3736
3737        mutex_lock(&fs_info->reloc_mutex);
3738        fs_info->reloc_ctl = rc;
3739        mutex_unlock(&fs_info->reloc_mutex);
3740}
3741
3742static void unset_reloc_control(struct reloc_control *rc)
3743{
3744        struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3745
3746        mutex_lock(&fs_info->reloc_mutex);
3747        fs_info->reloc_ctl = NULL;
3748        mutex_unlock(&fs_info->reloc_mutex);
3749}
3750
3751static int check_extent_flags(u64 flags)
3752{
3753        if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3754            (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3755                return 1;
3756        if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
3757            !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3758                return 1;
3759        if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3760            (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
3761                return 1;
3762        return 0;
3763}
3764
3765static noinline_for_stack
3766int prepare_to_relocate(struct reloc_control *rc)
3767{
3768        struct btrfs_trans_handle *trans;
3769        int ret;
3770
3771        rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
3772                                              BTRFS_BLOCK_RSV_TEMP);
3773        if (!rc->block_rsv)
3774                return -ENOMEM;
3775
3776        /*
3777         * reserve some space for creating reloc trees.
3778         * btrfs_init_reloc_root will use this reservation when there
3779         * is no reservation in the transaction handle.
3780         */
3781        ret = btrfs_block_rsv_add(rc->extent_root, rc->block_rsv,
3782                                  rc->extent_root->nodesize * 256,
3783                                  BTRFS_RESERVE_FLUSH_ALL);
3784        if (ret)
3785                return ret;
3786
3787        memset(&rc->cluster, 0, sizeof(rc->cluster));
3788        rc->search_start = rc->block_group->key.objectid;
3789        rc->extents_found = 0;
3790        rc->nodes_relocated = 0;
3791        rc->merging_rsv_size = 0;
3792
3793        rc->create_reloc_tree = 1;
3794        set_reloc_control(rc);
3795
3796        trans = btrfs_join_transaction(rc->extent_root);
3797        if (IS_ERR(trans)) {
3798                unset_reloc_control(rc);
3799                /*
3800                 * the extent tree is not a ref_cow tree and has no reloc_root to
3801                 * clean up.  And callers are responsible for freeing the above
3802                 * block rsv.
3803                 */
3804                return PTR_ERR(trans);
3805        }
3806        btrfs_commit_transaction(trans, rc->extent_root);
3807        return 0;
3808}
3809
3810static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3811{
3812        struct rb_root blocks = RB_ROOT;
3813        struct btrfs_key key;
3814        struct btrfs_trans_handle *trans = NULL;
3815        struct btrfs_path *path;
3816        struct btrfs_extent_item *ei;
3817        u64 flags;
3818        u32 item_size;
3819        int ret;
3820        int err = 0;
3821        int progress = 0;
3822
3823        path = btrfs_alloc_path();
3824        if (!path)
3825                return -ENOMEM;
3826        path->reada = 1;
3827
3828        ret = prepare_to_relocate(rc);
3829        if (ret) {
3830                err = ret;
3831                goto out_free;
3832        }
3833
3834        while (1) {
3835                progress++;
3836                trans = btrfs_start_transaction(rc->extent_root, 0);
3837                if (IS_ERR(trans)) {
3838                        err = PTR_ERR(trans);
3839                        trans = NULL;
3840                        break;
3841                }
3842restart:
3843                if (update_backref_cache(trans, &rc->backref_cache)) {
3844                        btrfs_end_transaction(trans, rc->extent_root);
3845                        continue;
3846                }
3847
3848                ret = find_next_extent(trans, rc, path, &key);
3849                if (ret < 0)
3850                        err = ret;
3851                if (ret != 0)
3852                        break;
3853
3854                rc->extents_found++;
3855
3856                ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3857                                    struct btrfs_extent_item);
3858                item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
3859                if (item_size >= sizeof(*ei)) {
3860                        flags = btrfs_extent_flags(path->nodes[0], ei);
3861                        ret = check_extent_flags(flags);
3862                        BUG_ON(ret);
3863
3864                } else {
3865#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3866                        u64 ref_owner;
3867                        int path_change = 0;
3868
3869                        BUG_ON(item_size !=
3870                               sizeof(struct btrfs_extent_item_v0));
3871                        ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
3872                                                  &path_change);
3873                        if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
3874                                flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
3875                        else
3876                                flags = BTRFS_EXTENT_FLAG_DATA;
3877
3878                        if (path_change) {
3879                                btrfs_release_path(path);
3880
3881                                path->search_commit_root = 1;
3882                                path->skip_locking = 1;
3883                                ret = btrfs_search_slot(NULL, rc->extent_root,
3884                                                        &key, path, 0, 0);
3885                                if (ret < 0) {
3886                                        err = ret;
3887                                        break;
3888                                }
3889                                BUG_ON(ret > 0);
3890                        }
3891#else
3892                        BUG();
3893#endif
3894                }
3895
3896                if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3897                        ret = add_tree_block(rc, &key, path, &blocks);
3898                } else if (rc->stage == UPDATE_DATA_PTRS &&
3899                           (flags & BTRFS_EXTENT_FLAG_DATA)) {
3900                        ret = add_data_references(rc, &key, path, &blocks);
3901                } else {
3902                        btrfs_release_path(path);
3903                        ret = 0;
3904                }
3905                if (ret < 0) {
3906                        err = ret;
3907                        break;
3908                }
3909
3910                if (!RB_EMPTY_ROOT(&blocks)) {
3911                        ret = relocate_tree_blocks(trans, rc, &blocks);
3912                        if (ret < 0) {
3913                                if (ret != -EAGAIN) {
3914                                        err = ret;
3915                                        break;
3916                                }
3917                                rc->extents_found--;
3918                                rc->search_start = key.objectid;
3919                        }
3920                }
3921
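                /*
                 * when the reservation runs low, fall back to committing the
                 * transaction; the commit unpins extents freed during it, so
                 * their space becomes available for the next iterations.
                 */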
3922                ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5);
3923                if (ret < 0) {
3924                        if (ret != -ENOSPC) {
3925                                err = ret;
3926                                WARN_ON(1);
3927                                break;
3928                        }
3929                        rc->commit_transaction = 1;
3930                }
3931
3932                if (rc->commit_transaction) {
3933                        rc->commit_transaction = 0;
3934                        ret = btrfs_commit_transaction(trans, rc->extent_root);
3935                        BUG_ON(ret);
3936                } else {
3937                        btrfs_end_transaction_throttle(trans, rc->extent_root);
3938                        btrfs_btree_balance_dirty(rc->extent_root);
3939                }
3940                trans = NULL;
3941
3942                if (rc->stage == MOVE_DATA_EXTENTS &&
3943                    (flags & BTRFS_EXTENT_FLAG_DATA)) {
3944                        rc->found_file_extent = 1;
3945                        ret = relocate_data_extent(rc->data_inode,
3946                                                   &key, &rc->cluster);
3947                        if (ret < 0) {
3948                                err = ret;
3949                                break;
3950                        }
3951                }
3952        }
3953        if (trans && progress && err == -ENOSPC) {
3954                ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
3955                                              rc->block_group->flags);
3956                if (ret == 0) {
3957                        err = 0;
3958                        progress = 0;
3959                        goto restart;
3960                }
3961        }
3962
3963        btrfs_release_path(path);
3964        clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
3965                          GFP_NOFS);
3966
3967        if (trans) {
3968                btrfs_end_transaction_throttle(trans, rc->extent_root);
3969                btrfs_btree_balance_dirty(rc->extent_root);
3970        }
3971
3972        if (!err) {
3973                ret = relocate_file_extent_cluster(rc->data_inode,
3974                                                   &rc->cluster);
3975                if (ret < 0)
3976                        err = ret;
3977        }
3978
3979        rc->create_reloc_tree = 0;
3980        set_reloc_control(rc);
3981
3982        backref_cache_cleanup(&rc->backref_cache);
3983        btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
3984
3985        err = prepare_to_merge(rc, err);
3986
3987        merge_reloc_roots(rc);
3988
3989        rc->merge_reloc_tree = 0;
3990        unset_reloc_control(rc);
3991        btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
3992
3993        /* get rid of pinned extents */
3994        trans = btrfs_join_transaction(rc->extent_root);
3995        if (IS_ERR(trans))
3996                err = PTR_ERR(trans);
3997        else
3998                btrfs_commit_transaction(trans, rc->extent_root);
3999out_free:
4000        btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
4001        btrfs_free_path(path);
4002        return err;
4003}
4004
4005static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
4006                                 struct btrfs_root *root, u64 objectid)
4007{
4008        struct btrfs_path *path;
4009        struct btrfs_inode_item *item;
4010        struct extent_buffer *leaf;
4011        int ret;
4012
4013        path = btrfs_alloc_path();
4014        if (!path)
4015                return -ENOMEM;
4016
4017        ret = btrfs_insert_empty_inode(trans, root, path, objectid);
4018        if (ret)
4019                goto out;
4020
4021        leaf = path->nodes[0];
4022        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
4023        memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
4024        btrfs_set_inode_generation(leaf, item, 1);
4025        btrfs_set_inode_size(leaf, item, 0);
4026        btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
4027        btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
4028                                          BTRFS_INODE_PREALLOC);
4029        btrfs_mark_buffer_dirty(leaf);
4030        btrfs_release_path(path);
4031out:
4032        btrfs_free_path(path);
4033        return ret;
4034}
4035
4036/*
4037 * helper to create an inode for data relocation.
4038 * the inode lives in the data relocation tree and its link count is 0
4039 */
4040static noinline_for_stack
4041struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
4042                                 struct btrfs_block_group_cache *group)
4043{
4044        struct inode *inode = NULL;
4045        struct btrfs_trans_handle *trans;
4046        struct btrfs_root *root;
4047        struct btrfs_key key;
4048        u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
4049        int err = 0;
4050
4051        root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
4052        if (IS_ERR(root))
4053                return ERR_CAST(root);
4054
4055        trans = btrfs_start_transaction(root, 6);
4056        if (IS_ERR(trans))
4057                return ERR_CAST(trans);
4058
4059        err = btrfs_find_free_objectid(root, &objectid);
4060        if (err)
4061                goto out;
4062
4063        err = __insert_orphan_inode(trans, root, objectid);
4064        BUG_ON(err);
4065
4066        key.objectid = objectid;
4067        key.type = BTRFS_INODE_ITEM_KEY;
4068        key.offset = 0;
4069        inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
4070        BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
4071        BTRFS_I(inode)->index_cnt = group->key.objectid;
4072
4073        err = btrfs_orphan_add(trans, inode);
4074out:
4075        btrfs_end_transaction(trans, root);
4076        btrfs_btree_balance_dirty(root);
4077        if (err) {
4078                if (inode)
4079                        iput(inode);
4080                inode = ERR_PTR(err);
4081        }
4082        return inode;
4083}
4084
4085static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
4086{
4087        struct reloc_control *rc;
4088
4089        rc = kzalloc(sizeof(*rc), GFP_NOFS);
4090        if (!rc)
4091                return NULL;
4092
4093        INIT_LIST_HEAD(&rc->reloc_roots);
4094        backref_cache_init(&rc->backref_cache);
4095        mapping_tree_init(&rc->reloc_root_tree);
4096        extent_io_tree_init(&rc->processed_blocks,
4097                            fs_info->btree_inode->i_mapping);
4098        return rc;
4099}
4100
4101/*
4102 * function to relocate all extents in a block group.
4103 */
4104int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
4105{
4106        struct btrfs_fs_info *fs_info = extent_root->fs_info;
4107        struct reloc_control *rc;
4108        struct inode *inode;
4109        struct btrfs_path *path;
4110        int ret;
4111        int rw = 0;
4112        int err = 0;
4113
4114        rc = alloc_reloc_control(fs_info);
4115        if (!rc)
4116                return -ENOMEM;
4117
4118        rc->extent_root = extent_root;
4119
4120        rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
4121        BUG_ON(!rc->block_group);
4122
4123        if (!rc->block_group->ro) {
4124                ret = btrfs_set_block_group_ro(extent_root, rc->block_group);
4125                if (ret) {
4126                        err = ret;
4127                        goto out;
4128                }
4129                rw = 1;
4130        }
4131
4132        path = btrfs_alloc_path();
4133        if (!path) {
4134                err = -ENOMEM;
4135                goto out;
4136        }
4137
4138        inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group,
4139                                        path);
4140        btrfs_free_path(path);
4141
4142        if (!IS_ERR(inode))
4143                ret = delete_block_group_cache(fs_info, inode, 0);
4144        else
4145                ret = PTR_ERR(inode);
4146
4147        if (ret && ret != -ENOENT) {
4148                err = ret;
4149                goto out;
4150        }
4151
4152        rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4153        if (IS_ERR(rc->data_inode)) {
4154                err = PTR_ERR(rc->data_inode);
4155                rc->data_inode = NULL;
4156                goto out;
4157        }
4158
4159        printk(KERN_INFO "btrfs: relocating block group %llu flags %llu\n",
4160               (unsigned long long)rc->block_group->key.objectid,
4161               (unsigned long long)rc->block_group->flags);
4162
4163        ret = btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
4164        if (ret < 0) {
4165                err = ret;
4166                goto out;
4167        }
4168        btrfs_wait_ordered_extents(fs_info->tree_root, 0);
4169
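        /*
         * relocation runs in stages: MOVE_DATA_EXTENTS relocates tree blocks
         * and copies file data into the relocation inode; if any file extents
         * were found, a second UPDATE_DATA_PTRS pass rewrites the file extent
         * pointers in the fs trees to the new locations.  the loop repeats
         * until a pass finds no extents left in the block group.
         */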
4170        while (1) {
4171                mutex_lock(&fs_info->cleaner_mutex);
4172                ret = relocate_block_group(rc);
4173                mutex_unlock(&fs_info->cleaner_mutex);
4174                if (ret < 0) {
4175                        err = ret;
4176                        goto out;
4177                }
4178
4179                if (rc->extents_found == 0)
4180                        break;
4181
4182                printk(KERN_INFO "btrfs: found %llu extents\n",
4183                        (unsigned long long)rc->extents_found);
4184
4185                if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4186                        btrfs_wait_ordered_range(rc->data_inode, 0, (u64)-1);
4187                        invalidate_mapping_pages(rc->data_inode->i_mapping,
4188                                                 0, -1);
4189                        rc->stage = UPDATE_DATA_PTRS;
4190                }
4191        }
4192
4193        filemap_write_and_wait_range(fs_info->btree_inode->i_mapping,
4194                                     rc->block_group->key.objectid,
4195                                     rc->block_group->key.objectid +
4196                                     rc->block_group->key.offset - 1);
4197
4198        WARN_ON(rc->block_group->pinned > 0);
4199        WARN_ON(rc->block_group->reserved > 0);
4200        WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
4201out:
4202        if (err && rw)
4203                btrfs_set_block_group_rw(extent_root, rc->block_group);
4204        iput(rc->data_inode);
4205        btrfs_put_block_group(rc->block_group);
4206        kfree(rc);
4207        return err;
4208}
4209
4210static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4211{
4212        struct btrfs_trans_handle *trans;
4213        int ret, err;
4214
4215        trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
4216        if (IS_ERR(trans))
4217                return PTR_ERR(trans);
4218
4219        memset(&root->root_item.drop_progress, 0,
4220                sizeof(root->root_item.drop_progress));
4221        root->root_item.drop_level = 0;
4222        btrfs_set_root_refs(&root->root_item, 0);
4223        ret = btrfs_update_root(trans, root->fs_info->tree_root,
4224                                &root->root_key, &root->root_item);
4225
4226        err = btrfs_end_transaction(trans, root->fs_info->tree_root);
4227        if (err)
4228                return err;
4229        return ret;
4230}
4231
4232/*
4233 * recover relocation interrupted by a system crash.
4234 *
4235 * this function resumes merging reloc trees with the corresponding fs trees.
4236 * this is important for preserving the sharing of tree blocks
4237 */
4238int btrfs_recover_relocation(struct btrfs_root *root)
4239{
4240        LIST_HEAD(reloc_roots);
4241        struct btrfs_key key;
4242        struct btrfs_root *fs_root;
4243        struct btrfs_root *reloc_root;
4244        struct btrfs_path *path;
4245        struct extent_buffer *leaf;
4246        struct reloc_control *rc = NULL;
4247        struct btrfs_trans_handle *trans;
4248        int ret;
4249        int err = 0;
4250
4251        path = btrfs_alloc_path();
4252        if (!path)
4253                return -ENOMEM;
4254        path->reada = -1;
4255
4256        key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4257        key.type = BTRFS_ROOT_ITEM_KEY;
4258        key.offset = (u64)-1;
4259
4260        while (1) {
4261                ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key,
4262                                        path, 0, 0);
4263                if (ret < 0) {
4264                        err = ret;
4265                        goto out;
4266                }
4267                if (ret > 0) {
4268                        if (path->slots[0] == 0)
4269                                break;
4270                        path->slots[0]--;
4271                }
4272                leaf = path->nodes[0];
4273                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4274                btrfs_release_path(path);
4275
4276                if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4277                    key.type != BTRFS_ROOT_ITEM_KEY)
4278                        break;
4279
4280                reloc_root = btrfs_read_fs_root_no_radix(root, &key);
4281                if (IS_ERR(reloc_root)) {
4282                        err = PTR_ERR(reloc_root);
4283                        goto out;
4284                }
4285
4286                list_add(&reloc_root->root_list, &reloc_roots);
4287
4288                if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4289                        fs_root = read_fs_root(root->fs_info,
4290                                               reloc_root->root_key.offset);
4291                        if (IS_ERR(fs_root)) {
4292                                ret = PTR_ERR(fs_root);
4293                                if (ret != -ENOENT) {
4294                                        err = ret;
4295                                        goto out;
4296                                }
4297                                ret = mark_garbage_root(reloc_root);
4298                                if (ret < 0) {
4299                                        err = ret;
4300                                        goto out;
4301                                }
4302                        }
4303                }
4304
4305                if (key.offset == 0)
4306                        break;
4307
4308                key.offset--;
4309        }
4310        btrfs_release_path(path);
4311
4312        if (list_empty(&reloc_roots))
4313                goto out;
4314
4315        rc = alloc_reloc_control(root->fs_info);
4316        if (!rc) {
4317                err = -ENOMEM;
4318                goto out;
4319        }
4320
4321        rc->extent_root = root->fs_info->extent_root;
4322
4323        set_reloc_control(rc);
4324
4325        trans = btrfs_join_transaction(rc->extent_root);
4326        if (IS_ERR(trans)) {
4327                unset_reloc_control(rc);
4328                err = PTR_ERR(trans);
4329                goto out_free;
4330        }
4331
4332        rc->merge_reloc_tree = 1;
4333
4334        while (!list_empty(&reloc_roots)) {
4335                reloc_root = list_entry(reloc_roots.next,
4336                                        struct btrfs_root, root_list);
4337                list_del(&reloc_root->root_list);
4338
4339                if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4340                        list_add_tail(&reloc_root->root_list,
4341                                      &rc->reloc_roots);
4342                        continue;
4343                }
4344
4345                fs_root = read_fs_root(root->fs_info,
4346                                       reloc_root->root_key.offset);
4347                if (IS_ERR(fs_root)) {
4348                        err = PTR_ERR(fs_root);
4349                        goto out_free;
4350                }
4351
4352                err = __add_reloc_root(reloc_root);
4353                BUG_ON(err < 0); /* -ENOMEM or logic error */
4354                fs_root->reloc_root = reloc_root;
4355        }
4356
4357        err = btrfs_commit_transaction(trans, rc->extent_root);
4358        if (err)
4359                goto out_free;
4360
4361        merge_reloc_roots(rc);
4362
4363        unset_reloc_control(rc);
4364
4365        trans = btrfs_join_transaction(rc->extent_root);
4366        if (IS_ERR(trans))
4367                err = PTR_ERR(trans);
4368        else
4369                err = btrfs_commit_transaction(trans, rc->extent_root);
4370out_free:
4371        kfree(rc);
4372out:
4373        if (!list_empty(&reloc_roots))
4374                free_reloc_roots(&reloc_roots);
4375
4376        btrfs_free_path(path);
4377
4378        if (err == 0) {
4379                /* clean up the orphan inode in the data relocation tree */
4380                fs_root = read_fs_root(root->fs_info,
4381                                       BTRFS_DATA_RELOC_TREE_OBJECTID);
4382                if (IS_ERR(fs_root))
4383                        err = PTR_ERR(fs_root);
4384                else
4385                        err = btrfs_orphan_cleanup(fs_root);
4386        }
4387        return err;
4388}
4389
4390/*
4391 * helper to add ordered checksums for data relocation.
4392 *
4393 * cloning the checksums properly handles nodatasum extents.
4394 * it also saves the CPU time of re-calculating the checksums.
4395 */
4396int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4397{
4398        struct btrfs_ordered_sum *sums;
4399        struct btrfs_sector_sum *sector_sum;
4400        struct btrfs_ordered_extent *ordered;
4401        struct btrfs_root *root = BTRFS_I(inode)->root;
4402        size_t offset;
4403        int ret;
4404        u64 disk_bytenr;
4405        LIST_HEAD(list);
4406
4407        ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4408        BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
4409
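            /*
             * the data reloc inode maps file offsets linearly onto the block
             * group being relocated (index_cnt holds the block group start),
             * so this recovers the original disk bytenr of the data
             */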
4410        disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
4411        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
4412                                       disk_bytenr + len - 1, &list, 0);
4413        if (ret)
4414                goto out;
4415
4416        while (!list_empty(&list)) {
4417                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4418                list_del_init(&sums->list);
4419
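                    /* rebase the checksums onto the new extent location */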
4420                sector_sum = sums->sums;
4421                sums->bytenr = ordered->start;
4422
4423                offset = 0;
4424                while (offset < sums->len) {
4425                        sector_sum->bytenr += ordered->start - disk_bytenr;
4426                        sector_sum++;
4427                        offset += root->sectorsize;
4428                }
4429
4430                btrfs_add_ordered_sum(inode, ordered, sums);
4431        }
4432out:
4433        btrfs_put_ordered_extent(ordered);
4434        return ret;
4435}
4436
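    /*
     * hook called when a tree block is COWed. if relocation is in
     * progress, it keeps the backref cache in sync with the new copy
     * of the block and, during the data pointer update stage, rewrites
     * the file extent pointers in the newly COWed leaf.
     */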
4437void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4438                           struct btrfs_root *root, struct extent_buffer *buf,
4439                           struct extent_buffer *cow)
4440{
4441        struct reloc_control *rc;
4442        struct backref_node *node;
4443        int first_cow = 0;
4444        int level;
4445        int ret;
4446
4447        rc = root->fs_info->reloc_ctl;
4448        if (!rc)
4449                return;
4450
4451        BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
4452               root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
4453
4454        level = btrfs_header_level(buf);
4455        if (btrfs_header_generation(buf) <=
4456            btrfs_root_last_snapshot(&root->root_item))
4457                first_cow = 1;
4458
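            /*
             * while reloc trees are being built, keep the cached backref
             * node for this level pointing at the newly COWed copy of the block
             */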
4459        if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4460            rc->create_reloc_tree) {
4461                WARN_ON(!first_cow && level == 0);
4462
4463                node = rc->backref_cache.path[level];
4464                BUG_ON(node->bytenr != buf->start &&
4465                       node->new_bytenr != buf->start);
4466
4467                drop_node_buffer(node);
4468                extent_buffer_get(cow);
4469                node->eb = cow;
4470                node->new_bytenr = cow->start;
4471
4472                if (!node->pending) {
4473                        list_move_tail(&node->list,
4474                                       &rc->backref_cache.pending[level]);
4475                        node->pending = 1;
4476                }
4477
4478                if (first_cow)
4479                        __mark_block_processed(rc, node);
4480
4481                if (first_cow && level > 0)
4482                        rc->nodes_relocated += buf->len;
4483        }
4484
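            /* point file extent items in the new leaf at the relocated data */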
4485        if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) {
4486                ret = replace_file_extents(trans, rc, root, cow);
4487                BUG_ON(ret);
4488        }
4489}
4490
4491/*
4492 * called before creating a snapshot. it calculates the metadata reservation
4493 * required for relocating the tree blocks in the snapshot
4494 */
4495void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
4496                              struct btrfs_pending_snapshot *pending,
4497                              u64 *bytes_to_reserve)
4498{
4499        struct btrfs_root *root;
4500        struct reloc_control *rc;
4501
4502        root = pending->root;
4503        if (!root->reloc_root)
4504                return;
4505
4506        rc = root->fs_info->reloc_ctl;
4507        if (!rc->merge_reloc_tree)
4508                return;
4509
4510        root = root->reloc_root;
4511        BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4512        /*
4513         * relocation is in the stage of merging trees. the space
4514         * used by merging a reloc tree is twice the size of
4515         * relocated tree nodes in the worst case. half for cowing
4516         * the reloc tree, half for cowing the fs tree. the space
4517         * used by cowing the reloc tree will be freed after the
4518         * tree is dropped. if we create a snapshot, cowing the fs
4519         * tree may use more space than it frees, so we need to
4520         * reserve extra space.
4521         */
4522        *bytes_to_reserve += rc->nodes_relocated;
4523}
4524
4525/*
4526 * called after a snapshot is created. it migrates the block reservation
4527 * and creates a reloc root for the newly created snapshot
4528 */
4529int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4530                               struct btrfs_pending_snapshot *pending)
4531{
4532        struct btrfs_root *root = pending->root;
4533        struct btrfs_root *reloc_root;
4534        struct btrfs_root *new_root;
4535        struct reloc_control *rc;
4536        int ret;
4537
4538        if (!root->reloc_root)
4539                return 0;
4540
4541        rc = root->fs_info->reloc_ctl;
4542        rc->merging_rsv_size += rc->nodes_relocated;
4543
4544        if (rc->merge_reloc_tree) {
4545                ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4546                                              rc->block_rsv,
4547                                              rc->nodes_relocated);
4548                if (ret)
4549                        return ret;
4550        }
4551
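            /*
             * create a reloc tree for the new snapshot, based on the
             * source root's reloc tree
             */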
4552        new_root = pending->snap;
4553        reloc_root = create_reloc_root(trans, root->reloc_root,
4554                                       new_root->root_key.objectid);
4555        if (IS_ERR(reloc_root))
4556                return PTR_ERR(reloc_root);
4557
4558        ret = __add_reloc_root(reloc_root);
4559        BUG_ON(ret < 0);
4560        new_root->reloc_root = reloc_root;
4561
4562        if (rc->create_reloc_tree)
4563                ret = clone_backref_node(trans, rc, root, reloc_root);
4564        return ret;
4565}
4566