linux/fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

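/*
 * Illustrative sketch (not taken from this file): these flags are the
 * last argument to do_chunk_alloc(), declared below.  A caller that only
 * wants a chunk if space is genuinely tight might do something like
 *
 *      ret = do_chunk_alloc(trans, extent_root, num_bytes,
 *                           BTRFS_BLOCK_GROUP_DATA, CHUNK_ALLOC_NO_FORCE);
 *
 * while a caller that must have a new chunk would pass CHUNK_ALLOC_FORCE.
 */
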
/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

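/*
 * Illustrative sketch (not taken from this file): these values are the
 * 'reserve' argument to btrfs_update_reserved_bytes(), declared below.
 * A hypothetical caller reserving space and later releasing it might
 * pair the calls like
 *
 *      ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *      ...
 *      btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */
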
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        /* make sure we observe the most recent ->cached state */
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret);
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}

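/*
 * Background note (added for clarity, not in the original text):
 * btrfs_sb_offset(i) returns the byte offset of the i-th superblock copy;
 * the primary lives at BTRFS_SUPER_INFO_OFFSET (64KiB), with mirror
 * copies at fixed larger offsets (64MiB and 256GiB).  Any copies that
 * fall inside this block group are excluded above so the allocator
 * never hands those ranges out as free space.
 */
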
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, since their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_next_leaf(extent_root, path)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen: one thread
         * starts to load the space cache info, and then some other thread
         * starts a transaction commit which tries to do an allocation
         * while the first thread is still loading the space cache info.
         * The previous loop should have kept us from choosing this block
         * group, but if we've moved to the state where we will wait on
         * caching block groups we need to first check if we're doing a
         * fast load here, so we can wait for it to finish; otherwise we
         * could end up allocating from a block group whose cache gets
         * evicted for one reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        /*
         * We can't do the read from on-disk cache during a commit since we need
         * to have the normal tree locking.  Also if we are currently trying to
         * allocate blocks for the tree root we can't do the fast caching since
         * we likely hold important locks.
         */
        if (trans && (!trans->transaction->in_commit) &&
            (root && root != root->fs_info->tree_root) &&
            btrfs_test_opt(root, SPACE_CACHE)) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

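/*
 * Usage sketch (illustrative, not taken from this file):
 * block_group_cache_tree_search() takes a reference on the group it
 * returns, so both lookup helpers above must be paired with
 * btrfs_put_block_group():
 *
 *      cache = btrfs_lookup_block_group(fs_info, bytenr);
 *      if (cache) {
 *              ... use cache ...
 *              btrfs_put_block_group(cache);
 *      }
 */
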
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
        if (factor == 100)
                return num;
        num *= factor;
        do_div(num, 100);
        return num;
}

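/*
 * Worked example (added for clarity, not in the original text):
 * div_factor() scales by tenths and div_factor_fine() by hundredths,
 * rounding down, e.g.
 *
 *      div_factor(1024, 9)       == 921   (90% of 1024)
 *      div_factor_fine(1024, 85) == 870   (85% of 1024)
 *
 * do_div() is used because plain 64-bit division is not available on
 * all 32-bit architectures.
 */
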
u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

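/*
 * Usage sketch (illustrative, not taken from this file): passing
 * trans == NULL makes the lookup search the read-only commit root
 * without taking tree locks, skipping the delayed ref adjustments:
 *
 *      u64 refs, flags;
 *      ret = btrfs_lookup_extent_info(NULL, root, bytenr, num_bytes,
 *                                     &refs, &flags);
 *
 * With a transaction handle, pending delayed ref modifications are
 * folded into the returned count and flags, as above.
 */
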
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do
 * the back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */

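/*
 * Worked example (illustrative, not taken from this file): a file extent
 * owned by the default fs tree (BTRFS_FS_TREE_OBJECTID == 5), referenced
 * by a hypothetical inode 257 at file offset 0, gets an implicit back
 * ref keyed as
 *
 *      (extent bytenr, BTRFS_EXTENT_DATA_REF_KEY,
 *       hash_extent_data_ref(5, 257, 0))
 *
 * matching the (root_key.objectid, inode objectid, offset in file)
 * triple described above; hash_extent_data_ref() is defined further
 * down in this file.
 */
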
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

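/*
 * Note (added for clarity, not in the original text): the root objectid
 * is crc32c-hashed into the high half of the 64-bit key offset, and the
 * (owner, offset) pair into the low half; the halves are combined with a
 * 31-bit shift and xor.  Refs that share a root therefore tend to stay
 * adjacent in key space, while the per-file fields perturb the low bits.
 */
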
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

1369static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1370                                          struct btrfs_root *root,
1371                                          struct btrfs_path *path,
1372                                          u64 bytenr, u64 parent,
1373                                          u64 root_objectid)
1374{
1375        struct btrfs_key key;
1376        int ret;
1377
1378        key.objectid = bytenr;
1379        if (parent) {
1380                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1381                key.offset = parent;
1382        } else {
1383                key.type = BTRFS_TREE_BLOCK_REF_KEY;
1384                key.offset = root_objectid;
1385        }
1386
1387        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1388        btrfs_release_path(path);
1389        return ret;
1390}
1391
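/*
 * Map (parent, owner) to the matching back ref key type.  Owners below
 * BTRFS_FIRST_FREE_OBJECTID are tree blocks (the owner is the block's
 * level); everything else is file data.  A non-zero parent means the
 * ref is shared and keyed by the parent block rather than the root.
 */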
1392static inline int extent_ref_type(u64 parent, u64 owner)
1393{
1394        int type;
1395        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1396                if (parent > 0)
1397                        type = BTRFS_SHARED_BLOCK_REF_KEY;
1398                else
1399                        type = BTRFS_TREE_BLOCK_REF_KEY;
1400        } else {
1401                if (parent > 0)
1402                        type = BTRFS_SHARED_DATA_REF_KEY;
1403                else
1404                        type = BTRFS_EXTENT_DATA_REF_KEY;
1405        }
1406        return type;
1407}
1408
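/*
 * Return the key that follows the current path position.  Walks up the
 * path until a node with a next slot is found.  Returns 0 and fills in
 * *key on success, 1 if the path points at the last key in the tree.
 */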
1409static int find_next_key(struct btrfs_path *path, int level,
1410                         struct btrfs_key *key)
1412{
1413        for (; level < BTRFS_MAX_LEVEL; level++) {
1414                if (!path->nodes[level])
1415                        break;
1416                if (path->slots[level] + 1 >=
1417                    btrfs_header_nritems(path->nodes[level]))
1418                        continue;
1419                if (level == 0)
1420                        btrfs_item_key_to_cpu(path->nodes[level], key,
1421                                              path->slots[level] + 1);
1422                else
1423                        btrfs_node_key_to_cpu(path->nodes[level], key,
1424                                              path->slots[level] + 1);
1425                return 0;
1426        }
1427        return 1;
1428}
1429
1430/*
1431 * Look for an inline back ref.  If the back ref is found, *ref_ret is
1432 * set to the address of the inline back ref and 0 is returned.
1433 *
1434 * If the back ref isn't found, *ref_ret is set to the address where it
1435 * should be inserted and -ENOENT is returned.
1436 *
1437 * If insert is true and there are too many inline back refs, the path
1438 * points to the extent item and -EAGAIN is returned.
1439 *
1440 * NOTE: inline back refs are ordered in the same way that back ref
1441 *       items in the tree are ordered.
1442 */
1443static noinline_for_stack
1444int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1445                                 struct btrfs_root *root,
1446                                 struct btrfs_path *path,
1447                                 struct btrfs_extent_inline_ref **ref_ret,
1448                                 u64 bytenr, u64 num_bytes,
1449                                 u64 parent, u64 root_objectid,
1450                                 u64 owner, u64 offset, int insert)
1451{
1452        struct btrfs_key key;
1453        struct extent_buffer *leaf;
1454        struct btrfs_extent_item *ei;
1455        struct btrfs_extent_inline_ref *iref;
1456        u64 flags;
1457        u64 item_size;
1458        unsigned long ptr;
1459        unsigned long end;
1460        int extra_size;
1461        int type;
1462        int want;
1463        int ret;
1464        int err = 0;
1465
1466        key.objectid = bytenr;
1467        key.type = BTRFS_EXTENT_ITEM_KEY;
1468        key.offset = num_bytes;
1469
1470        want = extent_ref_type(parent, owner);
1471        if (insert) {
1472                extra_size = btrfs_extent_inline_ref_size(want);
1473                path->keep_locks = 1;
1474        } else
1475                extra_size = -1;
1476        ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1477        if (ret < 0) {
1478                err = ret;
1479                goto out;
1480        }
1481        BUG_ON(ret);
1482
1483        leaf = path->nodes[0];
1484        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1485#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1486        if (item_size < sizeof(*ei)) {
1487                if (!insert) {
1488                        err = -ENOENT;
1489                        goto out;
1490                }
1491                ret = convert_extent_item_v0(trans, root, path, owner,
1492                                             extra_size);
1493                if (ret < 0) {
1494                        err = ret;
1495                        goto out;
1496                }
1497                leaf = path->nodes[0];
1498                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1499        }
1500#endif
1501        BUG_ON(item_size < sizeof(*ei));
1502
1503        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1504        flags = btrfs_extent_flags(leaf, ei);
1505
1506        ptr = (unsigned long)(ei + 1);
1507        end = (unsigned long)ei + item_size;
1508
1509        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1510                ptr += sizeof(struct btrfs_tree_block_info);
1511                BUG_ON(ptr > end);
1512        } else {
1513                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1514        }
1515
1516        err = -ENOENT;
1517        while (1) {
1518                if (ptr >= end) {
1519                        WARN_ON(ptr > end);
1520                        break;
1521                }
1522                iref = (struct btrfs_extent_inline_ref *)ptr;
1523                type = btrfs_extent_inline_ref_type(leaf, iref);
1524                if (want < type)
1525                        break;
1526                if (want > type) {
1527                        ptr += btrfs_extent_inline_ref_size(type);
1528                        continue;
1529                }
1530
1531                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1532                        struct btrfs_extent_data_ref *dref;
1533                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1534                        if (match_extent_data_ref(leaf, dref, root_objectid,
1535                                                  owner, offset)) {
1536                                err = 0;
1537                                break;
1538                        }
1539                        if (hash_extent_data_ref_item(leaf, dref) <
1540                            hash_extent_data_ref(root_objectid, owner, offset))
1541                                break;
1542                } else {
1543                        u64 ref_offset;
1544                        ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1545                        if (parent > 0) {
1546                                if (parent == ref_offset) {
1547                                        err = 0;
1548                                        break;
1549                                }
1550                                if (ref_offset < parent)
1551                                        break;
1552                        } else {
1553                                if (root_objectid == ref_offset) {
1554                                        err = 0;
1555                                        break;
1556                                }
1557                                if (ref_offset < root_objectid)
1558                                        break;
1559                        }
1560                }
1561                ptr += btrfs_extent_inline_ref_size(type);
1562        }
1563        if (err == -ENOENT && insert) {
1564                if (item_size + extra_size >=
1565                    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1566                        err = -EAGAIN;
1567                        goto out;
1568                }
1569                /*
1570                 * To add new inline back ref, we have to make sure
1571                 * there is no corresponding back ref item.
1572                 * For simplicity, we just do not add new inline back
1573                 * ref if there is any kind of item for this block
1574                 */
1575                if (find_next_key(path, 0, &key) == 0 &&
1576                    key.objectid == bytenr &&
1577                    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1578                        err = -EAGAIN;
1579                        goto out;
1580                }
1581        }
1582        *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1583out:
1584        if (insert) {
1585                path->keep_locks = 0;
1586                btrfs_unlock_up_safe(path, 1);
1587        }
1588        return err;
1589}
1590
1591/*
1592 * helper to add a new inline back ref
1593 */
1594static noinline_for_stack
1595int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1596                                struct btrfs_root *root,
1597                                struct btrfs_path *path,
1598                                struct btrfs_extent_inline_ref *iref,
1599                                u64 parent, u64 root_objectid,
1600                                u64 owner, u64 offset, int refs_to_add,
1601                                struct btrfs_delayed_extent_op *extent_op)
1602{
1603        struct extent_buffer *leaf;
1604        struct btrfs_extent_item *ei;
1605        unsigned long ptr;
1606        unsigned long end;
1607        unsigned long item_offset;
1608        u64 refs;
1609        int size;
1610        int type;
1611        int ret;
1612
1613        leaf = path->nodes[0];
1614        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1615        item_offset = (unsigned long)iref - (unsigned long)ei;
1616
1617        type = extent_ref_type(parent, owner);
1618        size = btrfs_extent_inline_ref_size(type);
1619
1620        ret = btrfs_extend_item(trans, root, path, size);
1621
1622        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1623        refs = btrfs_extent_refs(leaf, ei);
1624        refs += refs_to_add;
1625        btrfs_set_extent_refs(leaf, ei, refs);
1626        if (extent_op)
1627                __run_delayed_extent_op(extent_op, leaf, ei);
1628
1629        ptr = (unsigned long)ei + item_offset;
1630        end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1631        if (ptr < end - size)
1632                memmove_extent_buffer(leaf, ptr + size, ptr,
1633                                      end - size - ptr);
1634
1635        iref = (struct btrfs_extent_inline_ref *)ptr;
1636        btrfs_set_extent_inline_ref_type(leaf, iref, type);
1637        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1638                struct btrfs_extent_data_ref *dref;
1639                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1640                btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1641                btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1642                btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1643                btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1644        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1645                struct btrfs_shared_data_ref *sref;
1646                sref = (struct btrfs_shared_data_ref *)(iref + 1);
1647                btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1648                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1649        } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1650                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1651        } else {
1652                btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1653        }
1654        btrfs_mark_buffer_dirty(leaf);
1655        return 0;
1656}
1657
1658static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1659                                 struct btrfs_root *root,
1660                                 struct btrfs_path *path,
1661                                 struct btrfs_extent_inline_ref **ref_ret,
1662                                 u64 bytenr, u64 num_bytes, u64 parent,
1663                                 u64 root_objectid, u64 owner, u64 offset)
1664{
1665        int ret;
1666
1667        ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1668                                           bytenr, num_bytes, parent,
1669                                           root_objectid, owner, offset, 0);
1670        if (ret != -ENOENT)
1671                return ret;
1672
1673        btrfs_release_path(path);
1674        *ref_ret = NULL;
1675
1676        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1677                ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1678                                            root_objectid);
1679        } else {
1680                ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1681                                             root_objectid, owner, offset);
1682        }
1683        return ret;
1684}
1685
1686/*
1687 * helper to update/remove an inline back ref
1688 */
1689static noinline_for_stack
1690int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1691                                 struct btrfs_root *root,
1692                                 struct btrfs_path *path,
1693                                 struct btrfs_extent_inline_ref *iref,
1694                                 int refs_to_mod,
1695                                 struct btrfs_delayed_extent_op *extent_op)
1696{
1697        struct extent_buffer *leaf;
1698        struct btrfs_extent_item *ei;
1699        struct btrfs_extent_data_ref *dref = NULL;
1700        struct btrfs_shared_data_ref *sref = NULL;
1701        unsigned long ptr;
1702        unsigned long end;
1703        u32 item_size;
1704        int size;
1705        int type;
1706        int ret;
1707        u64 refs;
1708
1709        leaf = path->nodes[0];
1710        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1711        refs = btrfs_extent_refs(leaf, ei);
1712        WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1713        refs += refs_to_mod;
1714        btrfs_set_extent_refs(leaf, ei, refs);
1715        if (extent_op)
1716                __run_delayed_extent_op(extent_op, leaf, ei);
1717
1718        type = btrfs_extent_inline_ref_type(leaf, iref);
1719
1720        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1721                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1722                refs = btrfs_extent_data_ref_count(leaf, dref);
1723        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1724                sref = (struct btrfs_shared_data_ref *)(iref + 1);
1725                refs = btrfs_shared_data_ref_count(leaf, sref);
1726        } else {
1727                refs = 1;
1728                BUG_ON(refs_to_mod != -1);
1729        }
1730
1731        BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1732        refs += refs_to_mod;
1733
1734        if (refs > 0) {
1735                if (type == BTRFS_EXTENT_DATA_REF_KEY)
1736                        btrfs_set_extent_data_ref_count(leaf, dref, refs);
1737                else
1738                        btrfs_set_shared_data_ref_count(leaf, sref, refs);
1739        } else {
1740                size = btrfs_extent_inline_ref_size(type);
1741                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1742                ptr = (unsigned long)iref;
1743                end = (unsigned long)ei + item_size;
1744                if (ptr + size < end)
1745                        memmove_extent_buffer(leaf, ptr, ptr + size,
1746                                              end - ptr - size);
1747                item_size -= size;
1748                ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1749        }
1750        btrfs_mark_buffer_dirty(leaf);
1751        return 0;
1752}
1753
1754static noinline_for_stack
1755int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1756                                 struct btrfs_root *root,
1757                                 struct btrfs_path *path,
1758                                 u64 bytenr, u64 num_bytes, u64 parent,
1759                                 u64 root_objectid, u64 owner,
1760                                 u64 offset, int refs_to_add,
1761                                 struct btrfs_delayed_extent_op *extent_op)
1762{
1763        struct btrfs_extent_inline_ref *iref;
1764        int ret;
1765
1766        ret = lookup_inline_extent_backref(trans, root, path, &iref,
1767                                           bytenr, num_bytes, parent,
1768                                           root_objectid, owner, offset, 1);
1769        if (ret == 0) {
1770                BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1771                ret = update_inline_extent_backref(trans, root, path, iref,
1772                                                   refs_to_add, extent_op);
1773        } else if (ret == -ENOENT) {
1774                ret = setup_inline_extent_backref(trans, root, path, iref,
1775                                                  parent, root_objectid,
1776                                                  owner, offset, refs_to_add,
1777                                                  extent_op);
1778        }
1779        return ret;
1780}
1781
1782static int insert_extent_backref(struct btrfs_trans_handle *trans,
1783                                 struct btrfs_root *root,
1784                                 struct btrfs_path *path,
1785                                 u64 bytenr, u64 parent, u64 root_objectid,
1786                                 u64 owner, u64 offset, int refs_to_add)
1787{
1788        int ret;
1789        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1790                BUG_ON(refs_to_add != 1);
1791                ret = insert_tree_block_ref(trans, root, path, bytenr,
1792                                            parent, root_objectid);
1793        } else {
1794                ret = insert_extent_data_ref(trans, root, path, bytenr,
1795                                             parent, root_objectid,
1796                                             owner, offset, refs_to_add);
1797        }
1798        return ret;
1799}
1800
1801static int remove_extent_backref(struct btrfs_trans_handle *trans,
1802                                 struct btrfs_root *root,
1803                                 struct btrfs_path *path,
1804                                 struct btrfs_extent_inline_ref *iref,
1805                                 int refs_to_drop, int is_data)
1806{
1807        int ret;
1808
1809        BUG_ON(!is_data && refs_to_drop != 1);
1810        if (iref) {
1811                ret = update_inline_extent_backref(trans, root, path, iref,
1812                                                   -refs_to_drop, NULL);
1813        } else if (is_data) {
1814                ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1815        } else {
1816                ret = btrfs_del_item(trans, root, path);
1817        }
1818        return ret;
1819}
1820
1821static int btrfs_issue_discard(struct block_device *bdev,
1822                                u64 start, u64 len)
1823{
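        /* blkdev_issue_discard() works in 512-byte sectors, hence the >> 9 */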
1824        return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1825}
1826
1827static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1828                                u64 num_bytes, u64 *actual_bytes)
1829{
1830        int ret;
1831        u64 discarded_bytes = 0;
1832        struct btrfs_bio *bbio = NULL;
1833
1835        /* Tell the block device(s) that the sectors can be discarded */
1836        ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1837                              bytenr, &num_bytes, &bbio, 0);
1838        if (!ret) {
1839                struct btrfs_bio_stripe *stripe = bbio->stripes;
1840                int i;
1841
1843                for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1844                        if (!stripe->dev->can_discard)
1845                                continue;
1846
1847                        ret = btrfs_issue_discard(stripe->dev->bdev,
1848                                                  stripe->physical,
1849                                                  stripe->length);
1850                        if (!ret)
1851                                discarded_bytes += stripe->length;
1852                        else if (ret != -EOPNOTSUPP)
1853                                break;
1854
1855                        /*
1856                         * Just in case we get back EOPNOTSUPP for some
1857                         * reason, ignore the return value so we don't
1858                         * break callers of discard_extent.
1859                         */
1860                        ret = 0;
1861                }
1862                kfree(bbio);
1863        }
1864
1865        if (actual_bytes)
1866                *actual_bytes = discarded_bytes;
1867
1869        return ret;
1870}
1871
1872int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1873                         struct btrfs_root *root,
1874                         u64 bytenr, u64 num_bytes, u64 parent,
1875                         u64 root_objectid, u64 owner, u64 offset, int for_cow)
1876{
1877        int ret;
1878        struct btrfs_fs_info *fs_info = root->fs_info;
1879
1880        BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1881               root_objectid == BTRFS_TREE_LOG_OBJECTID);
1882
1883        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1884                ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1885                                        num_bytes,
1886                                        parent, root_objectid, (int)owner,
1887                                        BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1888        } else {
1889                ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1890                                        num_bytes,
1891                                        parent, root_objectid, owner, offset,
1892                                        BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1893        }
1894        return ret;
1895}
1896
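/*
 * Add refs_to_add back refs to an existing extent.  The inline insert
 * is tried first; -EAGAIN means a new inline ref can't be added, so
 * the extent item's ref count is bumped directly and a keyed back ref
 * item is inserted instead.
 */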
1897static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1898                                  struct btrfs_root *root,
1899                                  u64 bytenr, u64 num_bytes,
1900                                  u64 parent, u64 root_objectid,
1901                                  u64 owner, u64 offset, int refs_to_add,
1902                                  struct btrfs_delayed_extent_op *extent_op)
1903{
1904        struct btrfs_path *path;
1905        struct extent_buffer *leaf;
1906        struct btrfs_extent_item *item;
1907        u64 refs;
1908        int ret;
1909        int err = 0;
1910
1911        path = btrfs_alloc_path();
1912        if (!path)
1913                return -ENOMEM;
1914
1915        path->reada = 1;
1916        path->leave_spinning = 1;
1917        /* this will set up the path even if it fails to insert the back ref */
1918        ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1919                                           path, bytenr, num_bytes, parent,
1920                                           root_objectid, owner, offset,
1921                                           refs_to_add, extent_op);
1922        if (ret == 0)
1923                goto out;
1924
1925        if (ret != -EAGAIN) {
1926                err = ret;
1927                goto out;
1928        }
1929
1930        leaf = path->nodes[0];
1931        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1932        refs = btrfs_extent_refs(leaf, item);
1933        btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1934        if (extent_op)
1935                __run_delayed_extent_op(extent_op, leaf, item);
1936
1937        btrfs_mark_buffer_dirty(leaf);
1938        btrfs_release_path(path);
1939
1940        path->reada = 1;
1941        path->leave_spinning = 1;
1942
1943        /* now insert the actual backref */
1944        ret = insert_extent_backref(trans, root->fs_info->extent_root,
1945                                    path, bytenr, parent, root_objectid,
1946                                    owner, offset, refs_to_add);
1947        BUG_ON(ret);
1948out:
1949        btrfs_free_path(path);
1950        return err;
1951}
1952
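/*
 * Process a single delayed data ref node.  Inserting a newly reserved
 * extent creates the extent item for the new file extent; otherwise the
 * back ref count is incremented or dropped per the node's action.
 */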
1953static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1954                                struct btrfs_root *root,
1955                                struct btrfs_delayed_ref_node *node,
1956                                struct btrfs_delayed_extent_op *extent_op,
1957                                int insert_reserved)
1958{
1959        int ret = 0;
1960        struct btrfs_delayed_data_ref *ref;
1961        struct btrfs_key ins;
1962        u64 parent = 0;
1963        u64 ref_root = 0;
1964        u64 flags = 0;
1965
1966        ins.objectid = node->bytenr;
1967        ins.offset = node->num_bytes;
1968        ins.type = BTRFS_EXTENT_ITEM_KEY;
1969
1970        ref = btrfs_delayed_node_to_data_ref(node);
1971        if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1972                parent = ref->parent;
1973        else
1974                ref_root = ref->root;
1975
1976        if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1977                if (extent_op) {
1978                        BUG_ON(extent_op->update_key);
1979                        flags |= extent_op->flags_to_set;
1980                }
1981                ret = alloc_reserved_file_extent(trans, root,
1982                                                 parent, ref_root, flags,
1983                                                 ref->objectid, ref->offset,
1984                                                 &ins, node->ref_mod);
1985        } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1986                ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1987                                             node->num_bytes, parent,
1988                                             ref_root, ref->objectid,
1989                                             ref->offset, node->ref_mod,
1990                                             extent_op);
1991        } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1992                ret = __btrfs_free_extent(trans, root, node->bytenr,
1993                                          node->num_bytes, parent,
1994                                          ref_root, ref->objectid,
1995                                          ref->offset, node->ref_mod,
1996                                          extent_op);
1997        } else {
1998                BUG();
1999        }
2000        return ret;
2001}
2002
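/*
 * Apply a pending extent op to an extent item in place: set any new
 * flags and, for tree blocks, update the first key recorded in the
 * btrfs_tree_block_info that follows the item.
 */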
2003static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2004                                    struct extent_buffer *leaf,
2005                                    struct btrfs_extent_item *ei)
2006{
2007        u64 flags = btrfs_extent_flags(leaf, ei);
2008        if (extent_op->update_flags) {
2009                flags |= extent_op->flags_to_set;
2010                btrfs_set_extent_flags(leaf, ei, flags);
2011        }
2012
2013        if (extent_op->update_key) {
2014                struct btrfs_tree_block_info *bi;
2015                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2016                bi = (struct btrfs_tree_block_info *)(ei + 1);
2017                btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2018        }
2019}
2020
2021static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2022                                 struct btrfs_root *root,
2023                                 struct btrfs_delayed_ref_node *node,
2024                                 struct btrfs_delayed_extent_op *extent_op)
2025{
2026        struct btrfs_key key;
2027        struct btrfs_path *path;
2028        struct btrfs_extent_item *ei;
2029        struct extent_buffer *leaf;
2030        u32 item_size;
2031        int ret;
2032        int err = 0;
2033
2034        path = btrfs_alloc_path();
2035        if (!path)
2036                return -ENOMEM;
2037
2038        key.objectid = node->bytenr;
2039        key.type = BTRFS_EXTENT_ITEM_KEY;
2040        key.offset = node->num_bytes;
2041
2042        path->reada = 1;
2043        path->leave_spinning = 1;
2044        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2045                                path, 0, 1);
2046        if (ret < 0) {
2047                err = ret;
2048                goto out;
2049        }
2050        if (ret > 0) {
2051                err = -EIO;
2052                goto out;
2053        }
2054
2055        leaf = path->nodes[0];
2056        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2057#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2058        if (item_size < sizeof(*ei)) {
2059                ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2060                                             path, (u64)-1, 0);
2061                if (ret < 0) {
2062                        err = ret;
2063                        goto out;
2064                }
2065                leaf = path->nodes[0];
2066                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2067        }
2068#endif
2069        BUG_ON(item_size < sizeof(*ei));
2070        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2071        __run_delayed_extent_op(extent_op, leaf, ei);
2072
2073        btrfs_mark_buffer_dirty(leaf);
2074out:
2075        btrfs_free_path(path);
2076        return err;
2077}
2078
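/*
 * Process a single delayed tree block ref node.  This mirrors
 * run_delayed_data_ref, except that tree blocks always have a ref_mod
 * of 1 and a reserved insert also needs the block's flags and first
 * key from the extent op.
 */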
2079static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2080                                struct btrfs_root *root,
2081                                struct btrfs_delayed_ref_node *node,
2082                                struct btrfs_delayed_extent_op *extent_op,
2083                                int insert_reserved)
2084{
2085        int ret = 0;
2086        struct btrfs_delayed_tree_ref *ref;
2087        struct btrfs_key ins;
2088        u64 parent = 0;
2089        u64 ref_root = 0;
2090
2091        ins.objectid = node->bytenr;
2092        ins.offset = node->num_bytes;
2093        ins.type = BTRFS_EXTENT_ITEM_KEY;
2094
2095        ref = btrfs_delayed_node_to_tree_ref(node);
2096        if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2097                parent = ref->parent;
2098        else
2099                ref_root = ref->root;
2100
2101        BUG_ON(node->ref_mod != 1);
2102        if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2103                BUG_ON(!extent_op || !extent_op->update_flags ||
2104                       !extent_op->update_key);
2105                ret = alloc_reserved_tree_block(trans, root,
2106                                                parent, ref_root,
2107                                                extent_op->flags_to_set,
2108                                                &extent_op->key,
2109                                                ref->level, &ins);
2110        } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2111                ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2112                                             node->num_bytes, parent, ref_root,
2113                                             ref->level, 0, 1, extent_op);
2114        } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2115                ret = __btrfs_free_extent(trans, root, node->bytenr,
2116                                          node->num_bytes, parent, ref_root,
2117                                          ref->level, 0, 1, extent_op);
2118        } else {
2119                BUG();
2120        }
2121        return ret;
2122}
2123
2124/* helper function to actually process a single delayed ref entry */
2125static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2126                               struct btrfs_root *root,
2127                               struct btrfs_delayed_ref_node *node,
2128                               struct btrfs_delayed_extent_op *extent_op,
2129                               int insert_reserved)
2130{
2131        int ret;
2132        if (btrfs_delayed_ref_is_head(node)) {
2133                struct btrfs_delayed_ref_head *head;
2134                /*
2135                 * We've hit the end of the chain and we were supposed
2136                 * to insert this extent into the tree.  But it got
2137                 * deleted before we ever needed to insert it, so all
2138                 * we have to do is clean up the accounting.
2139                 */
2140                BUG_ON(extent_op);
2141                head = btrfs_delayed_node_to_head(node);
2142                if (insert_reserved) {
2143                        btrfs_pin_extent(root, node->bytenr,
2144                                         node->num_bytes, 1);
2145                        if (head->is_data) {
2146                                ret = btrfs_del_csums(trans, root,
2147                                                      node->bytenr,
2148                                                      node->num_bytes);
2149                                BUG_ON(ret);
2150                        }
2151                }
2152                mutex_unlock(&head->mutex);
2153                return 0;
2154        }
2155
2156        if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2157            node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2158                ret = run_delayed_tree_ref(trans, root, node, extent_op,
2159                                           insert_reserved);
2160        else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2161                 node->type == BTRFS_SHARED_DATA_REF_KEY)
2162                ret = run_delayed_data_ref(trans, root, node, extent_op,
2163                                           insert_reserved);
2164        else
2165                BUG();
2166        return ret;
2167}
2168
2169static noinline struct btrfs_delayed_ref_node *
2170select_delayed_ref(struct btrfs_delayed_ref_head *head)
2171{
2172        struct rb_node *node;
2173        struct btrfs_delayed_ref_node *ref;
2174        int action = BTRFS_ADD_DELAYED_REF;
2175again:
2176        /*
2177         * Select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2178         * This prevents the ref count from going down to zero while
2179         * there are still pending delayed refs.
2180         */
2181        node = rb_prev(&head->node.rb_node);
2182        while (1) {
2183                if (!node)
2184                        break;
2185                ref = rb_entry(node, struct btrfs_delayed_ref_node,
2186                                rb_node);
2187                if (ref->bytenr != head->node.bytenr)
2188                        break;
2189                if (ref->action == action)
2190                        return ref;
2191                node = rb_prev(node);
2192        }
2193        if (action == BTRFS_ADD_DELAYED_REF) {
2194                action = BTRFS_DROP_DELAYED_REF;
2195                goto again;
2196        }
2197        return NULL;
2198}
2199
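/*
 * Run the delayed refs for all the heads on the cluster list.  Called
 * with delayed_refs->lock held; the lock is dropped and retaken around
 * each ref that is actually run.  Returns the number of entries
 * processed.
 */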
2200static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2201                                       struct btrfs_root *root,
2202                                       struct list_head *cluster)
2203{
2204        struct btrfs_delayed_ref_root *delayed_refs;
2205        struct btrfs_delayed_ref_node *ref;
2206        struct btrfs_delayed_ref_head *locked_ref = NULL;
2207        struct btrfs_delayed_extent_op *extent_op;
2208        int ret;
2209        int count = 0;
2210        int must_insert_reserved = 0;
2211
2212        delayed_refs = &trans->transaction->delayed_refs;
2213        while (1) {
2214                if (!locked_ref) {
2215                        /* pick a new head ref from the cluster list */
2216                        if (list_empty(cluster))
2217                                break;
2218
2219                        locked_ref = list_entry(cluster->next,
2220                                     struct btrfs_delayed_ref_head, cluster);
2221
2222                        /* grab the lock that says we are going to process
2223                         * all the refs for this head */
2224                        ret = btrfs_delayed_ref_lock(trans, locked_ref);
2225
2226                        /*
2227                         * we may have dropped the spin lock to get the head
2228                         * mutex lock, and that might have given someone else
2229                         * time to free the head.  If that's true, it has been
2230                         * removed from our list and we can move on.
2231                         */
2232                        if (ret == -EAGAIN) {
2233                                locked_ref = NULL;
2234                                count++;
2235                                continue;
2236                        }
2237                }
2238
2239                /*
2240                 * locked_ref is the head node, so we have to go one
2241                 * node back for any delayed ref updates
2242                 */
2243                ref = select_delayed_ref(locked_ref);
2244
2245                if (ref && ref->seq &&
2246                    btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
2247                        /*
2248                         * there are still refs with lower seq numbers in the
2249                         * process of being added. Don't run this ref yet.
2250                         */
2251                        list_del_init(&locked_ref->cluster);
2252                        mutex_unlock(&locked_ref->mutex);
2253                        locked_ref = NULL;
2254                        delayed_refs->num_heads_ready++;
2255                        spin_unlock(&delayed_refs->lock);
2256                        cond_resched();
2257                        spin_lock(&delayed_refs->lock);
2258                        continue;
2259                }
2260
2261                /*
2262                 * record the must insert reserved flag before we
2263                 * drop the spin lock.
2264                 */
2265                must_insert_reserved = locked_ref->must_insert_reserved;
2266                locked_ref->must_insert_reserved = 0;
2267
2268                extent_op = locked_ref->extent_op;
2269                locked_ref->extent_op = NULL;
2270
2271                if (!ref) {
2272                        /* All delayed refs have been processed.  Go ahead
2273                         * and send the head node to run_one_delayed_ref,
2274                         * so that any accounting fixes can happen.
2275                         */
2276                        ref = &locked_ref->node;
2277
2278                        if (extent_op && must_insert_reserved) {
2279                                kfree(extent_op);
2280                                extent_op = NULL;
2281                        }
2282
2283                        if (extent_op) {
2284                                spin_unlock(&delayed_refs->lock);
2285
2286                                ret = run_delayed_extent_op(trans, root,
2287                                                            ref, extent_op);
2288                                BUG_ON(ret);
2289                                kfree(extent_op);
2290
2291                                goto next;
2292                        }
2293
2294                        list_del_init(&locked_ref->cluster);
2295                        locked_ref = NULL;
2296                }
2297
2298                ref->in_tree = 0;
2299                rb_erase(&ref->rb_node, &delayed_refs->root);
2300                delayed_refs->num_entries--;
2301                /*
2302                 * we modified num_entries, but as we're currently running
2303                 * delayed refs, skip
2304                 *     wake_up(&delayed_refs->seq_wait);
2305                 * here.
2306                 */
2307                spin_unlock(&delayed_refs->lock);
2308
2309                ret = run_one_delayed_ref(trans, root, ref, extent_op,
2310                                          must_insert_reserved);
2311                BUG_ON(ret);
2312
2313                btrfs_put_delayed_ref(ref);
2314                kfree(extent_op);
2315                count++;
2316next:
2317                do_chunk_alloc(trans, root->fs_info->extent_root,
2318                               2 * 1024 * 1024,
2319                               btrfs_get_alloc_profile(root, 0),
2320                               CHUNK_ALLOC_NO_FORCE);
2321                cond_resched();
2322                spin_lock(&delayed_refs->lock);
2323        }
2324        return count;
2325}
2326
2328static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
2329                        unsigned long num_refs)
2330{
2331        struct list_head *first_seq = delayed_refs->seq_head.next;
2332
2333        spin_unlock(&delayed_refs->lock);
2334        pr_debug("waiting for more refs (num %lu, first %p)\n",
2335                 num_refs, first_seq);
2336        wait_event(delayed_refs->seq_wait,
2337                   num_refs != delayed_refs->num_entries ||
2338                   delayed_refs->seq_head.next != first_seq);
2339        pr_debug("done waiting for more refs (num %lu, first %p)\n",
2340                 delayed_refs->num_entries, delayed_refs->seq_head.next);
2341        spin_lock(&delayed_refs->lock);
2342}
2343
2344/*
2345 * this starts processing the delayed reference count updates and
2346 * extent insertions we have queued up so far.  count can be
2347 * 0, which means to process everything in the tree at the start
2348 * of the run (but not newly added entries), or it can be some target
2349 * number you'd like to process.
2350 */
2351int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2352                           struct btrfs_root *root, unsigned long count)
2353{
2354        struct rb_node *node;
2355        struct btrfs_delayed_ref_root *delayed_refs;
2356        struct btrfs_delayed_ref_node *ref;
2357        struct list_head cluster;
2358        int ret;
2359        u64 delayed_start;
2360        int run_all = count == (unsigned long)-1;
2361        int run_most = 0;
2362        unsigned long num_refs = 0;
2363        int consider_waiting;
2364
2365        if (root == root->fs_info->extent_root)
2366                root = root->fs_info->tree_root;
2367
2368        do_chunk_alloc(trans, root->fs_info->extent_root,
2369                       2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
2370                       CHUNK_ALLOC_NO_FORCE);
2371
2372        delayed_refs = &trans->transaction->delayed_refs;
2373        INIT_LIST_HEAD(&cluster);
2374again:
2375        consider_waiting = 0;
2376        spin_lock(&delayed_refs->lock);
2377        if (count == 0) {
2378                count = delayed_refs->num_entries * 2;
2379                run_most = 1;
2380        }
2381        while (1) {
2382                if (!(run_all || run_most) &&
2383                    delayed_refs->num_heads_ready < 64)
2384                        break;
2385
2386                /*
2387                 * go find something we can process in the rbtree.  We start at
2388                 * the beginning of the tree, and then build a cluster
2389                 * of refs to process starting at the first one we are able to
2390                 * lock
2391                 */
2392                delayed_start = delayed_refs->run_delayed_start;
2393                ret = btrfs_find_ref_cluster(trans, &cluster,
2394                                             delayed_refs->run_delayed_start);
2395                if (ret)
2396                        break;
2397
2398                if (delayed_start >= delayed_refs->run_delayed_start) {
2399                        if (consider_waiting == 0) {
2400                                /*
2401                                 * btrfs_find_ref_cluster looped.  Let's do one
2402                                 * more cycle.  If we don't run any delayed refs
2403                                 * during that cycle (because all of them are
2404                                 * blocked) and the number of refs doesn't
2405                                 * change, we avoid busy waiting.
2406                                 */
2407                                consider_waiting = 1;
2408                                num_refs = delayed_refs->num_entries;
2409                        } else {
2410                                wait_for_more_refs(delayed_refs, num_refs);
2411                                /*
2412                                 * After waiting, things have changed.  We
2413                                 * dropped the lock and someone else might have
2414                                 * run some refs, built new clusters and so on.
2415                                 * Therefore, we restart staleness detection.
2416                                 */
2417                                consider_waiting = 0;
2418                        }
2419                }
2420
2421                ret = run_clustered_refs(trans, root, &cluster);
2422                BUG_ON(ret < 0);
2423
2424                count -= min_t(unsigned long, ret, count);
2425
2426                if (count == 0)
2427                        break;
2428
2429                if (ret || delayed_refs->run_delayed_start == 0) {
2430                        /* refs were run, let's reset staleness detection */
2431                        consider_waiting = 0;
2432                }
2433        }
2434
2435        if (run_all) {
2436                node = rb_first(&delayed_refs->root);
2437                if (!node)
2438                        goto out;
2439                count = (unsigned long)-1;
2440
2441                while (node) {
2442                        ref = rb_entry(node, struct btrfs_delayed_ref_node,
2443                                       rb_node);
2444                        if (btrfs_delayed_ref_is_head(ref)) {
2445                                struct btrfs_delayed_ref_head *head;
2446
2447                                head = btrfs_delayed_node_to_head(ref);
2448                                atomic_inc(&ref->refs);
2449
2450                                spin_unlock(&delayed_refs->lock);
2451                                /*
2452                                 * Mutex was contended; block until it's
2453                                 * released and try again.
2454                                 */
2455                                mutex_lock(&head->mutex);
2456                                mutex_unlock(&head->mutex);
2457
2458                                btrfs_put_delayed_ref(ref);
2459                                cond_resched();
2460                                goto again;
2461                        }
2462                        node = rb_next(node);
2463                }
2464                spin_unlock(&delayed_refs->lock);
2465                schedule_timeout(1);
2466                goto again;
2467        }
2468out:
2469        spin_unlock(&delayed_refs->lock);
2470        return 0;
2471}
2472
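/*
 * Set flags on the extent item for [bytenr, bytenr + num_bytes).  The
 * update is queued as a delayed extent op and only hits the extent
 * tree when the delayed refs are run.
 */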
2473int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2474                                struct btrfs_root *root,
2475                                u64 bytenr, u64 num_bytes, u64 flags,
2476                                int is_data)
2477{
2478        struct btrfs_delayed_extent_op *extent_op;
2479        int ret;
2480
2481        extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2482        if (!extent_op)
2483                return -ENOMEM;
2484
2485        extent_op->flags_to_set = flags;
2486        extent_op->update_flags = 1;
2487        extent_op->update_key = 0;
2488        extent_op->is_data = is_data ? 1 : 0;
2489
2490        ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2491                                          num_bytes, extent_op);
2492        if (ret)
2493                kfree(extent_op);
2494        return ret;
2495}
2496
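/*
 * Check the pending delayed refs for the extent at bytenr.  Returns 0
 * if the only pending ref is our own data ref for (root, objectid,
 * offset), 1 if any other ref exists, -ENOENT if there are no pending
 * refs, and -EAGAIN if the head mutex was contended and the caller
 * should retry.
 */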
2497static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2498                                      struct btrfs_root *root,
2499                                      struct btrfs_path *path,
2500                                      u64 objectid, u64 offset, u64 bytenr)
2501{
2502        struct btrfs_delayed_ref_head *head;
2503        struct btrfs_delayed_ref_node *ref;
2504        struct btrfs_delayed_data_ref *data_ref;
2505        struct btrfs_delayed_ref_root *delayed_refs;
2506        struct rb_node *node;
2507        int ret = 0;
2508
2509        ret = -ENOENT;
2510        delayed_refs = &trans->transaction->delayed_refs;
2511        spin_lock(&delayed_refs->lock);
2512        head = btrfs_find_delayed_ref_head(trans, bytenr);
2513        if (!head)
2514                goto out;
2515
2516        if (!mutex_trylock(&head->mutex)) {
2517                atomic_inc(&head->node.refs);
2518                spin_unlock(&delayed_refs->lock);
2519
2520                btrfs_release_path(path);
2521
2522                /*
2523                 * Mutex was contended; block until it's released and let
2524                 * the caller try again.
2525                 */
2526                mutex_lock(&head->mutex);
2527                mutex_unlock(&head->mutex);
2528                btrfs_put_delayed_ref(&head->node);
2529                return -EAGAIN;
2530        }
2531
2532        node = rb_prev(&head->node.rb_node);
2533        if (!node)
2534                goto out_unlock;
2535
2536        ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2537
2538        if (ref->bytenr != bytenr)
2539                goto out_unlock;
2540
2541        ret = 1;
2542        if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2543                goto out_unlock;
2544
2545        data_ref = btrfs_delayed_node_to_data_ref(ref);
2546
2547        node = rb_prev(node);
2548        if (node) {
2549                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2550                if (ref->bytenr == bytenr)
2551                        goto out_unlock;
2552        }
2553
2554        if (data_ref->root != root->root_key.objectid ||
2555            data_ref->objectid != objectid || data_ref->offset != offset)
2556                goto out_unlock;
2557
2558        ret = 0;
2559out_unlock:
2560        mutex_unlock(&head->mutex);
2561out:
2562        spin_unlock(&delayed_refs->lock);
2563        return ret;
2564}
2565
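/*
 * Check the committed extent tree for cross references.  Returns 0
 * only when the extent was created after the root's last snapshot and
 * carries a single inline data back ref matching (root, objectid,
 * offset) exactly; otherwise 1, or -ENOENT if no extent item is found.
 */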
2566static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2567                                        struct btrfs_root *root,
2568                                        struct btrfs_path *path,
2569                                        u64 objectid, u64 offset, u64 bytenr)
2570{
2571        struct btrfs_root *extent_root = root->fs_info->extent_root;
2572        struct extent_buffer *leaf;
2573        struct btrfs_extent_data_ref *ref;
2574        struct btrfs_extent_inline_ref *iref;
2575        struct btrfs_extent_item *ei;
2576        struct btrfs_key key;
2577        u32 item_size;
2578        int ret;
2579
2580        key.objectid = bytenr;
2581        key.offset = (u64)-1;
2582        key.type = BTRFS_EXTENT_ITEM_KEY;
2583
2584        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2585        if (ret < 0)
2586                goto out;
2587        BUG_ON(ret == 0);
2588
2589        ret = -ENOENT;
2590        if (path->slots[0] == 0)
2591                goto out;
2592
2593        path->slots[0]--;
2594        leaf = path->nodes[0];
2595        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2596
2597        if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2598                goto out;
2599
2600        ret = 1;
2601        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2602#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2603        if (item_size < sizeof(*ei)) {
2604                WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2605                goto out;
2606        }
2607#endif
2608        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2609
2610        if (item_size != sizeof(*ei) +
2611            btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2612                goto out;
2613
2614        if (btrfs_extent_generation(leaf, ei) <=
2615            btrfs_root_last_snapshot(&root->root_item))
2616                goto out;
2617
2618        iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2619        if (btrfs_extent_inline_ref_type(leaf, iref) !=
2620            BTRFS_EXTENT_DATA_REF_KEY)
2621                goto out;
2622
2623        ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2624        if (btrfs_extent_refs(leaf, ei) !=
2625            btrfs_extent_data_ref_count(leaf, ref) ||
2626            btrfs_extent_data_ref_root(leaf, ref) !=
2627            root->root_key.objectid ||
2628            btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2629            btrfs_extent_data_ref_offset(leaf, ref) != offset)
2630                goto out;
2631
2632        ret = 0;
2633out:
2634        return ret;
2635}
2636
2637int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2638                          struct btrfs_root *root,
2639                          u64 objectid, u64 offset, u64 bytenr)
2640{
2641        struct btrfs_path *path;
2642        int ret;
2643        int ret2;
2644
2645        path = btrfs_alloc_path();
2646        if (!path)
2647                return -ENOMEM;
2648
2649        do {
2650                ret = check_committed_ref(trans, root, path, objectid,
2651                                          offset, bytenr);
2652                if (ret && ret != -ENOENT)
2653                        goto out;
2654
2655                ret2 = check_delayed_ref(trans, root, path, objectid,
2656                                         offset, bytenr);
2657        } while (ret2 == -EAGAIN);
2658
2659        if (ret2 && ret2 != -ENOENT) {
2660                ret = ret2;
2661                goto out;
2662        }
2663
2664        if (ret != -ENOENT || ret2 != -ENOENT)
2665                ret = 0;
2666out:
2667        btrfs_free_path(path);
2668        if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2669                WARN_ON(ret > 0);
2670        return ret;
2671}
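/*
 * Illustrative sketch, not part of the original file: the nodatacow
 * write path is a typical caller, using the result to choose between
 * overwriting in place and falling back to COW, roughly:
 *
 *	ret = btrfs_cross_ref_exist(trans, root, ino, offset, disk_bytenr);
 *	if (ret == 0)
 *		overwrite in place - nothing else references the extent
 *	else
 *		fall back to COW (shared extent, or the check failed)
 */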
2672
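/*
 * Walk every extent referenced from one tree block and add or drop a
 * back ref for each: file extent items on leaves, block pointers on
 * nodes.  full_backref selects whether the new refs are keyed by the
 * block itself (shared) or by the owning root.
 */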
2673static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2674                           struct btrfs_root *root,
2675                           struct extent_buffer *buf,
2676                           int full_backref, int inc, int for_cow)
2677{
2678        u64 bytenr;
2679        u64 num_bytes;
2680        u64 parent;
2681        u64 ref_root;
2682        u32 nritems;
2683        struct btrfs_key key;
2684        struct btrfs_file_extent_item *fi;
2685        int i;
2686        int level;
2687        int ret = 0;
2688        int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2689                            u64, u64, u64, u64, u64, u64, int);
2690
2691        ref_root = btrfs_header_owner(buf);
2692        nritems = btrfs_header_nritems(buf);
2693        level = btrfs_header_level(buf);
2694
2695        if (!root->ref_cows && level == 0)
2696                return 0;
2697
2698        if (inc)
2699                process_func = btrfs_inc_extent_ref;
2700        else
2701                process_func = btrfs_free_extent;
2702
2703        if (full_backref)
2704                parent = buf->start;
2705        else
2706                parent = 0;
2707
2708        for (i = 0; i < nritems; i++) {
2709                if (level == 0) {
2710                        btrfs_item_key_to_cpu(buf, &key, i);
2711                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2712                                continue;
2713                        fi = btrfs_item_ptr(buf, i,
2714                                            struct btrfs_file_extent_item);
2715                        if (btrfs_file_extent_type(buf, fi) ==
2716                            BTRFS_FILE_EXTENT_INLINE)
2717                                continue;
2718                        bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2719                        if (bytenr == 0)
2720                                continue;
2721
2722                        num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2723                        key.offset -= btrfs_file_extent_offset(buf, fi);
2724                        ret = process_func(trans, root, bytenr, num_bytes,
2725                                           parent, ref_root, key.objectid,
2726                                           key.offset, for_cow);
2727                        if (ret)
2728                                goto fail;
2729                } else {
2730                        bytenr = btrfs_node_blockptr(buf, i);
2731                        num_bytes = btrfs_level_size(root, level - 1);
2732                        ret = process_func(trans, root, bytenr, num_bytes,
2733                                           parent, ref_root, level - 1, 0,
2734                                           for_cow);
2735                        if (ret)
2736                                goto fail;
2737                }
2738        }
2739        return 0;
2740fail:
2741        BUG();
2742        return ret;
2743}
2744
2745int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2746                  struct extent_buffer *buf, int full_backref, int for_cow)
2747{
2748        return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2749}
2750
2751int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2752                  struct extent_buffer *buf, int full_backref, int for_cow)
2753{
2754        return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2755}
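
/*
 * Editor's sketch (illustrative only, not part of btrfs): the dispatch
 * pattern used by __btrfs_mod_ref() above in miniature.  One walker serves
 * both btrfs_inc_ref() and btrfs_dec_ref() by selecting the per-extent
 * operation through a function pointer once, up front, instead of branching
 * on `inc` inside the loop.  sketch_add_ref()/sketch_drop_ref() are
 * hypothetical stand-ins for btrfs_inc_extent_ref()/btrfs_free_extent().
 */
static int sketch_add_ref(unsigned long long bytenr)  { return 0; }
static int sketch_drop_ref(unsigned long long bytenr) { return 0; }

static int sketch_mod_refs(const unsigned long long *bytenrs, int n, int inc)
{
        int (*process)(unsigned long long) = inc ? sketch_add_ref :
                                                   sketch_drop_ref;
        int i, ret;

        for (i = 0; i < n; i++) {
                ret = process(bytenrs[i]);
                if (ret)
                        return ret;
        }
        return 0;
}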
2756
2757static int write_one_cache_group(struct btrfs_trans_handle *trans,
2758                                 struct btrfs_root *root,
2759                                 struct btrfs_path *path,
2760                                 struct btrfs_block_group_cache *cache)
2761{
2762        int ret;
2763        struct btrfs_root *extent_root = root->fs_info->extent_root;
2764        unsigned long bi;
2765        struct extent_buffer *leaf;
2766
2767        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2768        if (ret < 0)
2769                goto fail;
2770        BUG_ON(ret);
2771
2772        leaf = path->nodes[0];
2773        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2774        write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2775        btrfs_mark_buffer_dirty(leaf);
2776        btrfs_release_path(path);
2777fail:
2778        if (ret)
2779                return ret;
2780        return 0;
2781
2782}
2783
2784static struct btrfs_block_group_cache *
2785next_block_group(struct btrfs_root *root,
2786                 struct btrfs_block_group_cache *cache)
2787{
2788        struct rb_node *node;
2789        spin_lock(&root->fs_info->block_group_cache_lock);
2790        node = rb_next(&cache->cache_node);
2791        btrfs_put_block_group(cache);
2792        if (node) {
2793                cache = rb_entry(node, struct btrfs_block_group_cache,
2794                                 cache_node);
2795                btrfs_get_block_group(cache);
2796        } else
2797                cache = NULL;
2798        spin_unlock(&root->fs_info->block_group_cache_lock);
2799        return cache;
2800}
2801
2802static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2803                            struct btrfs_trans_handle *trans,
2804                            struct btrfs_path *path)
2805{
2806        struct btrfs_root *root = block_group->fs_info->tree_root;
2807        struct inode *inode = NULL;
2808        u64 alloc_hint = 0;
2809        int dcs = BTRFS_DC_ERROR;
2810        int num_pages = 0;
2811        int retries = 0;
2812        int ret = 0;
2813
2814        /*
2815         * If this block group is smaller than 100 megs don't bother caching the
2816         * block group.
2817         */
2818        if (block_group->key.offset < (100 * 1024 * 1024)) {
2819                spin_lock(&block_group->lock);
2820                block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2821                spin_unlock(&block_group->lock);
2822                return 0;
2823        }
2824
2825again:
2826        inode = lookup_free_space_inode(root, block_group, path);
2827        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2828                ret = PTR_ERR(inode);
2829                btrfs_release_path(path);
2830                goto out;
2831        }
2832
2833        if (IS_ERR(inode)) {
2834                BUG_ON(retries);
2835                retries++;
2836
2837                if (block_group->ro)
2838                        goto out_free;
2839
2840                ret = create_free_space_inode(root, trans, block_group, path);
2841                if (ret)
2842                        goto out_free;
2843                goto again;
2844        }
2845
2846        /* We've already set up this transaction, go ahead and exit */
2847        if (block_group->cache_generation == trans->transid &&
2848            i_size_read(inode)) {
2849                dcs = BTRFS_DC_SETUP;
2850                goto out_put;
2851        }
2852
2853        /*
2854         * We want to set the generation to 0, that way if anything goes wrong
2855         * from here on out we know not to trust this cache when we load up next
2856         * time.
2857         */
2858        BTRFS_I(inode)->generation = 0;
2859        ret = btrfs_update_inode(trans, root, inode);
2860        WARN_ON(ret);
2861
2862        if (i_size_read(inode) > 0) {
2863                ret = btrfs_truncate_free_space_cache(root, trans, path,
2864                                                      inode);
2865                if (ret)
2866                        goto out_put;
2867        }
2868
2869        spin_lock(&block_group->lock);
2870        if (block_group->cached != BTRFS_CACHE_FINISHED) {
2871                /* We're not cached, don't bother trying to write stuff out */
2872                dcs = BTRFS_DC_WRITTEN;
2873                spin_unlock(&block_group->lock);
2874                goto out_put;
2875        }
2876        spin_unlock(&block_group->lock);
2877
2878        num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2879        if (!num_pages)
2880                num_pages = 1;
2881
2882        /*
2883         * Just to make absolutely sure we have enough space, we're going to
2884         * preallocate 16 pages worth of space for each block group.  In
2885         * practice we ought to use at most 8, but we need extra space so we can
2886         * add our header and have a terminator between the extents and the
2887         * bitmaps.
2888         */
2889        num_pages *= 16;
2890        num_pages *= PAGE_CACHE_SIZE;
2891
2892        ret = btrfs_check_data_free_space(inode, num_pages);
2893        if (ret)
2894                goto out_put;
2895
2896        ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2897                                              num_pages, num_pages,
2898                                              &alloc_hint);
2899        if (!ret)
2900                dcs = BTRFS_DC_SETUP;
2901        btrfs_free_reserved_data_space(inode, num_pages);
2902
2903out_put:
2904        iput(inode);
2905out_free:
2906        btrfs_release_path(path);
2907out:
2908        spin_lock(&block_group->lock);
2909        if (!ret && dcs == BTRFS_DC_SETUP)
2910                block_group->cache_generation = trans->transid;
2911        block_group->disk_cache_state = dcs;
2912        spin_unlock(&block_group->lock);
2913
2914        return ret;
2915}
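
/*
 * Editor's note, a worked example of the sizing above (assuming 4 KiB
 * pages): a 1 GiB block group yields num_pages = 1, scaled to
 * 1 * 16 * 4096 = 64 KiB preallocated for its free-space cache; a 10 GiB
 * block group would get 10 * 16 * 4096 = 640 KiB.  The 16x scaling is the
 * slack described above for the header and the terminator between the
 * extent entries and the bitmaps.
 */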
2916
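/*
 * Editor's note: btrfs_write_dirty_block_groups() below works in three
 * passes: (1) run cache_save_setup() for every group still in
 * BTRFS_DC_CLEAR, (2) write the dirty block group items into the extent
 * tree, jumping back to the first pass if a newly created group shows up
 * as BTRFS_DC_CLEAR in the meantime, and (3) write out the cache files for
 * groups left in BTRFS_DC_NEED_WRITE.  `last` carries the resume offset
 * between lookups, and resetting it to 0 re-scans from the start until a
 * full pass finds nothing left to do.
 */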
2917int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2918                                   struct btrfs_root *root)
2919{
2920        struct btrfs_block_group_cache *cache;
2921        int err = 0;
2922        struct btrfs_path *path;
2923        u64 last = 0;
2924
2925        path = btrfs_alloc_path();
2926        if (!path)
2927                return -ENOMEM;
2928
2929again:
2930        while (1) {
2931                cache = btrfs_lookup_first_block_group(root->fs_info, last);
2932                while (cache) {
2933                        if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2934                                break;
2935                        cache = next_block_group(root, cache);
2936                }
2937                if (!cache) {
2938                        if (last == 0)
2939                                break;
2940                        last = 0;
2941                        continue;
2942                }
2943                err = cache_save_setup(cache, trans, path);
2944                last = cache->key.objectid + cache->key.offset;
2945                btrfs_put_block_group(cache);
2946        }
2947
2948        while (1) {
2949                if (last == 0) {
2950                        err = btrfs_run_delayed_refs(trans, root,
2951                                                     (unsigned long)-1);
2952                        BUG_ON(err);
2953                }
2954
2955                cache = btrfs_lookup_first_block_group(root->fs_info, last);
2956                while (cache) {
2957                        if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2958                                btrfs_put_block_group(cache);
2959                                goto again;
2960                        }
2961
2962                        if (cache->dirty)
2963                                break;
2964                        cache = next_block_group(root, cache);
2965                }
2966                if (!cache) {
2967                        if (last == 0)
2968                                break;
2969                        last = 0;
2970                        continue;
2971                }
2972
2973                if (cache->disk_cache_state == BTRFS_DC_SETUP)
2974                        cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2975                cache->dirty = 0;
2976                last = cache->key.objectid + cache->key.offset;
2977
2978                err = write_one_cache_group(trans, root, path, cache);
2979                BUG_ON(err);
2980                btrfs_put_block_group(cache);
2981        }
2982
2983        while (1) {
2984                /*
2985                 * I don't think this is needed since we're just marking our
2986                 * preallocated extent as written, but it can't hurt just in
2987                 * case.
2988                 */
2989                if (last == 0) {
2990                        err = btrfs_run_delayed_refs(trans, root,
2991                                                     (unsigned long)-1);
2992                        BUG_ON(err);
2993                }
2994
2995                cache = btrfs_lookup_first_block_group(root->fs_info, last);
2996                while (cache) {
2997                        /*
2998                         * Really this shouldn't happen, but it could if we
2999                         * couldn't write the entire preallocated extent and
3000                         * splitting the extent resulted in a new block.
3001                         */
3002                        if (cache->dirty) {
3003                                btrfs_put_block_group(cache);
3004                                goto again;
3005                        }
3006                        if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3007                                break;
3008                        cache = next_block_group(root, cache);
3009                }
3010                if (!cache) {
3011                        if (last == 0)
3012                                break;
3013                        last = 0;
3014                        continue;
3015                }
3016
3017                btrfs_write_out_cache(root, trans, cache, path);
3018
3019                /*
3020                 * If we didn't have an error then the cache state is still
3021                 * NEED_WRITE, so we can set it to WRITTEN.
3022                 */
3023                if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3024                        cache->disk_cache_state = BTRFS_DC_WRITTEN;
3025                last = cache->key.objectid + cache->key.offset;
3026                btrfs_put_block_group(cache);
3027        }
3028
3029        btrfs_free_path(path);
3030        return 0;
3031}
3032
3033int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3034{
3035        struct btrfs_block_group_cache *block_group;
3036        int readonly = 0;
3037
3038        block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3039        if (!block_group || block_group->ro)
3040                readonly = 1;
3041        if (block_group)
3042                btrfs_put_block_group(block_group);
3043        return readonly;
3044}
3045
3046static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3047                             u64 total_bytes, u64 bytes_used,
3048                             struct btrfs_space_info **space_info)
3049{
3050        struct btrfs_space_info *found;
3051        int i;
3052        int factor;
3053
3054        if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3055                     BTRFS_BLOCK_GROUP_RAID10))
3056                factor = 2;
3057        else
3058                factor = 1;
3059
3060        found = __find_space_info(info, flags);
3061        if (found) {
3062                spin_lock(&found->lock);
3063                found->total_bytes += total_bytes;
3064                found->disk_total += total_bytes * factor;
3065                found->bytes_used += bytes_used;
3066                found->disk_used += bytes_used * factor;
3067                found->full = 0;
3068                spin_unlock(&found->lock);
3069                *space_info = found;
3070                return 0;
3071        }
3072        found = kzalloc(sizeof(*found), GFP_NOFS);
3073        if (!found)
3074                return -ENOMEM;
3075
3076        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3077                INIT_LIST_HEAD(&found->block_groups[i]);
3078        init_rwsem(&found->groups_sem);
3079        spin_lock_init(&found->lock);
3080        found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3081        found->total_bytes = total_bytes;
3082        found->disk_total = total_bytes * factor;
3083        found->bytes_used = bytes_used;
3084        found->disk_used = bytes_used * factor;
3085        found->bytes_pinned = 0;
3086        found->bytes_reserved = 0;
3087        found->bytes_readonly = 0;
3088        found->bytes_may_use = 0;
3089        found->full = 0;
3090        found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3091        found->chunk_alloc = 0;
3092        found->flush = 0;
3093        init_waitqueue_head(&found->wait);
3094        *space_info = found;
3095        list_add_rcu(&found->list, &info->space_info);
3096        return 0;
3097}
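
/*
 * Editor's note, a worked example of the factor accounting above: adding
 * a 1 GiB RAID1 chunk with 256 MiB in use passes total_bytes = 1 GiB and
 * bytes_used = 256 MiB, and factor = 2 makes the space_info record
 * disk_total = 2 GiB and disk_used = 512 MiB; the logical fields count
 * each byte once, while the disk_* fields count every mirror.
 */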
3098
3099static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3100{
3101        u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
3102
3103        /* chunk -> extended profile */
3104        if (extra_flags == 0)
3105                extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3106
3107        if (flags & BTRFS_BLOCK_GROUP_DATA)
3108                fs_info->avail_data_alloc_bits |= extra_flags;
3109        if (flags & BTRFS_BLOCK_GROUP_METADATA)
3110                fs_info->avail_metadata_alloc_bits |= extra_flags;
3111        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3112                fs_info->avail_system_alloc_bits |= extra_flags;
3113}
3114
3115/*
3116 * @flags: available profiles in extended format (see ctree.h)
3117 *
3118 * Returns reduced profile in chunk format.  If profile changing is in
3119 * progress (either running or paused) picks the target profile (if it's
3120 * already available), otherwise falls back to plain reducing.
3121 */
3122u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3123{
3124        /*
3125         * we add in the count of missing devices because we want
3126         * to make sure that any RAID levels on a degraded FS
3127         * continue to be honored.
3128         */
3129        u64 num_devices = root->fs_info->fs_devices->rw_devices +
3130                root->fs_info->fs_devices->missing_devices;
3131
3132        /* pick restriper's target profile if it's available */
3133        spin_lock(&root->fs_info->balance_lock);
3134        if (root->fs_info->balance_ctl) {
3135                struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3136                u64 tgt = 0;
3137
3138                if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
3139                    (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3140                    (flags & bctl->data.target)) {
3141                        tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3142                } else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
3143                           (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3144                           (flags & bctl->sys.target)) {
3145                        tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3146                } else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
3147                           (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3148                           (flags & bctl->meta.target)) {
3149                        tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3150                }
3151
3152                if (tgt) {
3153                        spin_unlock(&root->fs_info->balance_lock);
3154                        flags = tgt;
3155                        goto out;
3156                }
3157        }
3158        spin_unlock(&root->fs_info->balance_lock);
3159
3160        if (num_devices == 1)
3161                flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3162        if (num_devices < 4)
3163                flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3164
3165        if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3166            (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3167                      BTRFS_BLOCK_GROUP_RAID10))) {
3168                flags &= ~BTRFS_BLOCK_GROUP_DUP;
3169        }
3170
3171        if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3172            (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3173                flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3174        }
3175
3176        if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3177            ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3178             (flags & BTRFS_BLOCK_GROUP_RAID10) |
3179             (flags & BTRFS_BLOCK_GROUP_DUP))) {
3180                flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3181        }
3182
3183out:
3184        /* extended -> chunk profile */
3185        flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3186        return flags;
3187}
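
/*
 * Editor's note: the reductions above apply in a fixed precedence,
 * RAID10 > RAID1 > DUP > RAID0, after profiles the device count cannot
 * satisfy have been dropped.  For example, RAID10|RAID1|RAID0 on a
 * two-device filesystem first loses RAID10 (fewer than four devices),
 * then RAID0 (RAID1 is still set), reducing to plain RAID1.
 */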
3188
3189static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3190{
3191        if (flags & BTRFS_BLOCK_GROUP_DATA)
3192                flags |= root->fs_info->avail_data_alloc_bits;
3193        else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3194                flags |= root->fs_info->avail_system_alloc_bits;
3195        else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3196                flags |= root->fs_info->avail_metadata_alloc_bits;
3197
3198        return btrfs_reduce_alloc_profile(root, flags);
3199}
3200
3201u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3202{
3203        u64 flags;
3204
3205        if (data)
3206                flags = BTRFS_BLOCK_GROUP_DATA;
3207        else if (root == root->fs_info->chunk_root)
3208                flags = BTRFS_BLOCK_GROUP_SYSTEM;
3209        else
3210                flags = BTRFS_BLOCK_GROUP_METADATA;
3211
3212        return get_alloc_profile(root, flags);
3213}
3214
3215void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3216{
3217        BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3218                                                       BTRFS_BLOCK_GROUP_DATA);
3219}
3220
3221/*
3222 * This will check the space that the inode allocates from to make sure we have
3223 * enough space for bytes.
3224 */
3225int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3226{
3227        struct btrfs_space_info *data_sinfo;
3228        struct btrfs_root *root = BTRFS_I(inode)->root;
3229        u64 used;
3230        int ret = 0, committed = 0, alloc_chunk = 1;
3231
3232        /* make sure bytes are sectorsize aligned */
3233        bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3234
3235        if (root == root->fs_info->tree_root ||
3236            BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3237                alloc_chunk = 0;
3238                committed = 1;
3239        }
3240
3241        data_sinfo = BTRFS_I(inode)->space_info;
3242        if (!data_sinfo)
3243                goto alloc;
3244
3245again:
3246        /* make sure we have enough space to handle the data first */
3247        spin_lock(&data_sinfo->lock);
3248        used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3249                data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3250                data_sinfo->bytes_may_use;
3251
3252        if (used + bytes > data_sinfo->total_bytes) {
3253                struct btrfs_trans_handle *trans;
3254
3255                /*
3256                 * if we don't have enough free bytes in this space then we need
3257                 * to alloc a new chunk.
3258                 */
3259                if (!data_sinfo->full && alloc_chunk) {
3260                        u64 alloc_target;
3261
3262                        data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3263                        spin_unlock(&data_sinfo->lock);
3264alloc:
3265                        alloc_target = btrfs_get_alloc_profile(root, 1);
3266                        trans = btrfs_join_transaction(root);
3267                        if (IS_ERR(trans))
3268                                return PTR_ERR(trans);
3269
3270                        ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3271                                             bytes + 2 * 1024 * 1024,
3272                                             alloc_target,
3273                                             CHUNK_ALLOC_NO_FORCE);
3274                        btrfs_end_transaction(trans, root);
3275                        if (ret < 0) {
3276                                if (ret != -ENOSPC)
3277                                        return ret;
3278                                else
3279                                        goto commit_trans;
3280                        }
3281
3282                        if (!data_sinfo) {
3283                                btrfs_set_inode_space_info(root, inode);
3284                                data_sinfo = BTRFS_I(inode)->space_info;
3285                        }
3286                        goto again;
3287                }
3288
3289                /*
3290                 * If we have less pinned bytes than we want to allocate then
3291                 * don't bother committing the transaction, it won't help us.
3292                 */
3293                if (data_sinfo->bytes_pinned < bytes)
3294                        committed = 1;
3295                spin_unlock(&data_sinfo->lock);
3296
3297                /* commit the current transaction and try again */
3298commit_trans:
3299                if (!committed &&
3300                    !atomic_read(&root->fs_info->open_ioctl_trans)) {
3301                        committed = 1;
3302                        trans = btrfs_join_transaction(root);
3303                        if (IS_ERR(trans))
3304                                return PTR_ERR(trans);
3305                        ret = btrfs_commit_transaction(trans, root);
3306                        if (ret)
3307                                return ret;
3308                        goto again;
3309                }
3310
3311                return -ENOSPC;
3312        }
3313        data_sinfo->bytes_may_use += bytes;
3314        trace_btrfs_space_reservation(root->fs_info, "space_info",
3315                                      (u64)(unsigned long)data_sinfo,
3316                                      bytes, 1);
3317        spin_unlock(&data_sinfo->lock);
3318
3319        return 0;
3320}
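
/*
 * Editor's sketch (illustrative only): the sector round-up used at the top
 * of btrfs_check_data_free_space() and btrfs_free_reserved_data_space().
 * It relies on sectorsize being a power of two, so masking with
 * ~(sectorsize - 1) after adding sectorsize - 1 rounds up to the next
 * multiple; e.g. sketch_align(5000, 4096) == 8192 and
 * sketch_align(4096, 4096) == 4096.
 */
static unsigned long long sketch_align(unsigned long long bytes,
                                       unsigned long long sectorsize)
{
        return (bytes + sectorsize - 1) & ~(sectorsize - 1);
}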
3321
3322/*
3323 * Called if we need to clear a data reservation for this inode.
3324 */
3325void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3326{
3327        struct btrfs_root *root = BTRFS_I(inode)->root;
3328        struct btrfs_space_info *data_sinfo;
3329
3330        /* make sure bytes are sectorsize aligned */
3331        bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3332
3333        data_sinfo = BTRFS_I(inode)->space_info;
3334        spin_lock(&data_sinfo->lock);
3335        data_sinfo->bytes_may_use -= bytes;
3336        trace_btrfs_space_reservation(root->fs_info, "space_info",
3337                                      (u64)(unsigned long)data_sinfo,
3338                                      bytes, 0);
3339        spin_unlock(&data_sinfo->lock);
3340}
3341
3342static void force_metadata_allocation(struct btrfs_fs_info *info)
3343{
3344        struct list_head *head = &info->space_info;
3345        struct btrfs_space_info *found;
3346
3347        rcu_read_lock();
3348        list_for_each_entry_rcu(found, head, list) {
3349                if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3350                        found->force_alloc = CHUNK_ALLOC_FORCE;
3351        }
3352        rcu_read_unlock();
3353}
3354
3355static int should_alloc_chunk(struct btrfs_root *root,
3356                              struct btrfs_space_info *sinfo, u64 alloc_bytes,
3357                              int force)
3358{
3359        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3360        u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3361        u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3362        u64 thresh;
3363
3364        if (force == CHUNK_ALLOC_FORCE)
3365                return 1;
3366
3367        /*
3368         * We need to take into account the global rsv because for all intents
3369         * and purposes it's used space.  Don't worry about locking the
3370         * global_rsv, it doesn't change except when the transaction commits.
3371         */
3372        num_allocated += global_rsv->size;
3373
3374        /*
3375         * in limited mode, we want to have some free space up to
3376         * about 1% of the FS size.
3377         */
3378        if (force == CHUNK_ALLOC_LIMITED) {
3379                thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3380                thresh = max_t(u64, 64 * 1024 * 1024,
3381                               div_factor_fine(thresh, 1));
3382
3383                if (num_bytes - num_allocated < thresh)
3384                        return 1;
3385        }
3386        thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3387
3388        /* 256MB or 2% of the FS */
3389        thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
3390        /* system chunks need a much smaller threshold */
3391        if (sinfo->flags & BTRFS_BLOCK_GROUP_SYSTEM)
3392                thresh = 32 * 1024 * 1024;
3393
3394        if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
3395                return 0;
3396        return 1;
3397}
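
/*
 * Editor's note, a worked example of the thresholds above on a 1 TiB
 * filesystem: in CHUNK_ALLOC_LIMITED mode the free-space target is
 * max(64 MiB, 1% of 1 TiB) = ~10.2 GiB, so a chunk is allocated whenever
 * writable-but-unallocated space falls below that.  Otherwise the
 * allocation is skipped only when the space_info already spans more than
 * max(256 MiB, 2% of 1 TiB) = ~20.5 GiB (32 MiB for system chunks) and
 * less than 80% of it is used.
 */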
3398
3399static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3400                          struct btrfs_root *extent_root, u64 alloc_bytes,
3401                          u64 flags, int force)
3402{
3403        struct btrfs_space_info *space_info;
3404        struct btrfs_fs_info *fs_info = extent_root->fs_info;
3405        int wait_for_alloc = 0;
3406        int ret = 0;
3407
3408        BUG_ON(!profile_is_valid(flags, 0));
3409
3410        space_info = __find_space_info(extent_root->fs_info, flags);
3411        if (!space_info) {
3412                ret = update_space_info(extent_root->fs_info, flags,
3413                                        0, 0, &space_info);
3414                BUG_ON(ret);
3415        }
3416        BUG_ON(!space_info);
3417
3418again:
3419        spin_lock(&space_info->lock);
3420        if (force < space_info->force_alloc)
3421                force = space_info->force_alloc;
3422        if (space_info->full) {
3423                spin_unlock(&space_info->lock);
3424                return 0;
3425        }
3426
3427        if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3428                spin_unlock(&space_info->lock);
3429                return 0;
3430        } else if (space_info->chunk_alloc) {
3431                wait_for_alloc = 1;
3432        } else {
3433                space_info->chunk_alloc = 1;
3434        }
3435
3436        spin_unlock(&space_info->lock);
3437
3438        mutex_lock(&fs_info->chunk_mutex);
3439
3440        /*
3441         * The chunk_mutex is held throughout the entirety of a chunk
3442         * allocation, so once we've acquired the chunk_mutex we know that the
3443         * other guy is done and we need to recheck and see if we should
3444         * allocate.
3445         */
3446        if (wait_for_alloc) {
3447                mutex_unlock(&fs_info->chunk_mutex);
3448                wait_for_alloc = 0;
3449                goto again;
3450        }
3451
3452        /*
3453         * If we have mixed data/metadata chunks we want to make sure we keep
3454         * allocating mixed chunks instead of individual chunks.
3455         */
3456        if (btrfs_mixed_space_info(space_info))
3457                flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3458
3459        /*
3460         * if we're doing a data chunk, go ahead and make sure that
3461         * we keep a reasonable number of metadata chunks allocated in the
3462         * FS as well.
3463         */
3464        if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3465                fs_info->data_chunk_allocations++;
3466                if (!(fs_info->data_chunk_allocations %
3467                      fs_info->metadata_ratio))
3468                        force_metadata_allocation(fs_info);
3469        }
3470
3471        ret = btrfs_alloc_chunk(trans, extent_root, flags);
3472        if (ret < 0 && ret != -ENOSPC)
3473                goto out;
3474
3475        spin_lock(&space_info->lock);
3476        if (ret)
3477                space_info->full = 1;
3478        else
3479                ret = 1;
3480
3481        space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3482        space_info->chunk_alloc = 0;
3483        spin_unlock(&space_info->lock);
3484out:
3485        mutex_unlock(&extent_root->fs_info->chunk_mutex);
3486        return ret;
3487}
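
/*
 * Editor's sketch (illustrative only): the serialization scheme of
 * do_chunk_alloc() in miniature.  A racing caller never sleeps on a
 * waitqueue; it takes the chunk mutex (which the allocating task holds for
 * the whole allocation), immediately drops it and re-runs the
 * should-allocate check, because the first task may have created exactly
 * the chunk it needed.  The sketch_* names are hypothetical stand-ins for
 * space_info->full, space_info->chunk_alloc and the chunk_mutex.
 */
static int sketch_chunk_exists;   /* stands in for space_info->full */
static int sketch_in_progress;    /* stands in for space_info->chunk_alloc */

static void sketch_lock(void)   { /* mutex_lock(&fs_info->chunk_mutex) */ }
static void sketch_unlock(void) { /* mutex_unlock(&fs_info->chunk_mutex) */ }

static int sketch_alloc_chunk(void)
{
        int wait;
again:
        if (sketch_chunk_exists)
                return 0;
        wait = sketch_in_progress;
        if (!wait)
                sketch_in_progress = 1;

        sketch_lock();
        if (wait) {
                /* the allocator we raced with has finished; recheck */
                sketch_unlock();
                goto again;
        }
        sketch_chunk_exists = 1;        /* ... the actual allocation ... */
        sketch_in_progress = 0;
        sketch_unlock();
        return 1;
}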
3488
3489/*
3490 * shrink metadata reservation for delalloc
3491 */
3492static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
3493                           bool wait_ordered)
3494{
3495        struct btrfs_block_rsv *block_rsv;
3496        struct btrfs_space_info *space_info;
3497        struct btrfs_trans_handle *trans;
3498        u64 reserved;
3499        u64 max_reclaim;
3500        u64 reclaimed = 0;
3501        long time_left;
3502        unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3503        int loops = 0;
3504        unsigned long progress;
3505
3506        trans = (struct btrfs_trans_handle *)current->journal_info;
3507        block_rsv = &root->fs_info->delalloc_block_rsv;
3508        space_info = block_rsv->space_info;
3509
3510        smp_mb();
3511        reserved = space_info->bytes_may_use;
3512        progress = space_info->reservation_progress;
3513
3514        if (reserved == 0)
3515                return 0;
3516
3517        smp_mb();
3518        if (root->fs_info->delalloc_bytes == 0) {
3519                if (trans)
3520                        return 0;
3521                btrfs_wait_ordered_extents(root, 0, 0);
3522                return 0;
3523        }
3524
3525        max_reclaim = min(reserved, to_reclaim);
3526        nr_pages = max_t(unsigned long, nr_pages,
3527                         max_reclaim >> PAGE_CACHE_SHIFT);
3528        while (loops < 1024) {
3529                /* have the flusher threads jump in and do some IO */
3530                smp_mb();
3531                nr_pages = min_t(unsigned long, nr_pages,
3532                       root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3533                writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
3534                                                WB_REASON_FS_FREE_SPACE);
3535
3536                spin_lock(&space_info->lock);
3537                if (reserved > space_info->bytes_may_use)
3538                        reclaimed += reserved - space_info->bytes_may_use;
3539                reserved = space_info->bytes_may_use;
3540                spin_unlock(&space_info->lock);
3541
3542                loops++;
3543
3544                if (reserved == 0 || reclaimed >= max_reclaim)
3545                        break;
3546
3547                if (trans && trans->transaction->blocked)
3548                        return -EAGAIN;
3549
3550                if (wait_ordered && !trans) {
3551                        btrfs_wait_ordered_extents(root, 0, 0);
3552                } else {
3553                        time_left = schedule_timeout_interruptible(1);
3554
3555                        /* We were interrupted, exit */
3556                        if (time_left)
3557                                break;
3558                }
3559
3560                /* we've kicked the IO a few times, if anything has been freed,
3561                 * exit.  There is no sense in looping here for a long time
3562                 * when we really need to commit the transaction, or there are
3563                 * just too many writers without enough free space
3564                 */
3565
3566                if (loops > 3) {
3567                        smp_mb();
3568                        if (progress != space_info->reservation_progress)
3569                                break;
3570                }
3571
3572        }
3573
3574        return reclaimed >= to_reclaim;
3575}
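
/*
 * Editor's note: the loop above never measures freed space directly; it
 * watches space_info->bytes_may_use shrink between iterations and counts
 * the delta as `reclaimed`.  After more than three passes it also gives up
 * early if space_info->reservation_progress has not moved, i.e. nobody
 * released a reservation while we were kicking writeback.
 */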
3576
3577/**
3578 * may_commit_transaction - possibly commit the transaction if it's ok to
3579 * @root - the root we're allocating for
 * @space_info - the space_info we're allocating from
3580 * @bytes - the number of bytes we want to reserve
3581 * @force - force the commit
3582 *
3583 * This will check to make sure that committing the transaction will actually
3584 * get us somewhere and then commit the transaction if it does.  Otherwise it
3585 * will return -ENOSPC.
3586 */
3587static int may_commit_transaction(struct btrfs_root *root,
3588                                  struct btrfs_space_info *space_info,
3589                                  u64 bytes, int force)
3590{
3591        struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3592        struct btrfs_trans_handle *trans;
3593
3594        trans = (struct btrfs_trans_handle *)current->journal_info;
3595        if (trans)
3596                return -EAGAIN;
3597
3598        if (force)
3599                goto commit;
3600
3601        /* See if there is enough pinned space to make this reservation */
3602        spin_lock(&space_info->lock);
3603        if (space_info->bytes_pinned >= bytes) {
3604                spin_unlock(&space_info->lock);
3605                goto commit;
3606        }
3607        spin_unlock(&space_info->lock);
3608
3609        /*
3610         * See if there is some space in the delayed insertion reservation for
3611         * this reservation.
3612         */
3613        if (space_info != delayed_rsv->space_info)
3614                return -ENOSPC;
3615
3616        spin_lock(&space_info->lock);
3617        spin_lock(&delayed_rsv->lock);
3618        if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
3619                spin_unlock(&delayed_rsv->lock);
3620                spin_unlock(&space_info->lock);
3621                return -ENOSPC;
3622        }
3623        spin_unlock(&delayed_rsv->lock);
3624        spin_unlock(&space_info->lock);
3625
3626commit:
3627        trans = btrfs_join_transaction(root);
3628        if (IS_ERR(trans))
3629                return -ENOSPC;
3630
3631        return btrfs_commit_transaction(trans, root);
3632}
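
/*
 * Editor's note: committing helps only because it moves bytes_pinned back
 * to free space, so the checks above refuse to commit unless the pinned
 * bytes alone could cover the request, or, for the same space_info, the
 * pinned bytes plus the delayed-item reservation (which a commit also
 * drains) could.
 */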
3633
3634/**
3635 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3636 * @root - the root we're allocating for
3637 * @block_rsv - the block_rsv we're allocating for
3638 * @orig_bytes - the number of bytes we want
3639 * @flush - whether or not we can flush to make our reservation
3640 *
3641 * This will reserve orig_bytes number of bytes from the space info associated
3642 * with the block_rsv.  If there is not enough space it will make an attempt to
3643 * flush out space to make room.  It will do this by flushing delalloc if
3644 * possible or committing the transaction.  If flush is 0 then no attempts to
3645 * regain reservations will be made and this will fail if there is not enough
3646 * space already.
3647 */
3648static int reserve_metadata_bytes(struct btrfs_root *root,
3649                                  struct btrfs_block_rsv *block_rsv,
3650                                  u64 orig_bytes, int flush)
3651{
3652        struct btrfs_space_info *space_info = block_rsv->space_info;
3653        u64 used;
3654        u64 num_bytes = orig_bytes;
3655        int retries = 0;
3656        int ret = 0;
3657        bool committed = false;
3658        bool flushing = false;
3659        bool wait_ordered = false;
3660
3661again:
3662        ret = 0;
3663        spin_lock(&space_info->lock);
3664        /*
3665         * We only want to wait if somebody other than us is flushing and we are
3666         * actually allowed to flush.
3667         */
3668        while (flush && !flushing && space_info->flush) {
3669                spin_unlock(&space_info->lock);
3670                /*
3671                 * If we have a trans handle we can't wait because the flusher
3672                 * may have to commit the transaction, which would mean we would
3673                 * deadlock since we are waiting for the flusher to finish, but
3674                 * hold the current transaction open.
3675                 */
3676                if (current->journal_info)
3677                        return -EAGAIN;
3678                ret = wait_event_interruptible(space_info->wait,
3679                                               !space_info->flush);
3680                /* Must have been interrupted, return */
3681                if (ret)
3682                        return -EINTR;
3683
3684                spin_lock(&space_info->lock);
3685        }
3686
3687        ret = -ENOSPC;
3688        used = space_info->bytes_used + space_info->bytes_reserved +
3689                space_info->bytes_pinned + space_info->bytes_readonly +
3690                space_info->bytes_may_use;
3691
3692        /*
3693         * The idea here is that if we've not already over-reserved the block
3694         * group then we can go ahead and save our reservation first and then
3695         * start flushing if we need to.  Otherwise, if we've already
3696         * overcommitted, let's start flushing stuff first and then come back
3697         * and try to make our reservation.
3698         */
3699        if (used <= space_info->total_bytes) {
3700                if (used + orig_bytes <= space_info->total_bytes) {
3701                        space_info->bytes_may_use += orig_bytes;
3702                        trace_btrfs_space_reservation(root->fs_info,
3703                                              "space_info",
3704                                              (u64)(unsigned long)space_info,
3705                                              orig_bytes, 1);
3706                        ret = 0;
3707                } else {
3708                        /*
3709                         * Ok set num_bytes to orig_bytes since we aren't
3710                         * overcommitted, this way we only try and reclaim what
3711                         * we need.
3712                         */
3713                        num_bytes = orig_bytes;
3714                }
3715        } else {
3716                /*
3717                 * Ok, we're overcommitted; set num_bytes to the overcommitted
3718                 * amount plus the amount of bytes that we need for this
3719                 * reservation.
3720                 */
3721                wait_ordered = true;
3722                num_bytes = used - space_info->total_bytes +
3723                        (orig_bytes * (retries + 1));
3724        }
3725
3726        if (ret) {
3727                u64 profile = btrfs_get_alloc_profile(root, 0);
3728                u64 avail;
3729
3730                /*
3731                 * If we have a lot of space that's pinned, don't bother doing
3732                 * the overcommit dance yet and just commit the transaction.
3733                 */
3734                avail = (space_info->total_bytes - space_info->bytes_used) * 8;
3735                do_div(avail, 10);
3736                if (space_info->bytes_pinned >= avail && flush && !committed) {
3737                        space_info->flush = 1;
3738                        flushing = true;
3739                        spin_unlock(&space_info->lock);
3740                        ret = may_commit_transaction(root, space_info,
3741                                                     orig_bytes, 1);
3742                        if (ret)
3743                                goto out;
3744                        committed = true;
3745                        goto again;
3746                }
3747
3748                spin_lock(&root->fs_info->free_chunk_lock);
3749                avail = root->fs_info->free_chunk_space;
3750
3751                /*
3752                 * If we have dup, raid1 or raid10 then only half of the free
3753         * space is actually usable.
3754                 */
3755                if (profile & (BTRFS_BLOCK_GROUP_DUP |
3756                               BTRFS_BLOCK_GROUP_RAID1 |
3757                               BTRFS_BLOCK_GROUP_RAID10))
3758                        avail >>= 1;
3759
3760                /*
3761                 * If we can flush, only let us overcommit by 1/8th of the space,
3762                 * since flushing gives us another chance to make the reservation.
3763                 * If we can't flush there is no fallback, so allow up to 1/2.
3764                 */
3765                if (flush)
3766                        avail >>= 3;
3767                else
3768                        avail >>= 1;
3769                spin_unlock(&root->fs_info->free_chunk_lock);
3770
3771                if (used + num_bytes < space_info->total_bytes + avail) {
3772                        space_info->bytes_may_use += orig_bytes;
3773                        trace_btrfs_space_reservation(root->fs_info,
3774                                              "space_info",
3775                                              (u64)(unsigned long)space_info,
3776                                              orig_bytes, 1);
3777                        ret = 0;
3778                } else {
3779                        wait_ordered = true;
3780                }
3781        }
3782
3783        /*
3784         * Couldn't make our reservation, save our place so while we're trying
3785         * to reclaim space we can actually use it instead of somebody else
3786         * stealing it from us.
3787         */
3788        if (ret && flush) {
3789                flushing = true;
3790                space_info->flush = 1;
3791        }
3792
3793        spin_unlock(&space_info->lock);
3794
3795        if (!ret || !flush)
3796                goto out;
3797
3798        /*
3799         * We do synchronous shrinking since we don't actually unreserve
3800         * metadata until after the IO is completed.
3801         */
3802        ret = shrink_delalloc(root, num_bytes, wait_ordered);
3803        if (ret < 0)
3804                goto out;
3805
3806        ret = 0;
3807
3808        /*
3809         * So if we were overcommitted it's possible that somebody else flushed
3810         * out enough space and we simply didn't have enough space to reclaim,
3811         * so go back around and try again.
3812         */
3813        if (retries < 2) {
3814                wait_ordered = true;
3815                retries++;
3816                goto again;
3817        }
3818
3819        ret = -ENOSPC;
3820        if (committed)
3821                goto out;
3822
3823        ret = may_commit_transaction(root, space_info, orig_bytes, 0);
3824        if (!ret) {
3825                committed = true;
3826                goto again;
3827        }
3828
3829out:
3830        if (flushing) {
3831                spin_lock(&space_info->lock);
3832                space_info->flush = 0;
3833                wake_up_all(&space_info->wait);
3834                spin_unlock(&space_info->lock);
3835        }
3836        return ret;
3837}
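
/*
 * Editor's note, a worked example of the overcommit math above: with
 * 8 GiB of unallocated chunk space (free_chunk_space) and a DUP metadata
 * profile, avail is first halved to 4 GiB because every byte is written
 * twice, then cut to 512 MiB for a flushing caller (>> 3) or 2 GiB for a
 * non-flushing one (>> 1); the reservation is granted without any
 * flushing if used + num_bytes still fits under total_bytes + avail.
 */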
3838
3839static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3840                                             struct btrfs_root *root)
3841{
3842        struct btrfs_block_rsv *block_rsv = NULL;
3843
3844        if (root->ref_cows || root == root->fs_info->csum_root)
3845                block_rsv = trans->block_rsv;
3846
3847        if (!block_rsv)
3848                block_rsv = root->block_rsv;
3849
3850        if (!block_rsv)
3851                block_rsv = &root->fs_info->empty_block_rsv;
3852
3853        return block_rsv;
3854}
3855
3856static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3857                               u64 num_bytes)
3858{
3859        int ret = -ENOSPC;
3860        spin_lock(&block_rsv->lock);
3861        if (block_rsv->reserved >= num_bytes) {
3862                block_rsv->reserved -= num_bytes;
3863                if (block_rsv->reserved < block_rsv->size)
3864                        block_rsv->full = 0;
3865                ret = 0;
3866        }
3867        spin_unlock(&block_rsv->lock);
3868        return ret;
3869}
3870
3871static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3872                                u64 num_bytes, int update_size)
3873{
3874        spin_lock(&block_rsv->lock);
3875        block_rsv->reserved += num_bytes;
3876        if (update_size)
3877                block_rsv->size += num_bytes;
3878        else if (block_rsv->reserved >= block_rsv->size)
3879                block_rsv->full = 1;
3880        spin_unlock(&block_rsv->lock);
3881}
3882
3883static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
3884                                    struct btrfs_block_rsv *block_rsv,
3885                                    struct btrfs_block_rsv *dest, u64 num_bytes)
3886{
3887        struct btrfs_space_info *space_info = block_rsv->space_info;
3888
3889        spin_lock(&block_rsv->lock);
3890        if (num_bytes == (u64)-1)
3891                num_bytes = block_rsv->size;
3892        block_rsv->size -= num_bytes;
3893        if (block_rsv->reserved >= block_rsv->size) {
3894                num_bytes = block_rsv->reserved - block_rsv->size;
3895                block_rsv->reserved = block_rsv->size;
3896                block_rsv->full = 1;
3897        } else {
3898                num_bytes = 0;
3899        }
3900        spin_unlock(&block_rsv->lock);
3901
3902        if (num_bytes > 0) {
3903                if (dest) {
3904                        spin_lock(&dest->lock);
3905                        if (!dest->full) {
3906                                u64 bytes_to_add;
3907
3908                                bytes_to_add = dest->size - dest->reserved;
3909                                bytes_to_add = min(num_bytes, bytes_to_add);
3910                                dest->reserved += bytes_to_add;
3911                                if (dest->reserved >= dest->size)
3912                                        dest->full = 1;
3913                                num_bytes -= bytes_to_add;
3914                        }
3915                        spin_unlock(&dest->lock);
3916                }
3917                if (num_bytes) {
3918                        spin_lock(&space_info->lock);
3919                        space_info->bytes_may_use -= num_bytes;
3920                        trace_btrfs_space_reservation(fs_info, "space_info",
3921                                              (u64)(unsigned long)space_info,
3922                                              num_bytes, 0);
3923                        space_info->reservation_progress++;
3924                        spin_unlock(&space_info->lock);
3925                }
3926        }
3927}
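
/*
 * Editor's note, a worked example of the release above: shrinking a rsv
 * with size = 10 MiB and reserved = 12 MiB by num_bytes = 4 MiB leaves
 * size = 6 MiB, so the 6 MiB excess over the new size is clipped off
 * reserved.  That excess first refills `dest` (the global rsv in the
 * release path) up to its own size, and whatever is left over comes off
 * space_info->bytes_may_use as truly unreserved space.
 */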
3928
3929static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3930                                   struct btrfs_block_rsv *dst, u64 num_bytes)
3931{
3932        int ret;
3933
3934        ret = block_rsv_use_bytes(src, num_bytes);
3935        if (ret)
3936                return ret;
3937
3938        block_rsv_add_bytes(dst, num_bytes, 1);
3939        return 0;
3940}
3941
3942void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3943{
3944        memset(rsv, 0, sizeof(*rsv));
3945        spin_lock_init(&rsv->lock);
3946}
3947
3948struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3949{
3950        struct btrfs_block_rsv *block_rsv;
3951        struct btrfs_fs_info *fs_info = root->fs_info;
3952
3953        block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3954        if (!block_rsv)
3955                return NULL;
3956
3957        btrfs_init_block_rsv(block_rsv);
3958        block_rsv->space_info = __find_space_info(fs_info,
3959                                                  BTRFS_BLOCK_GROUP_METADATA);
3960        return block_rsv;
3961}
3962
3963void btrfs_free_block_rsv(struct btrfs_root *root,
3964                          struct btrfs_block_rsv *rsv)
3965{
3966        btrfs_block_rsv_release(root, rsv, (u64)-1);
3967        kfree(rsv);
3968}
3969
3970static inline int __block_rsv_add(struct btrfs_root *root,
3971                                  struct btrfs_block_rsv *block_rsv,
3972                                  u64 num_bytes, int flush)
3973{
3974        int ret;
3975
3976        if (num_bytes == 0)
3977                return 0;
3978
3979        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
3980        if (!ret) {
3981                block_rsv_add_bytes(block_rsv, num_bytes, 1);
3982                return 0;
3983        }
3984
3985        return ret;
3986}
3987
3988int btrfs_block_rsv_add(struct btrfs_root *root,
3989                        struct btrfs_block_rsv *block_rsv,
3990                        u64 num_bytes)
3991{
3992        return __block_rsv_add(root, block_rsv, num_bytes, 1);
3993}
3994
3995int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
3996                                struct btrfs_block_rsv *block_rsv,
3997                                u64 num_bytes)
3998{
3999        return __block_rsv_add(root, block_rsv, num_bytes, 0);
4000}
4001
4002int btrfs_block_rsv_check(struct btrfs_root *root,
4003                          struct btrfs_block_rsv *block_rsv, int min_factor)
4004{
4005        u64 num_bytes = 0;
4006        int ret = -ENOSPC;
4007
4008        if (!block_rsv)
4009                return 0;
4010
4011        spin_lock(&block_rsv->lock);
4012        num_bytes = div_factor(block_rsv->size, min_factor);
4013        if (block_rsv->reserved >= num_bytes)
4014                ret = 0;
4015        spin_unlock(&block_rsv->lock);
4016
4017        return ret;
4018}
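
/*
 * Editor's note: div_factor(size, min_factor) computes
 * size * min_factor / 10, so a caller passing min_factor = 8 asks whether
 * at least 80% of the rsv's size is currently reserved; e.g. size = 10 MiB
 * with reserved = 7 MiB fails the check (7 MiB < 8 MiB) and returns
 * -ENOSPC.
 */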
4019
4020static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
4021                                           struct btrfs_block_rsv *block_rsv,
4022                                           u64 min_reserved, int flush)
4023{
4024        u64 num_bytes = 0;
4025        int ret = -ENOSPC;
4026
4027        if (!block_rsv)
4028                return 0;
4029
4030        spin_lock(&block_rsv->lock);
4031        num_bytes = min_reserved;
4032        if (block_rsv->reserved >= num_bytes)
4033                ret = 0;
4034        else
4035                num_bytes -= block_rsv->reserved;
4036        spin_unlock(&block_rsv->lock);
4037
4038        if (!ret)
4039                return 0;
4040
4041        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4042        if (!ret) {
4043                block_rsv_add_bytes(block_rsv, num_bytes, 0);
4044                return 0;
4045        }
4046
4047        return ret;
4048}
4049
4050int btrfs_block_rsv_refill(struct btrfs_root *root,
4051                           struct btrfs_block_rsv *block_rsv,
4052                           u64 min_reserved)
4053{
4054        return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
4055}
4056
4057int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
4058                                   struct btrfs_block_rsv *block_rsv,
4059                                   u64 min_reserved)
4060{
4061        return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
4062}
4063
4064int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4065                            struct btrfs_block_rsv *dst_rsv,
4066                            u64 num_bytes)
4067{
4068        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4069}
4070
4071void btrfs_block_rsv_release(struct btrfs_root *root,
4072                             struct btrfs_block_rsv *block_rsv,
4073                             u64 num_bytes)
4074{
4075        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4076        if (global_rsv->full || global_rsv == block_rsv ||
4077            block_rsv->space_info != global_rsv->space_info)
4078                global_rsv = NULL;
4079        block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4080                                num_bytes);
4081}
4082
4083/*
4084 * helper to calculate size of global block reservation.
4085 * the desired value is sum of space used by extent tree,
4086 * checksum tree and root tree
4087 */
4088static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4089{
4090        struct btrfs_space_info *sinfo;
4091        u64 num_bytes;
4092        u64 meta_used;
4093        u64 data_used;
4094        int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4095
4096        sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4097        spin_lock(&sinfo->lock);
4098        data_used = sinfo->bytes_used;
4099        spin_unlock(&sinfo->lock);
4100
4101        sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4102        spin_lock(&sinfo->lock);
4103        if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4104                data_used = 0;
4105        meta_used = sinfo->bytes_used;
4106        spin_unlock(&sinfo->lock);
4107
4108        num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4109                    csum_size * 2;
4110        num_bytes += div64_u64(data_used + meta_used, 50);
4111
4112        if (num_bytes * 3 > meta_used)
4113                num_bytes = div64_u64(meta_used, 3) * 2;
4114
4115        return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4116}
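
/*
 * Editor's note, a worked example of the sizing above (4 KiB blocks,
 * 4-byte crc32c csums): with data_used = 100 GiB and meta_used = 2 GiB,
 * the csum term is (100 GiB / 4096) * 4 * 2 = 200 MiB and the 2% term is
 * 102 GiB / 50 = ~2.04 GiB, giving ~2.24 GiB in total.  That exceeds a
 * third of meta_used, so the result is clamped to 2/3 * 2 GiB = ~1.33 GiB
 * before being aligned up to the leafsize << 10 granularity (4 MiB for
 * 4 KiB leaves).
 */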
4117
4118static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4119{
4120        struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4121        struct btrfs_space_info *sinfo = block_rsv->space_info;
4122        u64 num_bytes;
4123
4124        num_bytes = calc_global_metadata_size(fs_info);
4125
4126        spin_lock(&block_rsv->lock);
4127        spin_lock(&sinfo->lock);
4128
4129        block_rsv->size = num_bytes;
4130
4131        num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4132                    sinfo->bytes_reserved + sinfo->bytes_readonly +
4133                    sinfo->bytes_may_use;
4134
4135        if (sinfo->total_bytes > num_bytes) {
4136                num_bytes = sinfo->total_bytes - num_bytes;
4137                block_rsv->reserved += num_bytes;
4138                sinfo->bytes_may_use += num_bytes;
4139                trace_btrfs_space_reservation(fs_info, "space_info",
4140                                      (u64)(unsigned long)sinfo, num_bytes, 1);
4141        }
4142
4143        if (block_rsv->reserved >= block_rsv->size) {
4144                num_bytes = block_rsv->reserved - block_rsv->size;
4145                sinfo->bytes_may_use -= num_bytes;
4146                trace_btrfs_space_reservation(fs_info, "space_info",
4147                                      (u64)(unsigned long)sinfo, num_bytes, 0);
4148                sinfo->reservation_progress++;
4149                block_rsv->reserved = block_rsv->size;
4150                block_rsv->full = 1;
4151        }
4152
4153        spin_unlock(&sinfo->lock);
4154        spin_unlock(&block_rsv->lock);
4155}
4156
4157static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4158{
4159        struct btrfs_space_info *space_info;
4160
4161        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4162        fs_info->chunk_block_rsv.space_info = space_info;
4163
4164        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4165        fs_info->global_block_rsv.space_info = space_info;
4166        fs_info->delalloc_block_rsv.space_info = space_info;
4167        fs_info->trans_block_rsv.space_info = space_info;
4168        fs_info->empty_block_rsv.space_info = space_info;
4169        fs_info->delayed_block_rsv.space_info = space_info;
4170
4171        fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4172        fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4173        fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4174        fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4175        fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4176
4177        update_global_block_rsv(fs_info);
4178}
4179
4180static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4181{
4182        block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4183                                (u64)-1);
4184        WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4185        WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4186        WARN_ON(fs_info->trans_block_rsv.size > 0);
4187        WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4188        WARN_ON(fs_info->chunk_block_rsv.size > 0);
4189        WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4190        WARN_ON(fs_info->delayed_block_rsv.size > 0);
4191        WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4192}
4193
4194void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4195                                  struct btrfs_root *root)
4196{
4197        if (!trans->bytes_reserved)
4198                return;
4199
4200        trace_btrfs_space_reservation(root->fs_info, "transaction",
4201                                      (u64)(unsigned long)trans,
4202                                      trans->bytes_reserved, 0);
4203        btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4204        trans->bytes_reserved = 0;
4205}
4206
4207int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4208                                  struct inode *inode)
4209{
4210        struct btrfs_root *root = BTRFS_I(inode)->root;
4211        struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4212        struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4213
4214        /*
4215         * We need to hold space in order to delete our orphan item once we've
4216         * added it; this takes the reservation now so that we can release it
4217         * later, when we are truly done with the orphan item.
4218         */
4219        u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4220        trace_btrfs_space_reservation(root->fs_info, "orphan",
4221                                      btrfs_ino(inode), num_bytes, 1);
4222        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4223}
4224
4225void btrfs_orphan_release_metadata(struct inode *inode)
4226{
4227        struct btrfs_root *root = BTRFS_I(inode)->root;
4228        u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4229        trace_btrfs_space_reservation(root->fs_info, "orphan",
4230                                      btrfs_ino(inode), num_bytes, 0);
4231        btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4232}
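
/*
 * Pairing sketch (hypothetical caller, statement fragments only): the
 * reservation migrated in when the orphan item is added stays in
 * root->orphan_block_rsv until the item is finally deleted:
 */
#if 0
        ret = btrfs_orphan_reserve_metadata(trans, inode); /* orphan added */
        if (ret)
                return ret;
        /* ... truncate or unlink completes ... */
        btrfs_orphan_release_metadata(inode);              /* orphan deleted */
#endif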
4233
4234int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4235                                struct btrfs_pending_snapshot *pending)
4236{
4237        struct btrfs_root *root = pending->root;
4238        struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4239        struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4240        /*
4241         * two for root back/forward refs, two for directory entries
4242         * and one for the root of the snapshot.
4243         */
4244        u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4245        dst_rsv->space_info = src_rsv->space_info;
4246        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4247}
4248
4249/**
4250 * drop_outstanding_extent - drop an outstanding extent
4251 * @inode: the inode we're dropping the extent for
4252 *
4253 * This is called when we are freeing up an outstanding extent, either called
4254 * after an error or after an extent is written.  This will return the number of
4255 * reserved extents that need to be freed.  This must be called with
4256 * BTRFS_I(inode)->lock held.
4257 */
4258static unsigned drop_outstanding_extent(struct inode *inode)
4259{
4260        unsigned drop_inode_space = 0;
4261        unsigned dropped_extents = 0;
4262
4263        BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4264        BTRFS_I(inode)->outstanding_extents--;
4265
4266        if (BTRFS_I(inode)->outstanding_extents == 0 &&
4267            BTRFS_I(inode)->delalloc_meta_reserved) {
4268                drop_inode_space = 1;
4269                BTRFS_I(inode)->delalloc_meta_reserved = 0;
4270        }
4271
4272        /*
4273         * If we still have at least as many outstanding extents as we have
4274         * reserved, then we need to leave the reserved extents count alone.
4275         */
4276        if (BTRFS_I(inode)->outstanding_extents >=
4277            BTRFS_I(inode)->reserved_extents)
4278                return drop_inode_space;
4279
4280        dropped_extents = BTRFS_I(inode)->reserved_extents -
4281                BTRFS_I(inode)->outstanding_extents;
4282        BTRFS_I(inode)->reserved_extents -= dropped_extents;
4283        return dropped_extents + drop_inode_space;
4284}
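
/*
 * Example (hypothetical counts): with outstanding_extents = 2,
 * reserved_extents = 3 and delalloc_meta_reserved set, one call drops
 * outstanding to 1.  Since 1 is not 0 the inode item reservation is
 * kept, and since 1 < 3 we get dropped_extents = 3 - 1 = 2, leaving
 * reserved_extents = 1 and returning 2 reservations for the caller to
 * free.
 */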
4285
4286/**
4287 * calc_csum_metadata_size - return the amount of metadata space that must be
4288 *      reserved/freed for the given bytes.
4289 * @inode: the inode we're manipulating
4290 * @num_bytes: the number of bytes in question
4291 * @reserve: 1 if we are reserving space, 0 if we are freeing space
4292 *
4293 * This adjusts the number of csum_bytes in the inode and then returns the
4294 * correct amount of metadata that must either be reserved or freed.  We
4295 * calculate how many checksums we can fit into one leaf and then divide the
4296 * number of bytes that will need to be checksummed by this value to figure out
4297 * how many checksums will be required.  If we are adding bytes then the number
4298 * may go up and we will return the number of additional bytes that must be
4299 * reserved.  If it is going down we will return the number of bytes that must
4300 * be freed.
4301 *
4302 * This must be called with BTRFS_I(inode)->lock held.
4303 */
4304static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4305                                   int reserve)
4306{
4307        struct btrfs_root *root = BTRFS_I(inode)->root;
4308        u64 csum_size;
4309        int num_csums_per_leaf;
4310        int num_csums;
4311        int old_csums;
4312
4313        if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4314            BTRFS_I(inode)->csum_bytes == 0)
4315                return 0;
4316
4317        old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4318        if (reserve)
4319                BTRFS_I(inode)->csum_bytes += num_bytes;
4320        else
4321                BTRFS_I(inode)->csum_bytes -= num_bytes;
4322        csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4323        num_csums_per_leaf = (int)div64_u64(csum_size,
4324                                            sizeof(struct btrfs_csum_item) +
4325                                            sizeof(struct btrfs_disk_key));
4326        num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4327        num_csums = num_csums + num_csums_per_leaf - 1;
4328        num_csums = num_csums / num_csums_per_leaf;
4329
4330        old_csums = old_csums + num_csums_per_leaf - 1;
4331        old_csums = old_csums / num_csums_per_leaf;
4332
4333        /* No change, no need to reserve more */
4334        if (old_csums == num_csums)
4335                return 0;
4336
4337        if (reserve)
4338                return btrfs_calc_trans_metadata_size(root,
4339                                                      num_csums - old_csums);
4340
4341        return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4342}
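
/*
 * Worked sketch (the per-leaf capacity below is hypothetical): suppose
 * ~200 csum items fit in a leaf and we reserve 1 MiB of data with a 4K
 * sectorsize.  csum_bytes goes 0 -> 1 MiB, i.e. 0 -> 256 csums, i.e.
 * 0 -> 2 leaves once rounded up, so two items' worth of metadata must
 * be reserved:
 */
#if 0
static u64 example_csum_delta(struct btrfs_root *root)
{
        int per_leaf = 200;                     /* hypothetical */
        int old_csums = 0;                      /* 0 bytes / 4K */
        int new_csums = (1 << 20) / 4096;       /* 256 */
        int old_leaves = (old_csums + per_leaf - 1) / per_leaf; /* 0 */
        int new_leaves = (new_csums + per_leaf - 1) / per_leaf; /* 2 */

        return btrfs_calc_trans_metadata_size(root, new_leaves - old_leaves);
}
#endif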
4343
4344int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4345{
4346        struct btrfs_root *root = BTRFS_I(inode)->root;
4347        struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4348        u64 to_reserve = 0;
4349        u64 csum_bytes;
4350        unsigned nr_extents = 0;
4351        int extra_reserve = 0;
4352        int flush = 1;
4353        int ret;
4354
4355        /* Need to be holding the i_mutex here if this isn't the free space cache inode */
4356        if (btrfs_is_free_space_inode(root, inode))
4357                flush = 0;
4358
4359        if (flush && btrfs_transaction_in_commit(root->fs_info))
4360                schedule_timeout(1);
4361
4362        mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4363        num_bytes = ALIGN(num_bytes, root->sectorsize);
4364
4365        spin_lock(&BTRFS_I(inode)->lock);
4366        BTRFS_I(inode)->outstanding_extents++;
4367
4368        if (BTRFS_I(inode)->outstanding_extents >
4369            BTRFS_I(inode)->reserved_extents)
4370                nr_extents = BTRFS_I(inode)->outstanding_extents -
4371                        BTRFS_I(inode)->reserved_extents;
4372
4373        /*
4374         * Add an item to reserve for updating the inode when we complete the
4375         * delalloc io.
4376         */
4377        if (!BTRFS_I(inode)->delalloc_meta_reserved) {
4378                nr_extents++;
4379                extra_reserve = 1;
4380        }
4381
4382        to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4383        to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4384        csum_bytes = BTRFS_I(inode)->csum_bytes;
4385        spin_unlock(&BTRFS_I(inode)->lock);
4386
4387        ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4388        if (ret) {
4389                u64 to_free = 0;
4390                unsigned dropped;
4391
4392                spin_lock(&BTRFS_I(inode)->lock);
4393                dropped = drop_outstanding_extent(inode);
4394                /*
4395                 * If the inode's csum_bytes is the same as the original
4396                 * csum_bytes then we know we haven't raced with any free()ers,
4397                 * so we can just reduce our inode's csum bytes and carry on.
4398                 * Otherwise we have to do the normal free thing to account for
4399                 * the case that the free side didn't free up its reserve
4400                 * because of this outstanding reservation.
4401                 */
4402                if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4403                        calc_csum_metadata_size(inode, num_bytes, 0);
4404                else
4405                        to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4406                spin_unlock(&BTRFS_I(inode)->lock);
4407                if (dropped)
4408                        to_free += btrfs_calc_trans_metadata_size(root, dropped);
4409
4410                if (to_free) {
4411                        btrfs_block_rsv_release(root, block_rsv, to_free);
4412                        trace_btrfs_space_reservation(root->fs_info,
4413                                                      "delalloc",
4414                                                      btrfs_ino(inode),
4415                                                      to_free, 0);
4416                }
4417                mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4418                return ret;
4419        }
4420
4421        spin_lock(&BTRFS_I(inode)->lock);
4422        if (extra_reserve) {
4423                BTRFS_I(inode)->delalloc_meta_reserved = 1;
4424                nr_extents--;
4425        }
4426        BTRFS_I(inode)->reserved_extents += nr_extents;
4427        spin_unlock(&BTRFS_I(inode)->lock);
4428        mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4429
4430        if (to_reserve)
4431                trace_btrfs_space_reservation(root->fs_info, "delalloc",
4432                                              btrfs_ino(inode), to_reserve, 1);
4433        block_rsv_add_bytes(block_rsv, to_reserve, 1);
4434
4435        return 0;
4436}
4437
4438/**
4439 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4440 * @inode: the inode to release the reservation for
4441 * @num_bytes: the number of bytes we're releasing
4442 *
4443 * This will release the metadata reservation for an inode.  This can be called
4444 * once we complete IO for a given set of bytes to release their metadata
4445 * reservations.
4446 */
4447void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4448{
4449        struct btrfs_root *root = BTRFS_I(inode)->root;
4450        u64 to_free = 0;
4451        unsigned dropped;
4452
4453        num_bytes = ALIGN(num_bytes, root->sectorsize);
4454        spin_lock(&BTRFS_I(inode)->lock);
4455        dropped = drop_outstanding_extent(inode);
4456
4457        to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4458        spin_unlock(&BTRFS_I(inode)->lock);
4459        if (dropped > 0)
4460                to_free += btrfs_calc_trans_metadata_size(root, dropped);
4461
4462        trace_btrfs_space_reservation(root->fs_info, "delalloc",
4463                                      btrfs_ino(inode), to_free, 0);
4464        btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4465                                to_free);
4466}
4467
4468/**
4469 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4470 * @inode: inode we're writing to
4471 * @num_bytes: the number of bytes we want to allocate
4472 *
4473 * This will do the following things
4474 *
4475 * o reserve space in the data space info for num_bytes
4476 * o reserve space in the metadata space info based on number of outstanding
4477 *   extents and how much csums will be needed
4478 * o add to the inode's ->delalloc_bytes
4479 * o add it to the fs_info's delalloc inodes list.
4480 *
4481 * This will return 0 for success and -ENOSPC if there is no space left.
4482 */
4483int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4484{
4485        int ret;
4486
4487        ret = btrfs_check_data_free_space(inode, num_bytes);
4488        if (ret)
4489                return ret;
4490
4491        ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4492        if (ret) {
4493                btrfs_free_reserved_data_space(inode, num_bytes);
4494                return ret;
4495        }
4496
4497        return 0;
4498}
4499
4500/**
4501 * btrfs_delalloc_release_space - release data and metadata space for delalloc
4502 * @inode: inode we're releasing space for
4503 * @num_bytes: the number of bytes we want to free up
4504 *
4505 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4506 * called in the case that we no longer need the metadata AND data
4507 * reservations, e.g. if there is an error or we insert an inline extent.
4508 *
4509 * This function will release the metadata space that was not used and will
4510 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4511 * list if there are no delalloc bytes left.
4512 */
4513void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4514{
4515        btrfs_delalloc_release_metadata(inode, num_bytes);
4516        btrfs_free_reserved_data_space(inode, num_bytes);
4517}
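
/*
 * Typical pairing (sketch; copy_pages_and_set_delalloc() is a
 * hypothetical stand-in for the write path): reserve both halves up
 * front and give them back if nothing was actually dirtied:
 */
#if 0
        ret = btrfs_delalloc_reserve_space(inode, num_bytes);
        if (ret)
                return ret;
        ret = copy_pages_and_set_delalloc(inode, pos, num_bytes);
        if (ret)
                btrfs_delalloc_release_space(inode, num_bytes);
#endif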
4518
4519static int update_block_group(struct btrfs_trans_handle *trans,
4520                              struct btrfs_root *root,
4521                              u64 bytenr, u64 num_bytes, int alloc)
4522{
4523        struct btrfs_block_group_cache *cache = NULL;
4524        struct btrfs_fs_info *info = root->fs_info;
4525        u64 total = num_bytes;
4526        u64 old_val;
4527        u64 byte_in_group;
4528        int factor;
4529
4530        /* block accounting for super block */
4531        spin_lock(&info->delalloc_lock);
4532        old_val = btrfs_super_bytes_used(info->super_copy);
4533        if (alloc)
4534                old_val += num_bytes;
4535        else
4536                old_val -= num_bytes;
4537        btrfs_set_super_bytes_used(info->super_copy, old_val);
4538        spin_unlock(&info->delalloc_lock);
4539
4540        while (total) {
4541                cache = btrfs_lookup_block_group(info, bytenr);
4542                if (!cache)
4543                        return -1;
4544                if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4545                                    BTRFS_BLOCK_GROUP_RAID1 |
4546                                    BTRFS_BLOCK_GROUP_RAID10))
4547                        factor = 2;
4548                else
4549                        factor = 1;
4550                /*
4551                 * If this block group has free space cache written out, we
4552                 * need to make sure to load it if we are removing space.  This
4553                 * is because we need the unpinning stage to actually add the
4554                 * space back to the block group, otherwise we will leak space.
4555                 */
4556                if (!alloc && cache->cached == BTRFS_CACHE_NO)
4557                        cache_block_group(cache, trans, NULL, 1);
4558
4559                byte_in_group = bytenr - cache->key.objectid;
4560                WARN_ON(byte_in_group > cache->key.offset);
4561
4562                spin_lock(&cache->space_info->lock);
4563                spin_lock(&cache->lock);
4564
4565                if (btrfs_test_opt(root, SPACE_CACHE) &&
4566                    cache->disk_cache_state < BTRFS_DC_CLEAR)
4567                        cache->disk_cache_state = BTRFS_DC_CLEAR;
4568
4569                cache->dirty = 1;
4570                old_val = btrfs_block_group_used(&cache->item);
4571                num_bytes = min(total, cache->key.offset - byte_in_group);
4572                if (alloc) {
4573                        old_val += num_bytes;
4574                        btrfs_set_block_group_used(&cache->item, old_val);
4575                        cache->reserved -= num_bytes;
4576                        cache->space_info->bytes_reserved -= num_bytes;
4577                        cache->space_info->bytes_used += num_bytes;
4578                        cache->space_info->disk_used += num_bytes * factor;
4579                        spin_unlock(&cache->lock);
4580                        spin_unlock(&cache->space_info->lock);
4581                } else {
4582                        old_val -= num_bytes;
4583                        btrfs_set_block_group_used(&cache->item, old_val);
4584                        cache->pinned += num_bytes;
4585                        cache->space_info->bytes_pinned += num_bytes;
4586                        cache->space_info->bytes_used -= num_bytes;
4587                        cache->space_info->disk_used -= num_bytes * factor;
4588                        spin_unlock(&cache->lock);
4589                        spin_unlock(&cache->space_info->lock);
4590
4591                        set_extent_dirty(info->pinned_extents,
4592                                         bytenr, bytenr + num_bytes - 1,
4593                                         GFP_NOFS | __GFP_NOFAIL);
4594                }
4595                btrfs_put_block_group(cache);
4596                total -= num_bytes;
4597                bytenr += num_bytes;
4598        }
4599        return 0;
4600}
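
/*
 * Example (hypothetical): allocating 1 MiB from a RAID1 block group
 * moves 1 MiB from ->reserved to ->used in the space_info but grows
 * disk_used by 2 MiB, because factor == 2 (two physical copies back
 * every logical byte).  Freeing does the reverse, parking the bytes
 * in ->pinned until the transaction commits.
 */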
4601
4602static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4603{
4604        struct btrfs_block_group_cache *cache;
4605        u64 bytenr;
4606
4607        cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4608        if (!cache)
4609                return 0;
4610
4611        bytenr = cache->key.objectid;
4612        btrfs_put_block_group(cache);
4613
4614        return bytenr;
4615}
4616
4617static int pin_down_extent(struct btrfs_root *root,
4618                           struct btrfs_block_group_cache *cache,
4619                           u64 bytenr, u64 num_bytes, int reserved)
4620{
4621        spin_lock(&cache->space_info->lock);
4622        spin_lock(&cache->lock);
4623        cache->pinned += num_bytes;
4624        cache->space_info->bytes_pinned += num_bytes;
4625        if (reserved) {
4626                cache->reserved -= num_bytes;
4627                cache->space_info->bytes_reserved -= num_bytes;
4628        }
4629        spin_unlock(&cache->lock);
4630        spin_unlock(&cache->space_info->lock);
4631
4632        set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4633                         bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4634        return 0;
4635}
4636
4637/*
4638 * this function must be called within a transaction
4639 */
4640int btrfs_pin_extent(struct btrfs_root *root,
4641                     u64 bytenr, u64 num_bytes, int reserved)
4642{
4643        struct btrfs_block_group_cache *cache;
4644
4645        cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4646        BUG_ON(!cache);
4647
4648        pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4649
4650        btrfs_put_block_group(cache);
4651        return 0;
4652}
4653
4654/*
4655 * this function must be called within a transaction
4656 */
4657int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4658                                    struct btrfs_root *root,
4659                                    u64 bytenr, u64 num_bytes)
4660{
4661        struct btrfs_block_group_cache *cache;
4662
4663        cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4664        BUG_ON(!cache);
4665
4666        /*
4667         * pull in the free space cache (if any) so that our pin
4668         * removes the free space from the cache.  We have load_only set
4669         * to one because the slow code to read in the free extents does check
4670         * the pinned extents.
4671         */
4672        cache_block_group(cache, trans, root, 1);
4673
4674        pin_down_extent(root, cache, bytenr, num_bytes, 0);
4675
4676        /* remove us from the free space cache (if we're there at all) */
4677        btrfs_remove_free_space(cache, bytenr, num_bytes);
4678        btrfs_put_block_group(cache);
4679        return 0;
4680}
4681
4682/**
4683 * btrfs_update_reserved_bytes - update the block_group and space info counters
4684 * @cache:      The cache we are manipulating
4685 * @num_bytes:  The number of bytes in question
4686 * @reserve:    One of the reservation enums
4687 *
4688 * This is called by the allocator when it reserves space, or by somebody who is
4689 * freeing space that was never actually used on disk.  For example if you
4690 * reserve some space for a new leaf in transaction A and before transaction A
4691 * commits you free that leaf, you call this with reserve set to 0 in order to
4692 * clear the reservation.
4693 *
4694 * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
4695 * ENOSPC accounting.  For data we handle the reservation through clearing the
4696 * delalloc bits in the io_tree.  We have to do this since we could end up
4697 * allocating less disk space for the amount of data we have reserved in the
4698 * case of compression.
4699 *
4700 * If this is a reservation and the block group has become read only we cannot
4701 * make the reservation and return -EAGAIN, otherwise this function always
4702 * succeeds.
4703 */
4704static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4705                                       u64 num_bytes, int reserve)
4706{
4707        struct btrfs_space_info *space_info = cache->space_info;
4708        int ret = 0;
4709        spin_lock(&space_info->lock);
4710        spin_lock(&cache->lock);
4711        if (reserve != RESERVE_FREE) {
4712                if (cache->ro) {
4713                        ret = -EAGAIN;
4714                } else {
4715                        cache->reserved += num_bytes;
4716                        space_info->bytes_reserved += num_bytes;
4717                        if (reserve == RESERVE_ALLOC) {
4718                                trace_btrfs_space_reservation(cache->fs_info,
4719                                              "space_info",
4720                                              (u64)(unsigned long)space_info,
4721                                              num_bytes, 0);
4722                                space_info->bytes_may_use -= num_bytes;
4723                        }
4724                }
4725        } else {
4726                if (cache->ro)
4727                        space_info->bytes_readonly += num_bytes;
4728                cache->reserved -= num_bytes;
4729                space_info->bytes_reserved -= num_bytes;
4730                space_info->reservation_progress++;
4731        }
4732        spin_unlock(&cache->lock);
4733        spin_unlock(&space_info->lock);
4734        return ret;
4735}
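
/*
 * Usage sketch (hypothetical flow, statement fragments only): reserve
 * space for a new leaf, then clear the reservation again if the leaf
 * is freed before it was ever written:
 */
#if 0
        ret = btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC);
        if (ret == -EAGAIN)
                return ret;     /* the block group went read only */
        /* ... the leaf is dropped before it reaches disk ... */
        btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
#endif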
4736
4737int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4738                                struct btrfs_root *root)
4739{
4740        struct btrfs_fs_info *fs_info = root->fs_info;
4741        struct btrfs_caching_control *next;
4742        struct btrfs_caching_control *caching_ctl;
4743        struct btrfs_block_group_cache *cache;
4744
4745        down_write(&fs_info->extent_commit_sem);
4746
4747        list_for_each_entry_safe(caching_ctl, next,
4748                                 &fs_info->caching_block_groups, list) {
4749                cache = caching_ctl->block_group;
4750                if (block_group_cache_done(cache)) {
4751                        cache->last_byte_to_unpin = (u64)-1;
4752                        list_del_init(&caching_ctl->list);
4753                        put_caching_control(caching_ctl);
4754                } else {
4755                        cache->last_byte_to_unpin = caching_ctl->progress;
4756                }
4757        }
4758
4759        if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4760                fs_info->pinned_extents = &fs_info->freed_extents[1];
4761        else
4762                fs_info->pinned_extents = &fs_info->freed_extents[0];
4763
4764        up_write(&fs_info->extent_commit_sem);
4765
4766        update_global_block_rsv(fs_info);
4767        return 0;
4768}
4769
4770static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4771{
4772        struct btrfs_fs_info *fs_info = root->fs_info;
4773        struct btrfs_block_group_cache *cache = NULL;
4774        u64 len;
4775
4776        while (start <= end) {
4777                if (!cache ||
4778                    start >= cache->key.objectid + cache->key.offset) {
4779                        if (cache)
4780                                btrfs_put_block_group(cache);
4781                        cache = btrfs_lookup_block_group(fs_info, start);
4782                        BUG_ON(!cache);
4783                }
4784
4785                len = cache->key.objectid + cache->key.offset - start;
4786                len = min(len, end + 1 - start);
4787
4788                if (start < cache->last_byte_to_unpin) {
4789                        len = min(len, cache->last_byte_to_unpin - start);
4790                        btrfs_add_free_space(cache, start, len);
4791                }
4792
4793                start += len;
4794
4795                spin_lock(&cache->space_info->lock);
4796                spin_lock(&cache->lock);
4797                cache->pinned -= len;
4798                cache->space_info->bytes_pinned -= len;
4799                if (cache->ro)
4800                        cache->space_info->bytes_readonly += len;
4801                spin_unlock(&cache->lock);
4802                spin_unlock(&cache->space_info->lock);
4803        }
4804
4805        if (cache)
4806                btrfs_put_block_group(cache);
4807        return 0;
4808}
4809
4810int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4811                               struct btrfs_root *root)
4812{
4813        struct btrfs_fs_info *fs_info = root->fs_info;
4814        struct extent_io_tree *unpin;
4815        u64 start;
4816        u64 end;
4817        int ret;
4818
4819        if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4820                unpin = &fs_info->freed_extents[1];
4821        else
4822                unpin = &fs_info->freed_extents[0];
4823
4824        while (1) {
4825                ret = find_first_extent_bit(unpin, 0, &start, &end,
4826                                            EXTENT_DIRTY);
4827                if (ret)
4828                        break;
4829
4830                if (btrfs_test_opt(root, DISCARD))
4831                        ret = btrfs_discard_extent(root, start,
4832                                                   end + 1 - start, NULL);
4833
4834                clear_extent_dirty(unpin, start, end, GFP_NOFS);
4835                unpin_extent_range(root, start, end);
4836                cond_resched();
4837        }
4838
4839        return 0;
4840}
4841
4842static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4843                                struct btrfs_root *root,
4844                                u64 bytenr, u64 num_bytes, u64 parent,
4845                                u64 root_objectid, u64 owner_objectid,
4846                                u64 owner_offset, int refs_to_drop,
4847                                struct btrfs_delayed_extent_op *extent_op)
4848{
4849        struct btrfs_key key;
4850        struct btrfs_path *path;
4851        struct btrfs_fs_info *info = root->fs_info;
4852        struct btrfs_root *extent_root = info->extent_root;
4853        struct extent_buffer *leaf;
4854        struct btrfs_extent_item *ei;
4855        struct btrfs_extent_inline_ref *iref;
4856        int ret;
4857        int is_data;
4858        int extent_slot = 0;
4859        int found_extent = 0;
4860        int num_to_del = 1;
4861        u32 item_size;
4862        u64 refs;
4863
4864        path = btrfs_alloc_path();
4865        if (!path)
4866                return -ENOMEM;
4867
4868        path->reada = 1;
4869        path->leave_spinning = 1;
4870
4871        is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4872        BUG_ON(!is_data && refs_to_drop != 1);
4873
4874        ret = lookup_extent_backref(trans, extent_root, path, &iref,
4875                                    bytenr, num_bytes, parent,
4876                                    root_objectid, owner_objectid,
4877                                    owner_offset);
4878        if (ret == 0) {
4879                extent_slot = path->slots[0];
4880                while (extent_slot >= 0) {
4881                        btrfs_item_key_to_cpu(path->nodes[0], &key,
4882                                              extent_slot);
4883                        if (key.objectid != bytenr)
4884                                break;
4885                        if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4886                            key.offset == num_bytes) {
4887                                found_extent = 1;
4888                                break;
4889                        }
4890                        if (path->slots[0] - extent_slot > 5)
4891                                break;
4892                        extent_slot--;
4893                }
4894#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4895                item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4896                if (found_extent && item_size < sizeof(*ei))
4897                        found_extent = 0;
4898#endif
4899                if (!found_extent) {
4900                        BUG_ON(iref);
4901                        ret = remove_extent_backref(trans, extent_root, path,
4902                                                    NULL, refs_to_drop,
4903                                                    is_data);
4904                        BUG_ON(ret);
4905                        btrfs_release_path(path);
4906                        path->leave_spinning = 1;
4907
4908                        key.objectid = bytenr;
4909                        key.type = BTRFS_EXTENT_ITEM_KEY;
4910                        key.offset = num_bytes;
4911
4912                        ret = btrfs_search_slot(trans, extent_root,
4913                                                &key, path, -1, 1);
4914                        if (ret) {
4915                                printk(KERN_ERR "umm, got %d back from search"
4916                                       ", was looking for %llu\n", ret,
4917                                       (unsigned long long)bytenr);
4918                                if (ret > 0)
4919                                        btrfs_print_leaf(extent_root,
4920                                                         path->nodes[0]);
4921                        }
4922                        BUG_ON(ret);
4923                        extent_slot = path->slots[0];
4924                }
4925        } else {
4926                btrfs_print_leaf(extent_root, path->nodes[0]);
4927                WARN_ON(1);
4928                printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4929                       "parent %llu root %llu  owner %llu offset %llu\n",
4930                       (unsigned long long)bytenr,
4931                       (unsigned long long)parent,
4932                       (unsigned long long)root_objectid,
4933                       (unsigned long long)owner_objectid,
4934                       (unsigned long long)owner_offset);
4935        }
4936
4937        leaf = path->nodes[0];
4938        item_size = btrfs_item_size_nr(leaf, extent_slot);
4939#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4940        if (item_size < sizeof(*ei)) {
4941                BUG_ON(found_extent || extent_slot != path->slots[0]);
4942                ret = convert_extent_item_v0(trans, extent_root, path,
4943                                             owner_objectid, 0);
4944                BUG_ON(ret < 0);
4945
4946                btrfs_release_path(path);
4947                path->leave_spinning = 1;
4948
4949                key.objectid = bytenr;
4950                key.type = BTRFS_EXTENT_ITEM_KEY;
4951                key.offset = num_bytes;
4952
4953                ret = btrfs_search_slot(trans, extent_root, &key, path,
4954                                        -1, 1);
4955                if (ret) {
4956                        printk(KERN_ERR "umm, got %d back from search"
4957                               ", was looking for %llu\n", ret,
4958                               (unsigned long long)bytenr);
4959                        btrfs_print_leaf(extent_root, path->nodes[0]);
4960                }
4961                BUG_ON(ret);
4962                extent_slot = path->slots[0];
4963                leaf = path->nodes[0];
4964                item_size = btrfs_item_size_nr(leaf, extent_slot);
4965        }
4966#endif
4967        BUG_ON(item_size < sizeof(*ei));
4968        ei = btrfs_item_ptr(leaf, extent_slot,
4969                            struct btrfs_extent_item);
4970        if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4971                struct btrfs_tree_block_info *bi;
4972                BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4973                bi = (struct btrfs_tree_block_info *)(ei + 1);
4974                WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4975        }
4976
4977        refs = btrfs_extent_refs(leaf, ei);
4978        BUG_ON(refs < refs_to_drop);
4979        refs -= refs_to_drop;
4980
4981        if (refs > 0) {
4982                if (extent_op)
4983                        __run_delayed_extent_op(extent_op, leaf, ei);
4984                /*
4985                 * In the case of inline back ref, reference count will
4986                 * be updated by remove_extent_backref
4987                 */
4988                if (iref) {
4989                        BUG_ON(!found_extent);
4990                } else {
4991                        btrfs_set_extent_refs(leaf, ei, refs);
4992                        btrfs_mark_buffer_dirty(leaf);
4993                }
4994                if (found_extent) {
4995                        ret = remove_extent_backref(trans, extent_root, path,
4996                                                    iref, refs_to_drop,
4997                                                    is_data);
4998                        BUG_ON(ret);
4999                }
5000        } else {
5001                if (found_extent) {
5002                        BUG_ON(is_data && refs_to_drop !=
5003                               extent_data_ref_count(root, path, iref));
5004                        if (iref) {
5005                                BUG_ON(path->slots[0] != extent_slot);
5006                        } else {
5007                                BUG_ON(path->slots[0] != extent_slot + 1);
5008                                path->slots[0] = extent_slot;
5009                                num_to_del = 2;
5010                        }
5011                }
5012
5013                ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5014                                      num_to_del);
5015                BUG_ON(ret);
5016                btrfs_release_path(path);
5017
5018                if (is_data) {
5019                        ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5020                        BUG_ON(ret);
5021                } else {
5022                        invalidate_mapping_pages(info->btree_inode->i_mapping,
5023                             bytenr >> PAGE_CACHE_SHIFT,
5024                             (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
5025                }
5026
5027                ret = update_block_group(trans, root, bytenr, num_bytes, 0);
5028                BUG_ON(ret);
5029        }
5030        btrfs_free_path(path);
5031        return ret;
5032}
5033
5034/*
5035 * when we free a block, it is possible (and likely) that we free the last
5036 * delayed ref for that extent as well.  This searches the delayed ref tree for
5037 * a given extent, and if there are no other delayed refs to be processed, it
5038 * removes it from the tree.
5039 */
5040static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5041                                      struct btrfs_root *root, u64 bytenr)
5042{
5043        struct btrfs_delayed_ref_head *head;
5044        struct btrfs_delayed_ref_root *delayed_refs;
5045        struct btrfs_delayed_ref_node *ref;
5046        struct rb_node *node;
5047        int ret = 0;
5048
5049        delayed_refs = &trans->transaction->delayed_refs;
5050        spin_lock(&delayed_refs->lock);
5051        head = btrfs_find_delayed_ref_head(trans, bytenr);
5052        if (!head)
5053                goto out;
5054
5055        node = rb_prev(&head->node.rb_node);
5056        if (!node)
5057                goto out;
5058
5059        ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5060
5061        /* there are still entries for this ref, we can't drop it */
5062        if (ref->bytenr == bytenr)
5063                goto out;
5064
5065        if (head->extent_op) {
5066                if (!head->must_insert_reserved)
5067                        goto out;
5068                kfree(head->extent_op);
5069                head->extent_op = NULL;
5070        }
5071
5072        /*
5073         * waiting for the lock here would deadlock.  If someone else has it
5074         * locked, they are already in the process of dropping it anyway
5075         */
5076        if (!mutex_trylock(&head->mutex))
5077                goto out;
5078
5079        /*
5080         * at this point we have a head with no other entries.  Go
5081         * ahead and process it.
5082         */
5083        head->node.in_tree = 0;
5084        rb_erase(&head->node.rb_node, &delayed_refs->root);
5085
5086        delayed_refs->num_entries--;
5087        if (waitqueue_active(&delayed_refs->seq_wait))
5088                wake_up(&delayed_refs->seq_wait);
5089
5090        /*
5091         * we don't take a ref on the node because we're removing it from the
5092         * tree, so we just steal the ref the tree was holding.
5093         */
5094        delayed_refs->num_heads--;
5095        if (list_empty(&head->cluster))
5096                delayed_refs->num_heads_ready--;
5097
5098        list_del_init(&head->cluster);
5099        spin_unlock(&delayed_refs->lock);
5100
5101        BUG_ON(head->extent_op);
5102        if (head->must_insert_reserved)
5103                ret = 1;
5104
5105        mutex_unlock(&head->mutex);
5106        btrfs_put_delayed_ref(&head->node);
5107        return ret;
5108out:
5109        spin_unlock(&delayed_refs->lock);
5110        return 0;
5111}
5112
5113void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5114                           struct btrfs_root *root,
5115                           struct extent_buffer *buf,
5116                           u64 parent, int last_ref, int for_cow)
5117{
5118        struct btrfs_block_group_cache *cache = NULL;
5119        int ret;
5120
5121        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5122                ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5123                                        buf->start, buf->len,
5124                                        parent, root->root_key.objectid,
5125                                        btrfs_header_level(buf),
5126                                        BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5127                BUG_ON(ret);
5128        }
5129
5130        if (!last_ref)
5131                return;
5132
5133        cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5134
5135        if (btrfs_header_generation(buf) == trans->transid) {
5136                if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5137                        ret = check_ref_cleanup(trans, root, buf->start);
5138                        if (!ret)
5139                                goto out;
5140                }
5141
5142                if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5143                        pin_down_extent(root, cache, buf->start, buf->len, 1);
5144                        goto out;
5145                }
5146
5147                WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5148
5149                btrfs_add_free_space(cache, buf->start, buf->len);
5150                btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5151        }
5152out:
5153        /*
5154         * We're deleting the buffer, so clear the corrupt flag since it no
5155         * longer matters.
5156         */
5157        clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5158        btrfs_put_block_group(cache);
5159}
5160
5161int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5162                      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5163                      u64 owner, u64 offset, int for_cow)
5164{
5165        int ret;
5166        struct btrfs_fs_info *fs_info = root->fs_info;
5167
5168        /*
5169         * tree log blocks never actually go into the extent allocation
5170         * tree, just update pinning info and exit early.
5171         */
5172        if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5173                WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5174                /* unlocks the pinned mutex */
5175                btrfs_pin_extent(root, bytenr, num_bytes, 1);
5176                ret = 0;
5177        } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5178                ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5179                                        num_bytes,
5180                                        parent, root_objectid, (int)owner,
5181                                        BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5182                BUG_ON(ret);
5183        } else {
5184                ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5185                                                num_bytes,
5186                                                parent, root_objectid, owner,
5187                                                offset, BTRFS_DROP_DELAYED_REF,
5188                                                NULL, for_cow);
5189                BUG_ON(ret);
5190        }
5191        return ret;
5192}
5193
5194static u64 stripe_align(struct btrfs_root *root, u64 val)
5195{
5196        u64 mask = ((u64)root->stripesize - 1);
5197        u64 ret = (val + mask) & ~mask;
5198        return ret;
5199}
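
/*
 * Example: with a 64K stripesize, mask = 0xffff, so
 * stripe_align(root, 100000) = (100000 + 65535) & ~65535 = 131072;
 * values round up to the next stripe boundary and already-aligned
 * values pass through unchanged.
 */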
5200
5201/*
5202 * when we wait for progress in the block group caching, it's because
5203 * our allocation attempt failed at least once.  So, we must sleep
5204 * and let some progress happen before we try again.
5205 *
5206 * This function will sleep at least once waiting for new free space to
5207 * show up, and then it will check the block group free space numbers
5208 * for our min num_bytes.  Another option is to have it go ahead
5209 * and look in the rbtree for a free extent of a given size, but this
5210 * is a good start.
5211 */
5212static noinline int
5213wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5214                                u64 num_bytes)
5215{
5216        struct btrfs_caching_control *caching_ctl;
5217        DEFINE_WAIT(wait);
5218
5219        caching_ctl = get_caching_control(cache);
5220        if (!caching_ctl)
5221                return 0;
5222
5223        wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5224                   (cache->free_space_ctl->free_space >= num_bytes));
5225
5226        put_caching_control(caching_ctl);
5227        return 0;
5228}
5229
5230static noinline int
5231wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5232{
5233        struct btrfs_caching_control *caching_ctl;
5234        DEFINE_WAIT(wait);
5235
5236        caching_ctl = get_caching_control(cache);
5237        if (!caching_ctl)
5238                return 0;
5239
5240        wait_event(caching_ctl->wait, block_group_cache_done(cache));
5241
5242        put_caching_control(caching_ctl);
5243        return 0;
5244}
5245
5246static int get_block_group_index(struct btrfs_block_group_cache *cache)
5247{
5248        int index;
5249        if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
5250                index = 0;
5251        else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
5252                index = 1;
5253        else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
5254                index = 2;
5255        else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
5256                index = 3;
5257        else
5258                index = 4;
5259        return index;
5260}
5261
5262enum btrfs_loop_type {
5263        LOOP_FIND_IDEAL = 0,
5264        LOOP_CACHING_NOWAIT = 1,
5265        LOOP_CACHING_WAIT = 2,
5266        LOOP_ALLOC_CHUNK = 3,
5267        LOOP_NO_EMPTY_SIZE = 4,
5268};
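
/*
 * Sketch of the escalation implied by the enum above: each failed pass
 * of find_free_extent() widens the search -- first only ideally cached
 * block groups, then groups still caching in the background, then
 * waiting for caching to finish, then allocating a fresh chunk, and
 * finally retrying with empty_size and empty_cluster forced to zero.
 */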
5269
5270/*
5271 * walks the btree of allocated extents and finds a hole of a given size.
5272 * The key ins is changed to record the hole:
5273 * ins->objectid == block start
5274 * ins->flags = BTRFS_EXTENT_ITEM_KEY
5275 * ins->offset == number of blocks
5276 * Any available blocks before search_start are skipped.
5277 */
5278static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5279                                     struct btrfs_root *orig_root,
5280                                     u64 num_bytes, u64 empty_size,
5281                                     u64 search_start, u64 search_end,
5282                                     u64 hint_byte, struct btrfs_key *ins,
5283                                     u64 data)
5284{
5285        int ret = 0;
5286        struct btrfs_root *root = orig_root->fs_info->extent_root;
5287        struct btrfs_free_cluster *last_ptr = NULL;
5288        struct btrfs_block_group_cache *block_group = NULL;
5289        struct btrfs_block_group_cache *used_block_group;
5290        int empty_cluster = 2 * 1024 * 1024;
5291        int allowed_chunk_alloc = 0;
5292        int done_chunk_alloc = 0;
5293        struct btrfs_space_info *space_info;
5294        int loop = 0;
5295        int index = 0;
5296        int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5297                RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5298        bool found_uncached_bg = false;
5299        bool failed_cluster_refill = false;
5300        bool failed_alloc = false;
5301        bool use_cluster = true;
5302        bool have_caching_bg = false;
5303        u64 ideal_cache_percent = 0;
5304        u64 ideal_cache_offset = 0;
5305
5306        WARN_ON(num_bytes < root->sectorsize);
5307        btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5308        ins->objectid = 0;
5309        ins->offset = 0;
5310
5311        trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5312
5313        space_info = __find_space_info(root->fs_info, data);
5314        if (!space_info) {
5315                printk(KERN_ERR "No space info for %llu\n", data);
5316                return -ENOSPC;
5317        }
5318
5319        /*
5320         * If the space info is for both data and metadata it means we have a
5321         * small filesystem and we can't use the clustering stuff.
5322         */
5323        if (btrfs_mixed_space_info(space_info))
5324                use_cluster = false;
5325
5326        if (orig_root->ref_cows || empty_size)
5327                allowed_chunk_alloc = 1;
5328
5329        if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5330                last_ptr = &root->fs_info->meta_alloc_cluster;
5331                if (!btrfs_test_opt(root, SSD))
5332                        empty_cluster = 64 * 1024;
5333        }
5334
5335        if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5336            btrfs_test_opt(root, SSD)) {
5337                last_ptr = &root->fs_info->data_alloc_cluster;
5338        }
5339
5340        if (last_ptr) {
5341                spin_lock(&last_ptr->lock);
5342                if (last_ptr->block_group)
5343                        hint_byte = last_ptr->window_start;
5344                spin_unlock(&last_ptr->lock);
5345        }
5346
5347        search_start = max(search_start, first_logical_byte(root, 0));
5348        search_start = max(search_start, hint_byte);
5349
5350        if (!last_ptr)
5351                empty_cluster = 0;
5352
5353        if (search_start == hint_byte) {
5354ideal_cache:
5355                block_group = btrfs_lookup_block_group(root->fs_info,
5356                                                       search_start);
5357                used_block_group = block_group;
5358                /*
5359                 * we don't want to use the block group if it doesn't match our
5360                 * allocation bits, or if it's not cached.
5361                 *
5362                 * However if we are re-searching with an ideal block group
5363                 * picked out then we don't care whether the block group is cached.
5364                 */
5365                if (block_group && block_group_bits(block_group, data) &&
5366                    (block_group->cached != BTRFS_CACHE_NO ||
5367                     search_start == ideal_cache_offset)) {
5368                        down_read(&space_info->groups_sem);
5369                        if (list_empty(&block_group->list) ||
5370                            block_group->ro) {
5371                                /*
5372                                 * someone is removing this block group,
5373                                 * we can't jump into the have_block_group
5374                                 * target because our list pointers are not
5375                                 * valid
5376                                 */
5377                                btrfs_put_block_group(block_group);
5378                                up_read(&space_info->groups_sem);
5379                        } else {
5380                                index = get_block_group_index(block_group);
5381                                goto have_block_group;
5382                        }
5383                } else if (block_group) {
5384                        btrfs_put_block_group(block_group);
5385                }
5386        }
5387search:
5388        have_caching_bg = false;
5389        down_read(&space_info->groups_sem);
5390        list_for_each_entry(block_group, &space_info->block_groups[index],
5391                            list) {
5392                u64 offset;
5393                int cached;
5394
5395                used_block_group = block_group;
5396                btrfs_get_block_group(block_group);
5397                search_start = block_group->key.objectid;
5398
5399                /*
5400                 * this can happen if we end up cycling through all the
5401                 * raid types, but we want to make sure we only allocate
5402                 * for the proper type.
5403                 */
5404                if (!block_group_bits(block_group, data)) {
5405                        u64 extra = BTRFS_BLOCK_GROUP_DUP |
5406                                    BTRFS_BLOCK_GROUP_RAID1 |
5407                                    BTRFS_BLOCK_GROUP_RAID10;
5408
5409                        /*
5410                         * if they asked for extra copies and this block group
5411                         * doesn't provide them, bail.  This does allow us to
5412                         * fill raid0 from raid1.
5413                         */
5414                        if ((data & extra) && !(block_group->flags & extra))
5415                                goto loop;
5416                }
5417
5418have_block_group:
5419                cached = block_group_cache_done(block_group);
5420                if (unlikely(!cached)) {
5421                        u64 free_percent;
5422
5423                        found_uncached_bg = true;
5424                        ret = cache_block_group(block_group, trans,
5425                                                orig_root, 1);
5426                        if (block_group->cached == BTRFS_CACHE_FINISHED)
5427                                goto alloc;
5428
5429                        free_percent = btrfs_block_group_used(&block_group->item);
5430                        free_percent *= 100;
5431                        free_percent = div64_u64(free_percent,
5432                                                 block_group->key.offset);
5433                        free_percent = 100 - free_percent;
5434                        if (free_percent > ideal_cache_percent &&
5435                            likely(!block_group->ro)) {
5436                                ideal_cache_offset = block_group->key.objectid;
5437                                ideal_cache_percent = free_percent;
5438                        }
5439
5440                        /*
5441                         * The caching workers are limited to 2 threads, so we
5442                         * can queue as much work as we care to.
5443                         */
5444                        if (loop > LOOP_FIND_IDEAL) {
5445                                ret = cache_block_group(block_group, trans,
5446                                                        orig_root, 0);
5447                                BUG_ON(ret);
5448                        }
5449
5450                        /*
5451                         * If loop is set for cached only, try the next block
5452                         * group.
5453                         */
5454                        if (loop == LOOP_FIND_IDEAL)
5455                                goto loop;
5456                }
5457
5458alloc:
5459                if (unlikely(block_group->ro))
5460                        goto loop;
5461
5462                /*
5463                 * Ok we want to try and use the cluster allocator, so
5464         * let's look there
5465                 */
5466                if (last_ptr) {
5467                        /*
5468                         * the refill lock keeps out other
5469                         * people trying to start a new cluster
5470                         */
5471                        spin_lock(&last_ptr->refill_lock);
5472                        used_block_group = last_ptr->block_group;
5473                        if (used_block_group != block_group &&
5474                            (!used_block_group ||
5475                             used_block_group->ro ||
5476                             !block_group_bits(used_block_group, data))) {
5477                                used_block_group = block_group;
5478                                goto refill_cluster;
5479                        }
5480
5481                        if (used_block_group != block_group)
5482                                btrfs_get_block_group(used_block_group);
5483
5484                        offset = btrfs_alloc_from_cluster(used_block_group,
5485                          last_ptr, num_bytes, used_block_group->key.objectid);
5486                        if (offset) {
5487                                /* we have a block, we're done */
5488                                spin_unlock(&last_ptr->refill_lock);
5489                                trace_btrfs_reserve_extent_cluster(root,
5490                                        block_group, search_start, num_bytes);
5491                                goto checks;
5492                        }
5493
5494                        WARN_ON(last_ptr->block_group != used_block_group);
5495                        if (used_block_group != block_group) {
5496                                btrfs_put_block_group(used_block_group);
5497                                used_block_group = block_group;
5498                        }
5499refill_cluster:
5500                        BUG_ON(used_block_group != block_group);
5501                        /* If we are on LOOP_NO_EMPTY_SIZE, we can't
5502                         * set up a new cluster, so let's just skip it
5503                         * and let the allocator find whatever block
5504                         * it can find.  If we reach this point, we
5505                         * will have tried the cluster allocator
5506                         * plenty of times and not have found
5507                         * anything, so we are likely way too
5508                         * fragmented for the clustering stuff to find
5509                         * anything.
5510                         *
5511                         * However, if the cluster is taken from the
5512                         * current block group, release the cluster
5513                         * first, so that we stand a better chance of
5514                         * succeeding in the unclustered
5515                         * allocation.  */
5516                        if (loop >= LOOP_NO_EMPTY_SIZE &&
5517                            last_ptr->block_group != block_group) {
5518                                spin_unlock(&last_ptr->refill_lock);
5519                                goto unclustered_alloc;
5520                        }
5521
5522                        /*
5523                         * this cluster didn't work out, free it and
5524                         * start over
5525                         */
5526                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
5527
5528                        if (loop >= LOOP_NO_EMPTY_SIZE) {
5529                                spin_unlock(&last_ptr->refill_lock);
5530                                goto unclustered_alloc;
5531                        }
5532
5533                        /* allocate a cluster in this block group */
5534                        ret = btrfs_find_space_cluster(trans, root,
5535                                               block_group, last_ptr,
5536                                               search_start, num_bytes,
5537                                               empty_cluster + empty_size);
5538                        if (ret == 0) {
5539                                /*
5540                                 * now pull our allocation out of this
5541                                 * cluster
5542                                 */
5543                                offset = btrfs_alloc_from_cluster(block_group,
5544                                                  last_ptr, num_bytes,
5545                                                  search_start);
5546                                if (offset) {
5547                                        /* we found one, proceed */
5548                                        spin_unlock(&last_ptr->refill_lock);
5549                                        trace_btrfs_reserve_extent_cluster(root,
5550                                                block_group, search_start,
5551                                                num_bytes);
5552                                        goto checks;
5553                                }
5554                        } else if (!cached && loop > LOOP_CACHING_NOWAIT
5555                                   && !failed_cluster_refill) {
5556                                spin_unlock(&last_ptr->refill_lock);
5557
5558                                failed_cluster_refill = true;
5559                                wait_block_group_cache_progress(block_group,
5560                                       num_bytes + empty_cluster + empty_size);
5561                                goto have_block_group;
5562                        }
5563
5564                        /*
5565                         * at this point we either didn't find a cluster
5566                         * or we weren't able to allocate a block from our
5567                         * cluster.  Free the cluster we've been trying
5568                         * to use, and go to the next block group
5569                         */
5570                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
5571                        spin_unlock(&last_ptr->refill_lock);
5572                        goto loop;
5573                }
5574
5575unclustered_alloc:
5576                spin_lock(&block_group->free_space_ctl->tree_lock);
5577                if (cached &&
5578                    block_group->free_space_ctl->free_space <
5579                    num_bytes + empty_cluster + empty_size) {
5580                        spin_unlock(&block_group->free_space_ctl->tree_lock);
5581                        goto loop;
5582                }
5583                spin_unlock(&block_group->free_space_ctl->tree_lock);
5584
5585                offset = btrfs_find_space_for_alloc(block_group, search_start,
5586                                                    num_bytes, empty_size);
5587                /*
5588                 * If we didn't find a chunk, and we haven't failed on this
5589                 * block group before, and this block group is in the middle of
5590                 * caching and we are ok with waiting, then go ahead and wait
5591                 * for progress to be made, and set failed_alloc to true.
5592                 *
5593                 * If failed_alloc is true then we've already waited on this
5594                 * block group once and should move on to the next block group.
5595                 */
5596                if (!offset && !failed_alloc && !cached &&
5597                    loop > LOOP_CACHING_NOWAIT) {
5598                        wait_block_group_cache_progress(block_group,
5599                                                num_bytes + empty_size);
5600                        failed_alloc = true;
5601                        goto have_block_group;
5602                } else if (!offset) {
5603                        if (!cached)
5604                                have_caching_bg = true;
5605                        goto loop;
5606                }
5607checks:
5608                search_start = stripe_align(root, offset);
5609                /* move on to the next group */
5610                if (search_start + num_bytes >= search_end) {
5611                        btrfs_add_free_space(used_block_group, offset, num_bytes);
5612                        goto loop;
5613                }
5614
5615                /* move on to the next group */
5616                if (search_start + num_bytes >
5617                    used_block_group->key.objectid + used_block_group->key.offset) {
5618                        btrfs_add_free_space(used_block_group, offset, num_bytes);
5619                        goto loop;
5620                }
5621
5622                if (offset < search_start)
5623                        btrfs_add_free_space(used_block_group, offset,
5624                                             search_start - offset);
5625                BUG_ON(offset > search_start);
5626
5627                ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
5628                                                  alloc_type);
5629                if (ret == -EAGAIN) {
5630                        btrfs_add_free_space(used_block_group, offset, num_bytes);
5631                        goto loop;
5632                }
5633
5634                /* we are all good, let's return */
5635                ins->objectid = search_start;
5636                ins->offset = num_bytes;
5637
5638                trace_btrfs_reserve_extent(orig_root, block_group,
5639                                           search_start, num_bytes);
5644                if (used_block_group != block_group)
5645                        btrfs_put_block_group(used_block_group);
5646                btrfs_put_block_group(block_group);
5647                break;
5648loop:
5649                failed_cluster_refill = false;
5650                failed_alloc = false;
5651                BUG_ON(index != get_block_group_index(block_group));
5652                if (used_block_group != block_group)
5653                        btrfs_put_block_group(used_block_group);
5654                btrfs_put_block_group(block_group);
5655        }
5656        up_read(&space_info->groups_sem);
5657
5658        if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
5659                goto search;
5660
5661        if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5662                goto search;
5663
5664        /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
5665         *                      for them to make caching progress.  Also
5666         *                      determine the best possible bg to cache
5667         * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5668         *                      caching kthreads as we move along
5669         * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5670         * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5671         * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5672         *                      again
5673         */
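            /*
             * A sketch of the resulting retry ladder, assuming the LOOP_*
             * constants are declared as an increasing enum earlier in this
             * file; each pass through the loop++ below moves one step right:
             *
             *   LOOP_FIND_IDEAL -> LOOP_CACHING_NOWAIT -> LOOP_CACHING_WAIT
             *     -> LOOP_ALLOC_CHUNK -> LOOP_NO_EMPTY_SIZE -> -ENOSPC
             */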
5674        if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5675                index = 0;
5676                if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5677                        found_uncached_bg = false;
5678                        loop++;
5679                        if (!ideal_cache_percent)
5680                                goto search;
5681
5682                        /*
5683                         * One of the following two things has happened so far:
5684                         *
5685                         * 1) We found an ideal block group for caching that
5686                         * is mostly full and will cache quickly, so we might
5687                         * as well wait for it.
5688                         *
5689                         * 2) We searched for cached only and we didn't find
5690                         * anything, and we didn't start any caching kthreads
5691                         * either, so chances are we will loop through and
5692                         * start a couple caching kthreads, and then come back
5693                         * around and just wait for them.  This will be slower
5694                         * because we will have 2 caching kthreads reading at
5695                         * the same time when we could have just started one
5696                         * and waited for it to get far enough to give us an
5697                         * allocation, so go ahead and go to the wait caching
5698                         * loop.
5699                         */
5700                        loop = LOOP_CACHING_WAIT;
5701                        search_start = ideal_cache_offset;
5702                        ideal_cache_percent = 0;
5703                        goto ideal_cache;
5704                } else if (loop == LOOP_FIND_IDEAL) {
5705                        /*
5706                         * Didn't find an uncached bg, wait on anything we find
5707                         * next.
5708                         */
5709                        loop = LOOP_CACHING_WAIT;
5710                        goto search;
5711                }
5712
5713                loop++;
5714
5715                if (loop == LOOP_ALLOC_CHUNK) {
5716                        if (allowed_chunk_alloc) {
5717                                ret = do_chunk_alloc(trans, root, num_bytes +
5718                                                     2 * 1024 * 1024, data,
5719                                                     CHUNK_ALLOC_LIMITED);
5720                                allowed_chunk_alloc = 0;
5721                                if (ret == 1)
5722                                        done_chunk_alloc = 1;
5723                        } else if (!done_chunk_alloc &&
5724                                   space_info->force_alloc ==
5725                                   CHUNK_ALLOC_NO_FORCE) {
5726                                space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5727                        }
5728
5729                        /*
5730                         * We didn't allocate a chunk, go ahead and drop the
5731                         * empty size and loop again.
5732                         */
5733                        if (!done_chunk_alloc)
5734                                loop = LOOP_NO_EMPTY_SIZE;
5735                }
5736
5737                if (loop == LOOP_NO_EMPTY_SIZE) {
5738                        empty_size = 0;
5739                        empty_cluster = 0;
5740                }
5741
5742                goto search;
5743        } else if (!ins->objectid) {
5744                ret = -ENOSPC;
5745        } else {
5746                ret = 0;
5747        }
5748
5749        return ret;
5750}
5751
5752static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5753                            int dump_block_groups)
5754{
5755        struct btrfs_block_group_cache *cache;
5756        int index = 0;
5757
5758        spin_lock(&info->lock);
5759        printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5760               (unsigned long long)info->flags,
5761               (unsigned long long)(info->total_bytes - info->bytes_used -
5762                                    info->bytes_pinned - info->bytes_reserved -
5763                                    info->bytes_readonly),
5764               (info->full) ? "" : "not ");
5765        printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5766               "reserved=%llu, may_use=%llu, readonly=%llu\n",
5767               (unsigned long long)info->total_bytes,
5768               (unsigned long long)info->bytes_used,
5769               (unsigned long long)info->bytes_pinned,
5770               (unsigned long long)info->bytes_reserved,
5771               (unsigned long long)info->bytes_may_use,
5772               (unsigned long long)info->bytes_readonly);
5773        spin_unlock(&info->lock);
5774
5775        if (!dump_block_groups)
5776                return;
5777
5778        down_read(&info->groups_sem);
5779again:
5780        list_for_each_entry(cache, &info->block_groups[index], list) {
5781                spin_lock(&cache->lock);
5782                printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5783                       "%llu pinned %llu reserved\n",
5784                       (unsigned long long)cache->key.objectid,
5785                       (unsigned long long)cache->key.offset,
5786                       (unsigned long long)btrfs_block_group_used(&cache->item),
5787                       (unsigned long long)cache->pinned,
5788                       (unsigned long long)cache->reserved);
5789                btrfs_dump_free_space(cache, bytes);
5790                spin_unlock(&cache->lock);
5791        }
5792        if (++index < BTRFS_NR_RAID_TYPES)
5793                goto again;
5794        up_read(&info->groups_sem);
5795}
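    /*
     * Reading the first line of output above, with illustrative numbers:
     * total=10737418240 (10 GiB), used=8589934592, pinned=1073741824,
     * reserved=536870912 and readonly=0 would be reported as
     * 10737418240 - 8589934592 - 1073741824 - 536870912 = 536870912
     * bytes (512 MiB) free.
     */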
5796
5797int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5798                         struct btrfs_root *root,
5799                         u64 num_bytes, u64 min_alloc_size,
5800                         u64 empty_size, u64 hint_byte,
5801                         u64 search_end, struct btrfs_key *ins,
5802                         u64 data)
5803{
5804        bool final_tried = false;
5805        int ret;
5806        u64 search_start = 0;
5807
5808        data = btrfs_get_alloc_profile(root, data);
5809again:
5810        /*
5811         * the only place that sets empty_size is btrfs_realloc_node, which
5812         * is not called recursively on allocations
5813         */
5814        if (empty_size || root->ref_cows)
5815                ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5816                                     num_bytes + 2 * 1024 * 1024, data,
5817                                     CHUNK_ALLOC_NO_FORCE);
5818
5819        WARN_ON(num_bytes < root->sectorsize);
5820        ret = find_free_extent(trans, root, num_bytes, empty_size,
5821                               search_start, search_end, hint_byte,
5822                               ins, data);
5823
5824        if (ret == -ENOSPC) {
5825                if (!final_tried) {
5826                        num_bytes = num_bytes >> 1;
5827                        num_bytes = num_bytes & ~(root->sectorsize - 1);
5828                        num_bytes = max(num_bytes, min_alloc_size);
5829                        do_chunk_alloc(trans, root->fs_info->extent_root,
5830                                       num_bytes, data, CHUNK_ALLOC_FORCE);
5831                        if (num_bytes == min_alloc_size)
5832                                final_tried = true;
5833                        goto again;
5834                } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
5835                        struct btrfs_space_info *sinfo;
5836
5837                        sinfo = __find_space_info(root->fs_info, data);
5838                        printk(KERN_ERR "btrfs allocation failed flags %llu, "
5839                               "wanted %llu\n", (unsigned long long)data,
5840                               (unsigned long long)num_bytes);
5841                        dump_space_info(sinfo, num_bytes, 1);
5842                }
5843        }
5844
5845        trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5846
5847        return ret;
5848}
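    /*
     * Worked example of the -ENOSPC retry above (illustrative values):
     * with a 4096 byte sectorsize, a min_alloc_size of 256K and a failed
     * 1 MiB request, num_bytes is retried at 512K and then 256K; the
     * 256K attempt sets final_tried, so one more failure falls through
     * to the optional ENOSPC_DEBUG dump and returns -ENOSPC.
     */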
5849
5850static int __btrfs_free_reserved_extent(struct btrfs_root *root,
5851                                        u64 start, u64 len, int pin)
5852{
5853        struct btrfs_block_group_cache *cache;
5854        int ret = 0;
5855
5856        cache = btrfs_lookup_block_group(root->fs_info, start);
5857        if (!cache) {
5858                printk(KERN_ERR "Unable to find block group for %llu\n",
5859                       (unsigned long long)start);
5860                return -ENOSPC;
5861        }
5862
5863        if (btrfs_test_opt(root, DISCARD))
5864                ret = btrfs_discard_extent(root, start, len, NULL);
5865
5866        if (pin)
5867                pin_down_extent(root, cache, start, len, 1);
5868        else {
5869                btrfs_add_free_space(cache, start, len);
5870                btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
5871        }
5872        btrfs_put_block_group(cache);
5873
5874        trace_btrfs_reserved_extent_free(root, start, len);
5875
5876        return ret;
5877}
5878
5879int btrfs_free_reserved_extent(struct btrfs_root *root,
5880                                        u64 start, u64 len)
5881{
5882        return __btrfs_free_reserved_extent(root, start, len, 0);
5883}
5884
5885int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
5886                                       u64 start, u64 len)
5887{
5888        return __btrfs_free_reserved_extent(root, start, len, 1);
5889}
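    /*
     * Usage sketch for the two wrappers above (illustrative; it assumes
     * the usual rule that pinned extents only return to the free space
     * cache once the running transaction commits):
     *
     *     if (extent_may_still_be_read_this_transaction)
     *             btrfs_free_and_pin_reserved_extent(root, start, len);
     *     else
     *             btrfs_free_reserved_extent(root, start, len);
     *
     * extent_may_still_be_read_this_transaction is a made-up predicate
     * standing in for the caller's knowledge of the extent's lifetime.
     */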
5890
5891static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5892                                      struct btrfs_root *root,
5893                                      u64 parent, u64 root_objectid,
5894                                      u64 flags, u64 owner, u64 offset,
5895                                      struct btrfs_key *ins, int ref_mod)
5896{
5897        int ret;
5898        struct btrfs_fs_info *fs_info = root->fs_info;
5899        struct btrfs_extent_item *extent_item;
5900        struct btrfs_extent_inline_ref *iref;
5901        struct btrfs_path *path;
5902        struct extent_buffer *leaf;
5903        int type;
5904        u32 size;
5905
5906        if (parent > 0)
5907                type = BTRFS_SHARED_DATA_REF_KEY;
5908        else
5909                type = BTRFS_EXTENT_DATA_REF_KEY;
5910
5911        size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5912
5913        path = btrfs_alloc_path();
5914        if (!path)
5915                return -ENOMEM;
5916
5917        path->leave_spinning = 1;
5918        ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5919                                      ins, size);
5920        BUG_ON(ret);
5921
5922        leaf = path->nodes[0];
5923        extent_item = btrfs_item_ptr(leaf, path->slots[0],
5924                                     struct btrfs_extent_item);
5925        btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5926        btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5927        btrfs_set_extent_flags(leaf, extent_item,
5928                               flags | BTRFS_EXTENT_FLAG_DATA);
5929
5930        iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5931        btrfs_set_extent_inline_ref_type(leaf, iref, type);
5932        if (parent > 0) {
5933                struct btrfs_shared_data_ref *ref;
5934                ref = (struct btrfs_shared_data_ref *)(iref + 1);
5935                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5936                btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5937        } else {
5938                struct btrfs_extent_data_ref *ref;
5939                ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5940                btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5941                btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5942                btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5943                btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5944        }
5945
5946        btrfs_mark_buffer_dirty(path->nodes[0]);
5947        btrfs_free_path(path);
5948
5949        ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5950        if (ret) {
5951                printk(KERN_ERR "btrfs update block group failed for %llu "
5952                       "%llu\n", (unsigned long long)ins->objectid,
5953                       (unsigned long long)ins->offset);
5954                BUG();
5955        }
5956        return ret;
5957}
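    /*
     * Layout sketch of the item inserted above; the inline ref follows
     * the extent item directly, matching the size computed from
     * btrfs_extent_inline_ref_size(type):
     *
     *   parent > 0:  [btrfs_extent_item][iref type][btrfs_shared_data_ref]
     *   parent == 0: [btrfs_extent_item][iref type][btrfs_extent_data_ref]
     */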
5958
5959static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5960                                     struct btrfs_root *root,
5961                                     u64 parent, u64 root_objectid,
5962                                     u64 flags, struct btrfs_disk_key *key,
5963                                     int level, struct btrfs_key *ins)
5964{
5965        int ret;
5966        struct btrfs_fs_info *fs_info = root->fs_info;
5967        struct btrfs_extent_item *extent_item;
5968        struct btrfs_tree_block_info *block_info;
5969        struct btrfs_extent_inline_ref *iref;
5970        struct btrfs_path *path;
5971        struct extent_buffer *leaf;
5972        u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5973
5974        path = btrfs_alloc_path();
5975        if (!path)
5976                return -ENOMEM;
5977
5978        path->leave_spinning = 1;
5979        ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5980                                      ins, size);
5981        BUG_ON(ret);
5982
5983        leaf = path->nodes[0];
5984        extent_item = btrfs_item_ptr(leaf, path->slots[0],
5985                                     struct btrfs_extent_item);
5986        btrfs_set_extent_refs(leaf, extent_item, 1);
5987        btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5988        btrfs_set_extent_flags(leaf, extent_item,
5989                               flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5990        block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5991
5992        btrfs_set_tree_block_key(leaf, block_info, key);
5993        btrfs_set_tree_block_level(leaf, block_info, level);
5994
5995        iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5996        if (parent > 0) {
5997                BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5998                btrfs_set_extent_inline_ref_type(leaf, iref,
5999                                                 BTRFS_SHARED_BLOCK_REF_KEY);
6000                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6001        } else {
6002                btrfs_set_extent_inline_ref_type(leaf, iref,
6003                                                 BTRFS_TREE_BLOCK_REF_KEY);
6004                btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6005        }
6006
6007        btrfs_mark_buffer_dirty(leaf);
6008        btrfs_free_path(path);
6009
6010        ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
6011        if (ret) {
6012                printk(KERN_ERR "btrfs update block group failed for %llu "
6013                       "%llu\n", (unsigned long long)ins->objectid,
6014                       (unsigned long long)ins->offset);
6015                BUG();
6016        }
6017        return ret;
6018}
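    /*
     * Layout sketch of the tree block item inserted above, matching the
     * size computation at the top of the function:
     *
     *   [btrfs_extent_item][btrfs_tree_block_info][inline ref]
     */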
6019
6020int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6021                                     struct btrfs_root *root,
6022                                     u64 root_objectid, u64 owner,
6023                                     u64 offset, struct btrfs_key *ins)
6024{
6025        int ret;
6026
6027        BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6028
6029        ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6030                                         ins->offset, 0,
6031                                         root_objectid, owner, offset,
6032                                         BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6033        return ret;
6034}
6035
6036/*
6037 * this is used by the tree logging recovery code.  It records that
6038 * an extent has been allocated and makes sure to clear the free
6039 * space cache bits as well
6040 */
6041int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6042                                   struct btrfs_root *root,
6043                                   u64 root_objectid, u64 owner, u64 offset,
6044                                   struct btrfs_key *ins)
6045{
6046        int ret;
6047        struct btrfs_block_group_cache *block_group;
6048        struct btrfs_caching_control *caching_ctl;
6049        u64 start = ins->objectid;
6050        u64 num_bytes = ins->offset;
6051
6052        block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6053        cache_block_group(block_group, trans, NULL, 0);
6054        caching_ctl = get_caching_control(block_group);
6055
6056        if (!caching_ctl) {
6057                BUG_ON(!block_group_cache_done(block_group));
6058                ret = btrfs_remove_free_space(block_group, start, num_bytes);
6059                BUG_ON(ret);
6060        } else {
6061                mutex_lock(&caching_ctl->mutex);
6062
6063                if (start >= caching_ctl->progress) {
6064                        ret = add_excluded_extent(root, start, num_bytes);
6065                        BUG_ON(ret);
6066                } else if (start + num_bytes <= caching_ctl->progress) {
6067                        ret = btrfs_remove_free_space(block_group,
6068                                                      start, num_bytes);
6069                        BUG_ON(ret);
6070                } else {
6071                        num_bytes = caching_ctl->progress - start;
6072                        ret = btrfs_remove_free_space(block_group,
6073                                                      start, num_bytes);
6074                        BUG_ON(ret);
6075
6076                        start = caching_ctl->progress;
6077                        num_bytes = ins->objectid + ins->offset -
6078                                    caching_ctl->progress;
6079                        ret = add_excluded_extent(root, start, num_bytes);
6080                        BUG_ON(ret);
6081                }
6082
6083                mutex_unlock(&caching_ctl->mutex);
6084                put_caching_control(caching_ctl);
6085        }
6086
6087        ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6088                                          RESERVE_ALLOC_NO_ACCOUNT);
6089        BUG_ON(ret);
6090        btrfs_put_block_group(block_group);
6091        ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6092                                         0, owner, offset, ins, 1);
6093        return ret;
6094}
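    /*
     * Worked example for the straddling branch above (illustrative
     * offsets): with caching_ctl->progress at 100M and a logged extent
     * of start=96M, num_bytes=8M, the first 4M (96M..100M) is removed
     * from the free space cache directly, and the remaining 4M
     * (100M..104M) is added as an excluded extent for the caching
     * thread to skip when it gets there.
     */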
6095
6096struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6097                                            struct btrfs_root *root,
6098                                            u64 bytenr, u32 blocksize,
6099                                            int level)
6100{
6101        struct extent_buffer *buf;
6102
6103        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6104        if (!buf)
6105                return ERR_PTR(-ENOMEM);
6106        btrfs_set_header_generation(buf, trans->transid);
6107        btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6108        btrfs_tree_lock(buf);
6109        clean_tree_block(trans, root, buf);
6110
6111        btrfs_set_lock_blocking(buf);
6112        btrfs_set_buffer_uptodate(buf);
6113
6114        if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6115                /*
6116                 * we allow two log transactions at a time, use different
6117                 * EXTENT bits to differentiate dirty pages.
6118                 */
6119                if (root->log_transid % 2 == 0)
6120                        set_extent_dirty(&root->dirty_log_pages, buf->start,
6121                                        buf->start + buf->len - 1, GFP_NOFS);
6122                else
6123                        set_extent_new(&root->dirty_log_pages, buf->start,
6124                                        buf->start + buf->len - 1, GFP_NOFS);
6125        } else {
6126                set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6127                         buf->start + buf->len - 1, GFP_NOFS);
6128        }
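            /*
             * Illustrative example of the even/odd split above: a buffer
             * born in log transid 8 is tracked with the dirty bit, one
             * born in log transid 9 with the new bit, so the two
             * in-flight log transactions never mix up their pages.
             */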
6129        trans->blocks_used++;
6130        /* this returns a buffer locked for blocking */
6131        return buf;
6132}
6133
6134static struct btrfs_block_rsv *
6135use_block_rsv(struct btrfs_trans_handle *trans,
6136              struct btrfs_root *root, u32 blocksize)
6137{
6138        struct btrfs_block_rsv *block_rsv;
6139        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6140        int ret;
6141
6142        block_rsv = get_block_rsv(trans, root);
6143
6144        if (block_rsv->size == 0) {
6145                ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
6146                /*
6147                 * If we couldn't reserve metadata bytes try and use some from
6148                 * the global reserve.
6149                 */
6150                if (ret && block_rsv != global_rsv) {
6151                        ret = block_rsv_use_bytes(global_rsv, blocksize);
6152                        if (!ret)
6153                                return global_rsv;
6154                        return ERR_PTR(ret);
6155                } else if (ret) {
6156                        return ERR_PTR(ret);
6157                }
6158                return block_rsv;
6159        }
6160
6161        ret = block_rsv_use_bytes(block_rsv, blocksize);
6162        if (!ret)
6163                return block_rsv;
6164        if (ret) {
6165                static DEFINE_RATELIMIT_STATE(_rs,
6166                                DEFAULT_RATELIMIT_INTERVAL,
6167                                /*DEFAULT_RATELIMIT_BURST*/ 2);
6168                if (__ratelimit(&_rs)) {
6169                        printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
6170                        WARN_ON(1);
6171                }
6172                ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
6173                if (!ret) {
6174                        return block_rsv;
6175                } else if (ret && block_rsv != global_rsv) {
6176                        ret = block_rsv_use_bytes(global_rsv, blocksize);
6177                        if (!ret)
6178                                return global_rsv;
6179                }
6180        }
6181
6182        return ERR_PTR(-ENOSPC);
6183}
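    /*
     * Decision ladder of the helper above, as a sketch: hand out bytes
     * from the root's own reservation when it has any; otherwise try to
     * refill it with reserve_metadata_bytes(); and only as a last resort
     * (when the rsv in use isn't already the global one) borrow the
     * blocksize from the global reserve before giving up with -ENOSPC.
     */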
6184
6185static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6186                            struct btrfs_block_rsv *block_rsv, u32 blocksize)
6187{
6188        block_rsv_add_bytes(block_rsv, blocksize, 0);
6189        block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6190}
6191
6192/*
6193 * finds a free extent and does all the dirty work required for allocation.
6194 * returns the key for the extent through ins, and the tree buffer for the
6195 * first block of the extent as the return value.
6196 *
6197 * returns the locked tree buffer on success, or an ERR_PTR on failure.
6198 */
6199struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6200                                        struct btrfs_root *root, u32 blocksize,
6201                                        u64 parent, u64 root_objectid,
6202                                        struct btrfs_disk_key *key, int level,
6203                                        u64 hint, u64 empty_size, int for_cow)
6204{
6205        struct btrfs_key ins;
6206        struct btrfs_block_rsv *block_rsv;
6207        struct extent_buffer *buf;
6208        u64 flags = 0;
6209        int ret;
6210
6211
6212        block_rsv = use_block_rsv(trans, root, blocksize);
6213        if (IS_ERR(block_rsv))
6214                return ERR_CAST(block_rsv);
6215
6216        ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6217                                   empty_size, hint, (u64)-1, &ins, 0);
6218        if (ret) {
6219                unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6220                return ERR_PTR(ret);
6221        }
6222
6223        buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6224                                    blocksize, level);
6225        BUG_ON(IS_ERR(buf));
6226
6227        if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6228                if (parent == 0)
6229                        parent = ins.objectid;
6230                flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6231        } else
6232                BUG_ON(parent > 0);
6233
6234        if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6235                struct btrfs_delayed_extent_op *extent_op;
6236                extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
6237                BUG_ON(!extent_op);
6238                if (key)
6239                        memcpy(&extent_op->key, key, sizeof(extent_op->key));
6240                else
6241                        memset(&extent_op->key, 0, sizeof(extent_op->key));
6242                extent_op->flags_to_set = flags;
6243                extent_op->update_key = 1;
6244                extent_op->update_flags = 1;
6245                extent_op->is_data = 0;
6246
6247                ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6248                                        ins.objectid,
6249                                        ins.offset, parent, root_objectid,
6250                                        level, BTRFS_ADD_DELAYED_EXTENT,
6251                                        extent_op, for_cow);
6252                BUG_ON(ret);
6253        }
6254        return buf;
6255}
6256
6257struct walk_control {
6258        u64 refs[BTRFS_MAX_LEVEL];
6259        u64 flags[BTRFS_MAX_LEVEL];
6260        struct btrfs_key update_progress;
6261        int stage;
6262        int level;
6263        int shared_level;
6264        int update_ref;
6265        int keep_locks;
6266        int reada_slot;
6267        int reada_count;
6268        int for_reloc;
6269};
6270
6271#define DROP_REFERENCE  1
6272#define UPDATE_BACKREF  2
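    /*
     * Stage machine sketch for the walk below: every walk starts in
     * DROP_REFERENCE; when do_walk_down() meets a shared block whose
     * backrefs still need updating it flips wc->stage to UPDATE_BACKREF
     * for the subtree, and walk_up_proc() flips it back to
     * DROP_REFERENCE once that shared level has been processed.
     */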
6273
6274static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6275                                     struct btrfs_root *root,
6276                                     struct walk_control *wc,
6277                                     struct btrfs_path *path)
6278{
6279        u64 bytenr;
6280        u64 generation;
6281        u64 refs;
6282        u64 flags;
6283        u32 nritems;
6284        u32 blocksize;
6285        struct btrfs_key key;
6286        struct extent_buffer *eb;
6287        int ret;
6288        int slot;
6289        int nread = 0;
6290
6291        if (path->slots[wc->level] < wc->reada_slot) {
6292                wc->reada_count = wc->reada_count * 2 / 3;
6293                wc->reada_count = max(wc->reada_count, 2);
6294        } else {
6295                wc->reada_count = wc->reada_count * 3 / 2;
6296                wc->reada_count = min_t(int, wc->reada_count,
6297                                        BTRFS_NODEPTRS_PER_BLOCK(root));
6298        }
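            /*
             * Worked example of the window sizing above (illustrative):
             * starting from a reada_count of 8, repeated slots behind
             * reada_slot shrink it 8 -> 5 -> 3 -> 2 (floored at 2),
             * while slots at or past it grow it 8 -> 12 -> 18 -> ...,
             * capped at BTRFS_NODEPTRS_PER_BLOCK(root).
             */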
6299
6300        eb = path->nodes[wc->level];
6301        nritems = btrfs_header_nritems(eb);
6302        blocksize = btrfs_level_size(root, wc->level - 1);
6303
6304        for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6305                if (nread >= wc->reada_count)
6306                        break;
6307
6308                cond_resched();
6309                bytenr = btrfs_node_blockptr(eb, slot);
6310                generation = btrfs_node_ptr_generation(eb, slot);
6311
6312                if (slot == path->slots[wc->level])
6313                        goto reada;
6314
6315                if (wc->stage == UPDATE_BACKREF &&
6316                    generation <= root->root_key.offset)
6317                        continue;
6318
6319                /* We don't lock the tree block, it's OK to be racy here */
6320                ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6321                                               &refs, &flags);
6322                BUG_ON(ret);
6323                BUG_ON(refs == 0);
6324
6325                if (wc->stage == DROP_REFERENCE) {
6326                        if (refs == 1)
6327                                goto reada;
6328
6329                        if (wc->level == 1 &&
6330                            (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6331                                continue;
6332                        if (!wc->update_ref ||
6333                            generation <= root->root_key.offset)
6334                                continue;
6335                        btrfs_node_key_to_cpu(eb, &key, slot);
6336                        ret = btrfs_comp_cpu_keys(&key,
6337                                                  &wc->update_progress);
6338                        if (ret < 0)
6339                                continue;
6340                } else {
6341                        if (wc->level == 1 &&
6342                            (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6343                                continue;
6344                }
6345reada:
6346                ret = readahead_tree_block(root, bytenr, blocksize,
6347                                           generation);
6348                if (ret)
6349                        break;
6350                nread++;
6351        }
6352        wc->reada_slot = slot;
6353}
6354
6355/*
6356 * helper to process tree block while walking down the tree.
6357 *
6358 * when wc->stage == UPDATE_BACKREF, this function updates
6359 * back refs for pointers in the block.
6360 *
6361 * NOTE: return value 1 means we should stop walking down.
6362 */
6363static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6364                                   struct btrfs_root *root,
6365                                   struct btrfs_path *path,
6366                                   struct walk_control *wc, int lookup_info)
6367{
6368        int level = wc->level;
6369        struct extent_buffer *eb = path->nodes[level];
6370        u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6371        int ret;
6372
6373        if (wc->stage == UPDATE_BACKREF &&
6374            btrfs_header_owner(eb) != root->root_key.objectid)
6375                return 1;
6376
6377        /*
6378         * when the reference count of a tree block is 1, it won't increase
6379         * again. once the full backref flag is set, we never clear it.
6380         */
6381        if (lookup_info &&
6382            ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6383             (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6384                BUG_ON(!path->locks[level]);
6385                ret = btrfs_lookup_extent_info(trans, root,
6386                                               eb->start, eb->len,
6387                                               &wc->refs[level],
6388                                               &wc->flags[level]);
6389                BUG_ON(ret);
6390                BUG_ON(wc->refs[level] == 0);
6391        }
6392
6393        if (wc->stage == DROP_REFERENCE) {
6394                if (wc->refs[level] > 1)
6395                        return 1;
6396
6397                if (path->locks[level] && !wc->keep_locks) {
6398                        btrfs_tree_unlock_rw(eb, path->locks[level]);
6399                        path->locks[level] = 0;
6400                }
6401                return 0;
6402        }
6403
6404        /* wc->stage == UPDATE_BACKREF */
6405        if (!(wc->flags[level] & flag)) {
6406                BUG_ON(!path->locks[level]);
6407                ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6408                BUG_ON(ret);
6409                ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6410                BUG_ON(ret);
6411                ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6412                                                  eb->len, flag, 0);
6413                BUG_ON(ret);
6414                wc->flags[level] |= flag;
6415        }
6416
6417        /*
6418         * the block is shared by multiple trees, so it's not good to
6419         * keep the tree lock
6420         */
6421        if (path->locks[level] && level > 0) {
6422                btrfs_tree_unlock_rw(eb, path->locks[level]);
6423                path->locks[level] = 0;
6424        }
6425        return 0;
6426}
6427
6428/*
6429 * helper to process tree block pointer.
6430 *
6431 * when wc->stage == DROP_REFERENCE, this function checks the
6432 * reference count of the block pointed to. if the block
6433 * is shared and we need to update back refs for the subtree
6434 * rooted at the block, this function changes wc->stage to
6435 * UPDATE_BACKREF. if the block is shared and there is no
6436 * need to update backrefs, this function drops the reference
6437 * to the block.
6438 *
6439 * NOTE: return value 1 means we should stop walking down.
6440 */
6441static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6442                                 struct btrfs_root *root,
6443                                 struct btrfs_path *path,
6444                                 struct walk_control *wc, int *lookup_info)
6445{
6446        u64 bytenr;
6447        u64 generation;
6448        u64 parent;
6449        u32 blocksize;
6450        struct btrfs_key key;
6451        struct extent_buffer *next;
6452        int level = wc->level;
6453        int reada = 0;
6454        int ret = 0;
6455
6456        generation = btrfs_node_ptr_generation(path->nodes[level],
6457                                               path->slots[level]);
6458        /*
6459         * if the lower level block was created before the snapshot
6460         * was created, we know there is no need to update back refs
6461         * for the subtree
6462         */
6463        if (wc->stage == UPDATE_BACKREF &&
6464            generation <= root->root_key.offset) {
6465                *lookup_info = 1;
6466                return 1;
6467        }
6468
6469        bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6470        blocksize = btrfs_level_size(root, level - 1);
6471
6472        next = btrfs_find_tree_block(root, bytenr, blocksize);
6473        if (!next) {
6474                next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6475                if (!next)
6476                        return -ENOMEM;
6477                reada = 1;
6478        }
6479        btrfs_tree_lock(next);
6480        btrfs_set_lock_blocking(next);
6481
6482        ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6483                                       &wc->refs[level - 1],
6484                                       &wc->flags[level - 1]);
6485        BUG_ON(ret);
6486        BUG_ON(wc->refs[level - 1] == 0);
6487        *lookup_info = 0;
6488
6489        if (wc->stage == DROP_REFERENCE) {
6490                if (wc->refs[level - 1] > 1) {
6491                        if (level == 1 &&
6492                            (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6493                                goto skip;
6494
6495                        if (!wc->update_ref ||
6496                            generation <= root->root_key.offset)
6497                                goto skip;
6498
6499                        btrfs_node_key_to_cpu(path->nodes[level], &key,
6500                                              path->slots[level]);
6501                        ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6502                        if (ret < 0)
6503                                goto skip;
6504
6505                        wc->stage = UPDATE_BACKREF;
6506                        wc->shared_level = level - 1;
6507                }
6508        } else {
6509                if (level == 1 &&
6510                    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6511                        goto skip;
6512        }
6513
6514        if (!btrfs_buffer_uptodate(next, generation)) {
6515                btrfs_tree_unlock(next);
6516                free_extent_buffer(next);
6517                next = NULL;
6518                *lookup_info = 1;
6519        }
6520
6521        if (!next) {
6522                if (reada && level == 1)
6523                        reada_walk_down(trans, root, wc, path);
6524                next = read_tree_block(root, bytenr, blocksize, generation);
6525                if (!next)
6526                        return -EIO;
6527                btrfs_tree_lock(next);
6528                btrfs_set_lock_blocking(next);
6529        }
6530
6531        level--;
6532        BUG_ON(level != btrfs_header_level(next));
6533        path->nodes[level] = next;
6534        path->slots[level] = 0;
6535        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6536        wc->level = level;
6537        if (wc->level == 1)
6538                wc->reada_slot = 0;
6539        return 0;
6540skip:
6541        wc->refs[level - 1] = 0;
6542        wc->flags[level - 1] = 0;
6543        if (wc->stage == DROP_REFERENCE) {
6544                if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6545                        parent = path->nodes[level]->start;
6546                } else {
6547                        BUG_ON(root->root_key.objectid !=
6548                               btrfs_header_owner(path->nodes[level]));
6549                        parent = 0;
6550                }
6551
6552                ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6553                                root->root_key.objectid, level - 1, 0, 0);
6554                BUG_ON(ret);
6555        }
6556        btrfs_tree_unlock(next);
6557        free_extent_buffer(next);
6558        *lookup_info = 1;
6559        return 1;
6560}
6561
6562/*
6563 * helper to process tree block while walking up the tree.
6564 *
6565 * when wc->stage == DROP_REFERENCE, this function drops
6566 * reference count on the block.
6567 *
6568 * when wc->stage == UPDATE_BACKREF, this function changes
6569 * wc->stage back to DROP_REFERENCE if we changed wc->stage
6570 * to UPDATE_BACKREF previously while processing the block.
6571 *
6572 * NOTE: return value 1 means we should stop walking up.
6573 */
6574static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6575                                 struct btrfs_root *root,
6576                                 struct btrfs_path *path,
6577                                 struct walk_control *wc)
6578{
6579        int ret;
6580        int level = wc->level;
6581        struct extent_buffer *eb = path->nodes[level];
6582        u64 parent = 0;
6583
6584        if (wc->stage == UPDATE_BACKREF) {
6585                BUG_ON(wc->shared_level < level);
6586                if (level < wc->shared_level)
6587                        goto out;
6588
6589                ret = find_next_key(path, level + 1, &wc->update_progress);
6590                if (ret > 0)
6591                        wc->update_ref = 0;
6592
6593                wc->stage = DROP_REFERENCE;
6594                wc->shared_level = -1;
6595                path->slots[level] = 0;
6596
6597                /*
6598                 * check reference count again if the block isn't locked.
6599                 * we should start walking down the tree again if reference
6600                 * count is one.
6601                 */
6602                if (!path->locks[level]) {
6603                        BUG_ON(level == 0);
6604                        btrfs_tree_lock(eb);
6605                        btrfs_set_lock_blocking(eb);
6606                        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6607
6608                        ret = btrfs_lookup_extent_info(trans, root,
6609                                                       eb->start, eb->len,
6610                                                       &wc->refs[level],
6611                                                       &wc->flags[level]);
6612                        BUG_ON(ret);
6613                        BUG_ON(wc->refs[level] == 0);
6614                        if (wc->refs[level] == 1) {
6615                                btrfs_tree_unlock_rw(eb, path->locks[level]);
6616                                return 1;
6617                        }
6618                }
6619        }
6620
6621        /* wc->stage == DROP_REFERENCE */
6622        BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6623
6624        if (wc->refs[level] == 1) {
6625                if (level == 0) {
6626                        if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6627                                ret = btrfs_dec_ref(trans, root, eb, 1,
6628                                                    wc->for_reloc);
6629                        else
6630                                ret = btrfs_dec_ref(trans, root, eb, 0,
6631                                                    wc->for_reloc);
6632                        BUG_ON(ret);
6633                }
6634                /* make block locked assertion in clean_tree_block happy */
6635                if (!path->locks[level] &&
6636                    btrfs_header_generation(eb) == trans->transid) {
6637                        btrfs_tree_lock(eb);
6638                        btrfs_set_lock_blocking(eb);
6639                        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6640                }
6641                clean_tree_block(trans, root, eb);
6642        }
6643
6644        if (eb == root->node) {
6645                if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6646                        parent = eb->start;
6647                else
6648                        BUG_ON(root->root_key.objectid !=
6649                               btrfs_header_owner(eb));
6650        } else {
6651                if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6652                        parent = path->nodes[level + 1]->start;
6653                else
6654                        BUG_ON(root->root_key.objectid !=
6655                               btrfs_header_owner(path->nodes[level + 1]));
6656        }
6657
6658        btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
6659out:
6660        wc->refs[level] = 0;
6661        wc->flags[level] = 0;
6662        return 0;
6663}
6664
6665static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6666                                   struct btrfs_root *root,
6667                                   struct btrfs_path *path,
6668                                   struct walk_control *wc)
6669{
6670        int level = wc->level;
6671        int lookup_info = 1;
6672        int ret;
6673
6674        while (level >= 0) {
6675                ret = walk_down_proc(trans, root, path, wc, lookup_info);
6676                if (ret > 0)
6677                        break;
6678
6679                if (level == 0)
6680                        break;
6681
6682                if (path->slots[level] >=
6683                    btrfs_header_nritems(path->nodes[level]))
6684                        break;
6685
6686                ret = do_walk_down(trans, root, path, wc, &lookup_info);
6687                if (ret > 0) {
6688                        path->slots[level]++;
6689                        continue;
6690                } else if (ret < 0)
6691                        return ret;
6692                level = wc->level;
6693        }
6694        return 0;
6695}
6696
6697static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6698                                 struct btrfs_root *root,
6699                                 struct btrfs_path *path,
6700                                 struct walk_control *wc, int max_level)
6701{
6702        int level = wc->level;
6703        int ret;
6704
6705        path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6706        while (level < max_level && path->nodes[level]) {
6707                wc->level = level;
6708                if (path->slots[level] + 1 <
6709                    btrfs_header_nritems(path->nodes[level])) {
6710                        path->slots[level]++;
6711                        return 0;
6712                } else {
6713                        ret = walk_up_proc(trans, root, path, wc);
6714                        if (ret > 0)
6715                                return 0;
6716
6717                        if (path->locks[level]) {
6718                                btrfs_tree_unlock_rw(path->nodes[level],
6719                                                     path->locks[level]);
6720                                path->locks[level] = 0;
6721                        }
6722                        free_extent_buffer(path->nodes[level]);
6723                        path->nodes[level] = NULL;
6724                        level++;
6725                }
6726        }
6727        return 1;
6728}
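    /*
     * Illustrative driver loop, an assumption about how the snapshot
     * dropping code below alternates the two walkers (error handling
     * elided):
     *
     *     while (1) {
     *             ret = walk_down_tree(trans, root, path, wc);
     *             if (ret < 0)
     *                     break;
     *             ret = walk_up_tree(trans, root, path, wc,
     *                                BTRFS_MAX_LEVEL);
     *             if (ret)
     *                     break;
     *     }
     *
     * walk_up_tree() returning 1 means the walk climbed above the root
     * and the tree is fully dropped.
     */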
6729
6730/*
6731 * drop a subvolume tree.
6732 *
6733 * this function traverses the tree freeing any blocks that are only
6734 * referenced by the tree.
6735 *
6736 * when a shared tree block is found, this function decreases its
6737 * reference count by one. if update_ref is true, this function
6738 * also makes sure backrefs for the shared block and all lower level
6739 * blocks are properly updated.
6740 */
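/*
 * illustrative call for dropping a dead root (a sketch, not a verbatim
 * caller): passing a NULL block_rsv keeps whatever reservation
 * btrfs_start_transaction() sets up below.
 *
 *	btrfs_drop_snapshot(root, NULL, 0, 0);
 */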
6741void btrfs_drop_snapshot(struct btrfs_root *root,
6742                         struct btrfs_block_rsv *block_rsv, int update_ref,
6743                         int for_reloc)
6744{
6745        struct btrfs_path *path;
6746        struct btrfs_trans_handle *trans;
6747        struct btrfs_root *tree_root = root->fs_info->tree_root;
6748        struct btrfs_root_item *root_item = &root->root_item;
6749        struct walk_control *wc;
6750        struct btrfs_key key;
6751        int err = 0;
6752        int ret;
6753        int level;
6754
6755        path = btrfs_alloc_path();
6756        if (!path) {
6757                err = -ENOMEM;
6758                goto out;
6759        }
6760
6761        wc = kzalloc(sizeof(*wc), GFP_NOFS);
6762        if (!wc) {
6763                btrfs_free_path(path);
6764                err = -ENOMEM;
6765                goto out;
6766        }
6767
6768        trans = btrfs_start_transaction(tree_root, 0);
6769        BUG_ON(IS_ERR(trans));
6770
6771        if (block_rsv)
6772                trans->block_rsv = block_rsv;
6773
6774        if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6775                level = btrfs_header_level(root->node);
6776                path->nodes[level] = btrfs_lock_root_node(root);
6777                btrfs_set_lock_blocking(path->nodes[level]);
6778                path->slots[level] = 0;
6779                path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6780                memset(&wc->update_progress, 0,
6781                       sizeof(wc->update_progress));
6782        } else {
6783                btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6784                memcpy(&wc->update_progress, &key,
6785                       sizeof(wc->update_progress));
6786
6787                level = root_item->drop_level;
6788                BUG_ON(level == 0);
6789                path->lowest_level = level;
6790                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6791                path->lowest_level = 0;
6792                if (ret < 0) {
6793                        err = ret;
6794                        goto out_free;
6795                }
6796                WARN_ON(ret > 0);
6797
6798                /*
6799                 * unlock our path, this is safe because only this
6800                 * function is allowed to delete this snapshot
6801                 */
6802                btrfs_unlock_up_safe(path, 0);
6803
6804                level = btrfs_header_level(root->node);
6805                while (1) {
6806                        btrfs_tree_lock(path->nodes[level]);
6807                        btrfs_set_lock_blocking(path->nodes[level]);
6808
6809                        ret = btrfs_lookup_extent_info(trans, root,
6810                                                path->nodes[level]->start,
6811                                                path->nodes[level]->len,
6812                                                &wc->refs[level],
6813                                                &wc->flags[level]);
6814                        BUG_ON(ret);
6815                        BUG_ON(wc->refs[level] == 0);
6816
6817                        if (level == root_item->drop_level)
6818                                break;
6819
6820                        btrfs_tree_unlock(path->nodes[level]);
6821                        WARN_ON(wc->refs[level] != 1);
6822                        level--;
6823                }
6824        }
6825
6826        wc->level = level;
6827        wc->shared_level = -1;
6828        wc->stage = DROP_REFERENCE;
6829        wc->update_ref = update_ref;
6830        wc->keep_locks = 0;
6831        wc->for_reloc = for_reloc;
6832        wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6833
6834        while (1) {
6835                ret = walk_down_tree(trans, root, path, wc);
6836                if (ret < 0) {
6837                        err = ret;
6838                        break;
6839                }
6840
6841                ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6842                if (ret < 0) {
6843                        err = ret;
6844                        break;
6845                }
6846
6847                if (ret > 0) {
6848                        BUG_ON(wc->stage != DROP_REFERENCE);
6849                        break;
6850                }
6851
6852                if (wc->stage == DROP_REFERENCE) {
6853                        level = wc->level;
6854                        btrfs_node_key(path->nodes[level],
6855                                       &root_item->drop_progress,
6856                                       path->slots[level]);
6857                        root_item->drop_level = level;
6858                }
6859
6860                BUG_ON(wc->level == 0);
6861                if (btrfs_should_end_transaction(trans, tree_root)) {
6862                        ret = btrfs_update_root(trans, tree_root,
6863                                                &root->root_key,
6864                                                root_item);
6865                        BUG_ON(ret);
6866
6867                        btrfs_end_transaction_throttle(trans, tree_root);
6868                        trans = btrfs_start_transaction(tree_root, 0);
6869                        BUG_ON(IS_ERR(trans));
6870                        if (block_rsv)
6871                                trans->block_rsv = block_rsv;
6872                }
6873        }
6874        btrfs_release_path(path);
6875        BUG_ON(err);
6876
6877        ret = btrfs_del_root(trans, tree_root, &root->root_key);
6878        BUG_ON(ret);
6879
6880        if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6881                ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6882                                           NULL, NULL);
6883                BUG_ON(ret < 0);
6884                if (ret > 0) {
6885                        /* if we fail to delete the orphan item this time
6886                         * around, it'll get picked up the next time.
6887                         *
6888                         * The most common failure here is just -ENOENT.
6889                         */
6890                        btrfs_del_orphan_item(trans, tree_root,
6891                                              root->root_key.objectid);
6892                }
6893        }
6894
6895        if (root->in_radix) {
6896                btrfs_free_fs_root(tree_root->fs_info, root);
6897        } else {
6898                free_extent_buffer(root->node);
6899                free_extent_buffer(root->commit_root);
6900                kfree(root);
6901        }
6902out_free:
6903        btrfs_end_transaction_throttle(trans, tree_root);
6904        kfree(wc);
6905        btrfs_free_path(path);
6906out:
6907        if (err)
6908                btrfs_std_error(root->fs_info, err);
6909        return;
6910}
6911
6912/*
6913 * drop subtree rooted at tree block 'node'.
6914 *
6915 * NOTE: this function will unlock and release tree block 'node'.
6916 * it is only used by relocation code.
6917 */
6918int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6919                        struct btrfs_root *root,
6920                        struct extent_buffer *node,
6921                        struct extent_buffer *parent)
6922{
6923        struct btrfs_path *path;
6924        struct walk_control *wc;
6925        int level;
6926        int parent_level;
6927        int ret = 0;
6928        int wret;
6929
6930        BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6931
6932        path = btrfs_alloc_path();
6933        if (!path)
6934                return -ENOMEM;
6935
6936        wc = kzalloc(sizeof(*wc), GFP_NOFS);
6937        if (!wc) {
6938                btrfs_free_path(path);
6939                return -ENOMEM;
6940        }
6941
6942        btrfs_assert_tree_locked(parent);
6943        parent_level = btrfs_header_level(parent);
6944        extent_buffer_get(parent);
6945        path->nodes[parent_level] = parent;
6946        path->slots[parent_level] = btrfs_header_nritems(parent);
6947
6948        btrfs_assert_tree_locked(node);
6949        level = btrfs_header_level(node);
6950        path->nodes[level] = node;
6951        path->slots[level] = 0;
6952        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6953
6954        wc->refs[parent_level] = 1;
6955        wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6956        wc->level = level;
6957        wc->shared_level = -1;
6958        wc->stage = DROP_REFERENCE;
6959        wc->update_ref = 0;
6960        wc->keep_locks = 1;
6961        wc->for_reloc = 1;
6962        wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6963
6964        while (1) {
6965                wret = walk_down_tree(trans, root, path, wc);
6966                if (wret < 0) {
6967                        ret = wret;
6968                        break;
6969                }
6970
6971                wret = walk_up_tree(trans, root, path, wc, parent_level);
6972                if (wret < 0)
6973                        ret = wret;
6974                if (wret != 0)
6975                        break;
6976        }
6977
6978        kfree(wc);
6979        btrfs_free_path(path);
6980        return ret;
6981}
6982
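/*
 * helper to pick the profile a block group should be converted to for
 * relocation: honor the restriper's conversion target when one is set,
 * otherwise choose a profile the current number of rw + missing
 * devices can actually support.
 */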
6983static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6984{
6985        u64 num_devices;
6986        u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6987                BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6988
6989        if (root->fs_info->balance_ctl) {
6990                struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
6991                u64 tgt = 0;
6992
6993                /* pick restriper's target profile and return */
6994                if (flags & BTRFS_BLOCK_GROUP_DATA &&
6995                    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6996                        tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
6997                } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
6998                           bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6999                        tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
7000                } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
7001                           bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
7002                        tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
7003                }
7004
7005                if (tgt) {
7006                        /* extended -> chunk profile */
7007                        tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
7008                        return tgt;
7009                }
7010        }
7011
7012        /*
7013         * we add in the count of missing devices because we want
7014         * to make sure that any RAID levels on a degraded FS
7015         * continue to be honored.
7016         */
7017        num_devices = root->fs_info->fs_devices->rw_devices +
7018                root->fs_info->fs_devices->missing_devices;
7019
7020        if (num_devices == 1) {
7021                stripped |= BTRFS_BLOCK_GROUP_DUP;
7022                stripped = flags & ~stripped;
7023
7024                /* turn raid0 into single device chunks */
7025                if (flags & BTRFS_BLOCK_GROUP_RAID0)
7026                        return stripped;
7027
7028                /* turn mirroring into duplication */
7029                if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7030                             BTRFS_BLOCK_GROUP_RAID10))
7031                        return stripped | BTRFS_BLOCK_GROUP_DUP;
7032                return flags;
7033        } else {
7034                /* they already had raid on here, just return */
7035                if (flags & stripped)
7036                        return flags;
7037
7038                stripped |= BTRFS_BLOCK_GROUP_DUP;
7039                stripped = flags & ~stripped;
7040
7041                /* switch duplicated blocks with raid1 */
7042                if (flags & BTRFS_BLOCK_GROUP_DUP)
7043                        return stripped | BTRFS_BLOCK_GROUP_RAID1;
7044
7045                /* turn single device chunks into raid0 */
7046                return stripped | BTRFS_BLOCK_GROUP_RAID0;
7047        }
7048        return flags;
7049}
7050
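/*
 * try to mark a block group read-only.  fails with -ENOSPC unless the
 * space_info can absorb the group's unused bytes as readonly, plus a
 * little slack for metadata/system groups (see below).
 */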
7051static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7052{
7053        struct btrfs_space_info *sinfo = cache->space_info;
7054        u64 num_bytes;
7055        u64 min_allocable_bytes;
7056        int ret = -ENOSPC;
7057
7059        /*
7060         * We need some metadata space and system metadata space for
7061         * allocating chunks in some corner cases, so keep a little
7062         * slack unless we are forced to set the group readonly.
7063         */
7064        if ((sinfo->flags &
7065             (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7066            !force)
7067                min_allocable_bytes = 1 * 1024 * 1024;
7068        else
7069                min_allocable_bytes = 0;
7070
7071        spin_lock(&sinfo->lock);
7072        spin_lock(&cache->lock);
7073
7074        if (cache->ro) {
7075                ret = 0;
7076                goto out;
7077        }
7078
7079        num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7080                    cache->bytes_super - btrfs_block_group_used(&cache->item);
7081
7082        if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7083            sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7084            min_allocable_bytes <= sinfo->total_bytes) {
7085                sinfo->bytes_readonly += num_bytes;
7086                cache->ro = 1;
7087                ret = 0;
7088        }
7089out:
7090        spin_unlock(&cache->lock);
7091        spin_unlock(&sinfo->lock);
7092        return ret;
7093}
7094
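/*
 * make a block group read-only, allocating a new chunk first so the
 * group's contents have somewhere to go, and retrying once with a
 * forced chunk allocation if the first attempt hits -ENOSPC.
 */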
7095int btrfs_set_block_group_ro(struct btrfs_root *root,
7096                             struct btrfs_block_group_cache *cache)
7098{
7099        struct btrfs_trans_handle *trans;
7100        u64 alloc_flags;
7101        int ret;
7102
7103        BUG_ON(cache->ro);
7104
7105        trans = btrfs_join_transaction(root);
7106        BUG_ON(IS_ERR(trans));
7107
7108        alloc_flags = update_block_group_flags(root, cache->flags);
7109        if (alloc_flags != cache->flags)
7110                do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7111                               CHUNK_ALLOC_FORCE);
7112
7113        ret = set_block_group_ro(cache, 0);
7114        if (!ret)
7115                goto out;
7116        alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7117        ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7118                             CHUNK_ALLOC_FORCE);
7119        if (ret < 0)
7120                goto out;
7121        ret = set_block_group_ro(cache, 0);
7122out:
7123        btrfs_end_transaction(trans, root);
7124        return ret;
7125}
7126
7127int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7128                            struct btrfs_root *root, u64 type)
7129{
7130        u64 alloc_flags = get_alloc_profile(root, type);
7131        return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7132                              CHUNK_ALLOC_FORCE);
7133}
7134
7135/*
7136 * helper to account the unused space of all the readonly block groups in the
7137 * list. takes mirrors into account.
7138 */
7139static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7140{
7141        struct btrfs_block_group_cache *block_group;
7142        u64 free_bytes = 0;
7143        int factor;
7144
7145        list_for_each_entry(block_group, groups_list, list) {
7146                spin_lock(&block_group->lock);
7147
7148                if (!block_group->ro) {
7149                        spin_unlock(&block_group->lock);
7150                        continue;
7151                }
7152
7153                if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7154                                          BTRFS_BLOCK_GROUP_RAID10 |
7155                                          BTRFS_BLOCK_GROUP_DUP))
7156                        factor = 2;
7157                else
7158                        factor = 1;
7159
7160                free_bytes += (block_group->key.offset -
7161                               btrfs_block_group_used(&block_group->item)) *
7162                               factor;
7163
7164                spin_unlock(&block_group->lock);
7165        }
7166
7167        return free_bytes;
7168}
7169
7170/*
7171 * helper to account the unused space of all the readonly block groups in the
7172 * space_info. takes mirrors into account.
7173 */
7174u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7175{
7176        int i;
7177        u64 free_bytes = 0;
7178
7179        spin_lock(&sinfo->lock);
7180
7181        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7182                if (!list_empty(&sinfo->block_groups[i]))
7183                        free_bytes += __btrfs_get_ro_block_group_free_space(
7184                                                &sinfo->block_groups[i]);
7185
7186        spin_unlock(&sinfo->lock);
7187
7188        return free_bytes;
7189}
7190
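/*
 * undo set_block_group_ro: give the group's unused bytes back to the
 * writable pool and clear the ro flag.
 */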
7191int btrfs_set_block_group_rw(struct btrfs_root *root,
7192                              struct btrfs_block_group_cache *cache)
7193{
7194        struct btrfs_space_info *sinfo = cache->space_info;
7195        u64 num_bytes;
7196
7197        BUG_ON(!cache->ro);
7198
7199        spin_lock(&sinfo->lock);
7200        spin_lock(&cache->lock);
7201        num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7202                    cache->bytes_super - btrfs_block_group_used(&cache->item);
7203        sinfo->bytes_readonly -= num_bytes;
7204        cache->ro = 0;
7205        spin_unlock(&cache->lock);
7206        spin_unlock(&sinfo->lock);
7207        return 0;
7208}
7209
7210/*
7211 * checks to see if it's even possible to relocate this block group.
7212 *
7213 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7214 * ok to go ahead and try.
7215 */
7216int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7217{
7218        struct btrfs_block_group_cache *block_group;
7219        struct btrfs_space_info *space_info;
7220        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7221        struct btrfs_device *device;
7222        u64 min_free;
7223        u64 dev_min = 1;
7224        u64 dev_nr = 0;
7225        int index;
7226        int full = 0;
7227        int ret = 0;
7228
7229        block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7230
7231        /* odd, couldn't find the block group, leave it alone */
7232        if (!block_group)
7233                return -1;
7234
7235        min_free = btrfs_block_group_used(&block_group->item);
7236
7237        /* no bytes used, we're good */
7238        if (!min_free)
7239                goto out;
7240
7241        space_info = block_group->space_info;
7242        spin_lock(&space_info->lock);
7243
7244        full = space_info->full;
7245
7246        /*
7247         * if this is the last block group we have in this space, we can't
7248         * relocate it unless we're able to allocate a new chunk below.
7249         *
7250         * Otherwise, we need to make sure we have room in the space to handle
7251         * all of the extents from this block group.  If we can, we're good.
7252         */
7253        if ((space_info->total_bytes != block_group->key.offset) &&
7254            (space_info->bytes_used + space_info->bytes_reserved +
7255             space_info->bytes_pinned + space_info->bytes_readonly +
7256             min_free < space_info->total_bytes)) {
7257                spin_unlock(&space_info->lock);
7258                goto out;
7259        }
7260        spin_unlock(&space_info->lock);
7261
7262        /*
7263         * ok we don't have enough space, but maybe we have free space on our
7264         * devices to allocate new chunks for relocation, so loop through our
7265         * alloc devices and guess if we have enough space.  However, if we
7266         * were marked as full, then we know there aren't enough chunks, and we
7267         * can just return.
7268         */
7269        ret = -1;
7270        if (full)
7271                goto out;
7272
7273        /*
7274         * index:
7275         *      0: raid10
7276         *      1: raid1
7277         *      2: dup
7278         *      3: raid0
7279         *      4: single
7280         */
7281        index = get_block_group_index(block_group);
7282        if (index == 0) {
7283                dev_min = 4;
7284                /* Divide by 2 */
7285                min_free >>= 1;
7286        } else if (index == 1) {
7287                dev_min = 2;
7288        } else if (index == 2) {
7289                /* Multiply by 2 */
7290                min_free <<= 1;
7291        } else if (index == 3) {
7292                dev_min = fs_devices->rw_devices;
7293                do_div(min_free, dev_min);
7294        }
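        /*
         * the adjustments above reflect how each profile spreads data:
         * raid10 stripes and mirrors across at least four devices, so
         * each device only needs to fit half of min_free; dup writes
         * everything twice to one device, so min_free doubles; raid0
         * spreads min_free evenly across all rw devices.
         */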
7295
7296        mutex_lock(&root->fs_info->chunk_mutex);
7297        list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7298                u64 dev_offset;
7299
7300                /*
7301                 * check to make sure we can actually find a chunk with enough
7302                 * space to fit our block group in.
7303                 */
7304                if (device->total_bytes > device->bytes_used + min_free) {
7305                        ret = find_free_dev_extent(device, min_free,
7306                                                   &dev_offset, NULL);
7307                        if (!ret)
7308                                dev_nr++;
7309
7310                        if (dev_nr >= dev_min)
7311                                break;
7312
7313                        ret = -1;
7314                }
7315        }
7316        mutex_unlock(&root->fs_info->chunk_mutex);
7317out:
7318        btrfs_put_block_group(block_group);
7319        return ret;
7320}
7321
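/*
 * find the first BTRFS_BLOCK_GROUP_ITEM_KEY item at or after @key in
 * the extent tree.  returns 0 with the path pointing at the item, > 0
 * if there is none, or a negative error.
 */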
7322static int find_first_block_group(struct btrfs_root *root,
7323                struct btrfs_path *path, struct btrfs_key *key)
7324{
7325        int ret = 0;
7326        struct btrfs_key found_key;
7327        struct extent_buffer *leaf;
7328        int slot;
7329
7330        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7331        if (ret < 0)
7332                goto out;
7333
7334        while (1) {
7335                slot = path->slots[0];
7336                leaf = path->nodes[0];
7337                if (slot >= btrfs_header_nritems(leaf)) {
7338                        ret = btrfs_next_leaf(root, path);
7339                        if (ret == 0)
7340                                continue;
7341                        if (ret < 0)
7342                                goto out;
7343                        break;
7344                }
7345                btrfs_item_key_to_cpu(leaf, &found_key, slot);
7346
7347                if (found_key.objectid >= key->objectid &&
7348                    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7349                        ret = 0;
7350                        goto out;
7351                }
7352                path->slots[0]++;
7353        }
7354out:
7355        return ret;
7356}
7357
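/*
 * drop the inode reference each block group holds on its free space
 * cache inode so the inodes can be released before unmount finishes.
 */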
7358void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7359{
7360        struct btrfs_block_group_cache *block_group;
7361        u64 last = 0;
7362
7363        while (1) {
7364                struct inode *inode;
7365
7366                block_group = btrfs_lookup_first_block_group(info, last);
7367                while (block_group) {
7368                        spin_lock(&block_group->lock);
7369                        if (block_group->iref)
7370                                break;
7371                        spin_unlock(&block_group->lock);
7372                        block_group = next_block_group(info->tree_root,
7373                                                       block_group);
7374                }
7375                if (!block_group) {
7376                        if (last == 0)
7377                                break;
7378                        last = 0;
7379                        continue;
7380                }
7381
7382                inode = block_group->inode;
7383                block_group->iref = 0;
7384                block_group->inode = NULL;
7385                spin_unlock(&block_group->lock);
7386                iput(inode);
7387                last = block_group->key.objectid + block_group->key.offset;
7388                btrfs_put_block_group(block_group);
7389        }
7390}
7391
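/*
 * tear down all in-memory block group caches and space_info structs;
 * only called during the final stages of unmount.
 */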
7392int btrfs_free_block_groups(struct btrfs_fs_info *info)
7393{
7394        struct btrfs_block_group_cache *block_group;
7395        struct btrfs_space_info *space_info;
7396        struct btrfs_caching_control *caching_ctl;
7397        struct rb_node *n;
7398
7399        down_write(&info->extent_commit_sem);
7400        while (!list_empty(&info->caching_block_groups)) {
7401                caching_ctl = list_entry(info->caching_block_groups.next,
7402                                         struct btrfs_caching_control, list);
7403                list_del(&caching_ctl->list);
7404                put_caching_control(caching_ctl);
7405        }
7406        up_write(&info->extent_commit_sem);
7407
7408        spin_lock(&info->block_group_cache_lock);
7409        while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7410                block_group = rb_entry(n, struct btrfs_block_group_cache,
7411                                       cache_node);
7412                rb_erase(&block_group->cache_node,
7413                         &info->block_group_cache_tree);
7414                spin_unlock(&info->block_group_cache_lock);
7415
7416                down_write(&block_group->space_info->groups_sem);
7417                list_del(&block_group->list);
7418                up_write(&block_group->space_info->groups_sem);
7419
7420                if (block_group->cached == BTRFS_CACHE_STARTED)
7421                        wait_block_group_cache_done(block_group);
7422
7423                /*
7424                 * We haven't cached this block group, which means we could
7425                 * possibly have excluded extents on this block group.
7426                 */
7427                if (block_group->cached == BTRFS_CACHE_NO)
7428                        free_excluded_extents(info->extent_root, block_group);
7429
7430                btrfs_remove_free_space_cache(block_group);
7431                btrfs_put_block_group(block_group);
7432
7433                spin_lock(&info->block_group_cache_lock);
7434        }
7435        spin_unlock(&info->block_group_cache_lock);
7436
7437        /* now that all the block groups are freed, go through and
7438         * free all the space_info structs.  This is only called during
7439         * the final stages of unmount, and so we know nobody is
7440         * using them.  We call synchronize_rcu() once before we start,
7441         * just to be on the safe side.
7442         */
7443        synchronize_rcu();
7444
7445        release_global_block_rsv(info);
7446
7447        while (!list_empty(&info->space_info)) {
7448                space_info = list_entry(info->space_info.next,
7449                                        struct btrfs_space_info,
7450                                        list);
7451                if (space_info->bytes_pinned > 0 ||
7452                    space_info->bytes_reserved > 0 ||
7453                    space_info->bytes_may_use > 0) {
7454                        WARN_ON(1);
7455                        dump_space_info(space_info, 0, 0);
7456                }
7457                list_del(&space_info->list);
7458                kfree(space_info);
7459        }
7460        return 0;
7461}
7462
7463static void __link_block_group(struct btrfs_space_info *space_info,
7464                               struct btrfs_block_group_cache *cache)
7465{
7466        int index = get_block_group_index(cache);
7467
7468        down_write(&space_info->groups_sem);
7469        list_add_tail(&cache->list, &space_info->block_groups[index]);
7470        up_write(&space_info->groups_sem);
7471}
7472
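/*
 * read every block group item from the extent tree at mount time,
 * build the in-memory caches, hook them up to their space_info and
 * mark groups we shouldn't allocate from read-only.
 */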
7473int btrfs_read_block_groups(struct btrfs_root *root)
7474{
7475        struct btrfs_path *path;
7476        int ret;
7477        struct btrfs_block_group_cache *cache;
7478        struct btrfs_fs_info *info = root->fs_info;
7479        struct btrfs_space_info *space_info;
7480        struct btrfs_key key;
7481        struct btrfs_key found_key;
7482        struct extent_buffer *leaf;
7483        int need_clear = 0;
7484        u64 cache_gen;
7485
7486        root = info->extent_root;
7487        key.objectid = 0;
7488        key.offset = 0;
7489        btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7490        path = btrfs_alloc_path();
7491        if (!path)
7492                return -ENOMEM;
7493        path->reada = 1;
7494
7495        cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7496        if (btrfs_test_opt(root, SPACE_CACHE) &&
7497            btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7498                need_clear = 1;
7499        if (btrfs_test_opt(root, CLEAR_CACHE))
7500                need_clear = 1;
7501
7502        while (1) {
7503                ret = find_first_block_group(root, path, &key);
7504                if (ret > 0)
7505                        break;
7506                if (ret != 0)
7507                        goto error;
7508                leaf = path->nodes[0];
7509                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7510                cache = kzalloc(sizeof(*cache), GFP_NOFS);
7511                if (!cache) {
7512                        ret = -ENOMEM;
7513                        goto error;
7514                }
7515                cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7516                                                GFP_NOFS);
7517                if (!cache->free_space_ctl) {
7518                        kfree(cache);
7519                        ret = -ENOMEM;
7520                        goto error;
7521                }
7522
7523                atomic_set(&cache->count, 1);
7524                spin_lock_init(&cache->lock);
7525                cache->fs_info = info;
7526                INIT_LIST_HEAD(&cache->list);
7527                INIT_LIST_HEAD(&cache->cluster_list);
7528
7529                if (need_clear)
7530                        cache->disk_cache_state = BTRFS_DC_CLEAR;
7531
7532                read_extent_buffer(leaf, &cache->item,
7533                                   btrfs_item_ptr_offset(leaf, path->slots[0]),
7534                                   sizeof(cache->item));
7535                memcpy(&cache->key, &found_key, sizeof(found_key));
7536
7537                key.objectid = found_key.objectid + found_key.offset;
7538                btrfs_release_path(path);
7539                cache->flags = btrfs_block_group_flags(&cache->item);
7540                cache->sectorsize = root->sectorsize;
7541
7542                btrfs_init_free_space_ctl(cache);
7543
7544                /*
7545                 * We need to exclude the super stripes now so that the space
7546                 * info has super bytes accounted for, otherwise we'll think
7547                 * we have more space than we actually do.
7548                 */
7549                exclude_super_stripes(root, cache);
7550
7551                /*
7552                 * check for two cases, either we are full, and therefore
7553                 * don't need to bother with the caching work since we won't
7554                 * find any space, or we are empty, and we can just add all
7555                 * the space in and be done with it.  This saves us a lot of
7556                 * time, particularly in the full case.
7557                 */
7558                if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7559                        cache->last_byte_to_unpin = (u64)-1;
7560                        cache->cached = BTRFS_CACHE_FINISHED;
7561                        free_excluded_extents(root, cache);
7562                } else if (btrfs_block_group_used(&cache->item) == 0) {
7563                        cache->last_byte_to_unpin = (u64)-1;
7564                        cache->cached = BTRFS_CACHE_FINISHED;
7565                        add_new_free_space(cache, root->fs_info,
7566                                           found_key.objectid,
7567                                           found_key.objectid +
7568                                           found_key.offset);
7569                        free_excluded_extents(root, cache);
7570                }
7571
7572                ret = update_space_info(info, cache->flags, found_key.offset,
7573                                        btrfs_block_group_used(&cache->item),
7574                                        &space_info);
7575                BUG_ON(ret);
7576                cache->space_info = space_info;
7577                spin_lock(&cache->space_info->lock);
7578                cache->space_info->bytes_readonly += cache->bytes_super;
7579                spin_unlock(&cache->space_info->lock);
7580
7581                __link_block_group(space_info, cache);
7582
7583                ret = btrfs_add_block_group_cache(root->fs_info, cache);
7584                BUG_ON(ret);
7585
7586                set_avail_alloc_bits(root->fs_info, cache->flags);
7587                if (btrfs_chunk_readonly(root, cache->key.objectid))
7588                        set_block_group_ro(cache, 1);
7589        }
7590
7591        list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7592                if (!(get_alloc_profile(root, space_info->flags) &
7593                      (BTRFS_BLOCK_GROUP_RAID10 |
7594                       BTRFS_BLOCK_GROUP_RAID1 |
7595                       BTRFS_BLOCK_GROUP_DUP)))
7596                        continue;
7597                /*
7598                 * avoid allocating from un-mirrored block groups if there are
7599                 * mirrored block groups.
7600                 */
7601                list_for_each_entry(cache, &space_info->block_groups[3], list)
7602                        set_block_group_ro(cache, 1);
7603                list_for_each_entry(cache, &space_info->block_groups[4], list)
7604                        set_block_group_ro(cache, 1);
7605        }
7606
7607        init_global_block_rsv(info);
7608        ret = 0;
7609error:
7610        btrfs_free_path(path);
7611        return ret;
7612}
7613
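/*
 * create the in-memory cache for a freshly allocated chunk and insert
 * the matching block group item into the extent tree.  the new group
 * starts out fully cached.
 */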
7614int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7615                           struct btrfs_root *root, u64 bytes_used,
7616                           u64 type, u64 chunk_objectid, u64 chunk_offset,
7617                           u64 size)
7618{
7619        int ret;
7620        struct btrfs_root *extent_root;
7621        struct btrfs_block_group_cache *cache;
7622
7623        extent_root = root->fs_info->extent_root;
7624
7625        root->fs_info->last_trans_log_full_commit = trans->transid;
7626
7627        cache = kzalloc(sizeof(*cache), GFP_NOFS);
7628        if (!cache)
7629                return -ENOMEM;
7630        cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7631                                        GFP_NOFS);
7632        if (!cache->free_space_ctl) {
7633                kfree(cache);
7634                return -ENOMEM;
7635        }
7636
7637        cache->key.objectid = chunk_offset;
7638        cache->key.offset = size;
7639        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7640        cache->sectorsize = root->sectorsize;
7641        cache->fs_info = root->fs_info;
7642
7643        atomic_set(&cache->count, 1);
7644        spin_lock_init(&cache->lock);
7645        INIT_LIST_HEAD(&cache->list);
7646        INIT_LIST_HEAD(&cache->cluster_list);
7647
7648        btrfs_init_free_space_ctl(cache);
7649
7650        btrfs_set_block_group_used(&cache->item, bytes_used);
7651        btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7652        cache->flags = type;
7653        btrfs_set_block_group_flags(&cache->item, type);
7654
7655        cache->last_byte_to_unpin = (u64)-1;
7656        cache->cached = BTRFS_CACHE_FINISHED;
7657        exclude_super_stripes(root, cache);
7658
7659        add_new_free_space(cache, root->fs_info, chunk_offset,
7660                           chunk_offset + size);
7661
7662        free_excluded_extents(root, cache);
7663
7664        ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7665                                &cache->space_info);
7666        BUG_ON(ret);
7667        update_global_block_rsv(root->fs_info);
7668
7669        spin_lock(&cache->space_info->lock);
7670        cache->space_info->bytes_readonly += cache->bytes_super;
7671        spin_unlock(&cache->space_info->lock);
7672
7673        __link_block_group(cache->space_info, cache);
7674
7675        ret = btrfs_add_block_group_cache(root->fs_info, cache);
7676        BUG_ON(ret);
7677
7678        ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7679                                sizeof(cache->item));
7680        BUG_ON(ret);
7681
7682        set_avail_alloc_bits(extent_root->fs_info, type);
7683
7684        return 0;
7685}
7686
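/*
 * inverse of set_avail_alloc_bits: forget a profile is available for
 * the given block group types once its last group goes away.
 */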
7687static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7688{
7689        u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
7690
7691        /* chunk -> extended profile */
7692        if (extra_flags == 0)
7693                extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
7694
7695        if (flags & BTRFS_BLOCK_GROUP_DATA)
7696                fs_info->avail_data_alloc_bits &= ~extra_flags;
7697        if (flags & BTRFS_BLOCK_GROUP_METADATA)
7698                fs_info->avail_metadata_alloc_bits &= ~extra_flags;
7699        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
7700                fs_info->avail_system_alloc_bits &= ~extra_flags;
7701}
7702
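/*
 * remove an already emptied, read-only block group: drop its free
 * space cache inode, delete its free space cache item from the tree
 * root, unlink the group from its space_info and the block group cache
 * tree, fix up the space accounting and finally delete the block group
 * item itself.
 */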
7703int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7704                             struct btrfs_root *root, u64 group_start)
7705{
7706        struct btrfs_path *path;
7707        struct btrfs_block_group_cache *block_group;
7708        struct btrfs_free_cluster *cluster;
7709        struct btrfs_root *tree_root = root->fs_info->tree_root;
7710        struct btrfs_key key;
7711        struct inode *inode;
7712        int ret;
7713        int index;
7714        int factor;
7715
7716        root = root->fs_info->extent_root;
7717
7718        block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7719        BUG_ON(!block_group);
7720        BUG_ON(!block_group->ro);
7721
7722        /*
7723         * Free the reserved super bytes from this block group before
7724         * removing it.
7725         */
7726        free_excluded_extents(root, block_group);
7727
7728        memcpy(&key, &block_group->key, sizeof(key));
7729        index = get_block_group_index(block_group);
7730        if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7731                                  BTRFS_BLOCK_GROUP_RAID1 |
7732                                  BTRFS_BLOCK_GROUP_RAID10))
7733                factor = 2;
7734        else
7735                factor = 1;
7736
7737        /* make sure this block group isn't part of an allocation cluster */
7738        cluster = &root->fs_info->data_alloc_cluster;
7739        spin_lock(&cluster->refill_lock);
7740        btrfs_return_cluster_to_free_space(block_group, cluster);
7741        spin_unlock(&cluster->refill_lock);
7742
7743        /*
7744         * make sure this block group isn't part of a metadata
7745         * allocation cluster
7746         */
7747        cluster = &root->fs_info->meta_alloc_cluster;
7748        spin_lock(&cluster->refill_lock);
7749        btrfs_return_cluster_to_free_space(block_group, cluster);
7750        spin_unlock(&cluster->refill_lock);
7751
7752        path = btrfs_alloc_path();
7753        if (!path) {
7754                ret = -ENOMEM;
7755                goto out;
7756        }
7757
7758        inode = lookup_free_space_inode(tree_root, block_group, path);
7759        if (!IS_ERR(inode)) {
7760                ret = btrfs_orphan_add(trans, inode);
7761                BUG_ON(ret);
7762                clear_nlink(inode);
7763                /* One for the block groups ref */
7764                spin_lock(&block_group->lock);
7765                if (block_group->iref) {
7766                        block_group->iref = 0;
7767                        block_group->inode = NULL;
7768                        spin_unlock(&block_group->lock);
7769                        iput(inode);
7770                } else {
7771                        spin_unlock(&block_group->lock);
7772                }
7773                /* One for our lookup ref */
7774                btrfs_add_delayed_iput(inode);
7775        }
7776
7777        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7778        key.offset = block_group->key.objectid;
7779        key.type = 0;
7780
7781        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7782        if (ret < 0)
7783                goto out;
7784        if (ret > 0)
7785                btrfs_release_path(path);
7786        if (ret == 0) {
7787                ret = btrfs_del_item(trans, tree_root, path);
7788                if (ret)
7789                        goto out;
7790                btrfs_release_path(path);
7791        }
7792
7793        spin_lock(&root->fs_info->block_group_cache_lock);
7794        rb_erase(&block_group->cache_node,
7795                 &root->fs_info->block_group_cache_tree);
7796        spin_unlock(&root->fs_info->block_group_cache_lock);
7797
7798        down_write(&block_group->space_info->groups_sem);
7799        /*
7800         * we must use list_del_init so people can check to see if they
7801         * are still on the list after taking the semaphore
7802         */
7803        list_del_init(&block_group->list);
7804        if (list_empty(&block_group->space_info->block_groups[index]))
7805                clear_avail_alloc_bits(root->fs_info, block_group->flags);
7806        up_write(&block_group->space_info->groups_sem);
7807
7808        if (block_group->cached == BTRFS_CACHE_STARTED)
7809                wait_block_group_cache_done(block_group);
7810
7811        btrfs_remove_free_space_cache(block_group);
7812
7813        spin_lock(&block_group->space_info->lock);
7814        block_group->space_info->total_bytes -= block_group->key.offset;
7815        block_group->space_info->bytes_readonly -= block_group->key.offset;
7816        block_group->space_info->disk_total -= block_group->key.offset * factor;
7817        spin_unlock(&block_group->space_info->lock);
7818
7819        memcpy(&key, &block_group->key, sizeof(key));
7820
7821        btrfs_clear_space_info_full(root->fs_info);
7822
7823        btrfs_put_block_group(block_group);
7824        btrfs_put_block_group(block_group);
7825
7826        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7827        if (ret > 0)
7828                ret = -EIO;
7829        if (ret < 0)
7830                goto out;
7831
7832        ret = btrfs_del_item(trans, root, path);
7833out:
7834        btrfs_free_path(path);
7835        return ret;
7836}
7837
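/*
 * create the initial space_info structs for a filesystem with no block
 * groups yet: system, plus either a mixed data+metadata space or
 * separate metadata and data spaces, depending on the MIXED_GROUPS
 * incompat flag.
 */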
7838int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
7839{
7840        struct btrfs_space_info *space_info;
7841        struct btrfs_super_block *disk_super;
7842        u64 features;
7843        u64 flags;
7844        int mixed = 0;
7845        int ret;
7846
7847        disk_super = fs_info->super_copy;
7848        if (!btrfs_super_root(disk_super))
7849                return 1;
7850
7851        features = btrfs_super_incompat_flags(disk_super);
7852        if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
7853                mixed = 1;
7854
7855        flags = BTRFS_BLOCK_GROUP_SYSTEM;
7856        ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7857        if (ret)
7858                goto out;
7859
7860        if (mixed) {
7861                flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
7862                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7863        } else {
7864                flags = BTRFS_BLOCK_GROUP_METADATA;
7865                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7866                if (ret)
7867                        goto out;
7868
7869                flags = BTRFS_BLOCK_GROUP_DATA;
7870                ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7871        }
7872out:
7873        return ret;
7874}
7875
7876int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
7877{
7878        return unpin_extent_range(root, start, end);
7879}
7880
7881int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
7882                               u64 num_bytes, u64 *actual_bytes)
7883{
7884        return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
7885}
7886
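/*
 * trim free space in every block group that overlaps the given range;
 * on return range->len holds the number of bytes actually trimmed.
 *
 * illustrative caller sketch (the FITRIM ioctl path is the real user,
 * the values here are only an example):
 *
 *	struct fstrim_range range = {
 *		.start	= 0,
 *		.len	= btrfs_super_total_bytes(fs_info->super_copy),
 *		.minlen	= 4096,
 *	};
 *	ret = btrfs_trim_fs(root, &range);
 */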
7887int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
7888{
7889        struct btrfs_fs_info *fs_info = root->fs_info;
7890        struct btrfs_block_group_cache *cache = NULL;
7891        u64 group_trimmed;
7892        u64 start;
7893        u64 end;
7894        u64 trimmed = 0;
7895        u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
7896        int ret = 0;
7897
7898        /*
7899         * try to trim all FS space; our block group may start from a non-zero offset.
7900         */
7901        if (range->len == total_bytes)
7902                cache = btrfs_lookup_first_block_group(fs_info, range->start);
7903        else
7904                cache = btrfs_lookup_block_group(fs_info, range->start);
7905
7906        while (cache) {
7907                if (cache->key.objectid >= (range->start + range->len)) {
7908                        btrfs_put_block_group(cache);
7909                        break;
7910                }
7911
7912                start = max(range->start, cache->key.objectid);
7913                end = min(range->start + range->len,
7914                                cache->key.objectid + cache->key.offset);
7915
7916                if (end - start >= range->minlen) {
7917                        if (!block_group_cache_done(cache)) {
7918                                ret = cache_block_group(cache, NULL, root, 0);
7919                                if (!ret)
7920                                        wait_block_group_cache_done(cache);
7921                        }
7922                        ret = btrfs_trim_block_group(cache,
7923                                                     &group_trimmed,
7924                                                     start,
7925                                                     end,
7926                                                     range->minlen);
7927
7928                        trimmed += group_trimmed;
7929                        if (ret) {
7930                                btrfs_put_block_group(cache);
7931                                break;
7932                        }
7933                }
7934
7935                cache = next_block_group(fs_info->tree_root, cache);
7936        }
7937
7938        range->len = trimmed;
7939        return ret;
7940}
7941