linux/fs/btrfs/free-space-cache.c
/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP         (PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)

static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info);

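/*
 * Look up the hidden inode that backs the free space cache keyed at
 * @offset (a block group's objectid, or the free-ino cache offset).
 */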
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
                                               struct btrfs_path *path,
                                               u64 offset)
{
        struct btrfs_key key;
        struct btrfs_key location;
        struct btrfs_disk_key disk_key;
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct inode *inode = NULL;
        int ret;

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return ERR_PTR(ret);
        if (ret > 0) {
                btrfs_release_path(path);
                return ERR_PTR(-ENOENT);
        }

        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        btrfs_free_space_key(leaf, header, &disk_key);
        btrfs_disk_key_to_cpu(&location, &disk_key);
        btrfs_release_path(path);

        inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
        if (!inode)
                return ERR_PTR(-ENOENT);
        if (IS_ERR(inode))
                return inode;
        if (is_bad_inode(inode)) {
                iput(inode);
                return ERR_PTR(-ENOENT);
        }

        mapping_set_gfp_mask(inode->i_mapping,
                        mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

        return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_root *root,
                                      struct btrfs_block_group_cache
                                      *block_group, struct btrfs_path *path)
{
        struct inode *inode = NULL;
        u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

        spin_lock(&block_group->lock);
        if (block_group->inode)
                inode = igrab(block_group->inode);
        spin_unlock(&block_group->lock);
        if (inode)
                return inode;

        inode = __lookup_free_space_inode(root, path,
                                          block_group->key.objectid);
        if (IS_ERR(inode))
                return inode;

        spin_lock(&block_group->lock);
        if ((BTRFS_I(inode)->flags & flags) != flags) {
                printk(KERN_INFO "Old style space inode found, converting.\n");
                BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
                        BTRFS_INODE_NODATACOW;
                block_group->disk_cache_state = BTRFS_DC_CLEAR;
        }

        if (!block_group->iref) {
                block_group->inode = igrab(inode);
                block_group->iref = 1;
        }
        spin_unlock(&block_group->lock);

        return inode;
}

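/*
 * Create the inode item and the free space header item that anchor a
 * new cache at @offset.  The inode is a plain 0600 regular file with
 * NOCOMPRESS and PREALLOC set (plus NODATASUM/NODATACOW for block
 * group caches, which inline their own crcs).
 */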
int __create_free_space_inode(struct btrfs_root *root,
                              struct btrfs_trans_handle *trans,
                              struct btrfs_path *path, u64 ino, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_disk_key disk_key;
        struct btrfs_free_space_header *header;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
        int ret;

        ret = btrfs_insert_empty_inode(trans, root, path, ino);
        if (ret)
                return ret;

        /* We inline crcs for the free space cache */
        if (ino != BTRFS_FREE_INO_OBJECTID)
                flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        btrfs_item_key(leaf, &disk_key, path->slots[0]);
        memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
                             sizeof(*inode_item));
        btrfs_set_inode_generation(leaf, inode_item, trans->transid);
        btrfs_set_inode_size(leaf, inode_item, 0);
        btrfs_set_inode_nbytes(leaf, inode_item, 0);
        btrfs_set_inode_uid(leaf, inode_item, 0);
        btrfs_set_inode_gid(leaf, inode_item, 0);
        btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
        btrfs_set_inode_flags(leaf, inode_item, flags);
        btrfs_set_inode_nlink(leaf, inode_item, 1);
        btrfs_set_inode_transid(leaf, inode_item, trans->transid);
        btrfs_set_inode_block_group(leaf, inode_item, offset);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(struct btrfs_free_space_header));
        if (ret < 0) {
                btrfs_release_path(path);
                return ret;
        }
        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
        btrfs_set_free_space_key(leaf, header, &disk_key);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        return 0;
}

int create_free_space_inode(struct btrfs_root *root,
                            struct btrfs_trans_handle *trans,
                            struct btrfs_block_group_cache *block_group,
                            struct btrfs_path *path)
{
        int ret;
        u64 ino;

        ret = btrfs_find_free_objectid(root, &ino);
        if (ret < 0)
                return ret;

        return __create_free_space_inode(root, trans, path, ino,
                                         block_group->key.objectid);
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                                    struct btrfs_trans_handle *trans,
                                    struct btrfs_path *path,
                                    struct inode *inode)
{
        struct btrfs_block_rsv *rsv;
        u64 needed_bytes;
        loff_t oldsize;
        int ret = 0;

        rsv = trans->block_rsv;
        trans->block_rsv = &root->fs_info->global_block_rsv;

        /* 1 for slack space, 1 for updating the inode */
        needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
                btrfs_calc_trans_metadata_size(root, 1);

        spin_lock(&trans->block_rsv->lock);
        if (trans->block_rsv->reserved < needed_bytes) {
                spin_unlock(&trans->block_rsv->lock);
                trans->block_rsv = rsv;
                return -ENOSPC;
        }
        spin_unlock(&trans->block_rsv->lock);

        oldsize = i_size_read(inode);
        btrfs_i_size_write(inode, 0);
        truncate_pagecache(inode, oldsize, 0);

        /*
         * We don't need an orphan item because truncating the free space cache
         * will never be split across transactions.
         */
        ret = btrfs_truncate_inode_items(trans, root, inode,
                                         0, BTRFS_EXTENT_DATA_KEY);

        if (ret) {
                trans->block_rsv = rsv;
                btrfs_abort_transaction(trans, root, ret);
                return ret;
        }

        ret = btrfs_update_inode(trans, root, inode);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
        trans->block_rsv = rsv;

        return ret;
}

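/* Read ahead the whole cache file so the load below doesn't stall per page. */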
static int readahead_cache(struct inode *inode)
{
        struct file_ra_state *ra;
        unsigned long last_index;

        ra = kzalloc(sizeof(*ra), GFP_NOFS);
        if (!ra)
                return -ENOMEM;

        file_ra_state_init(ra, inode->i_mapping);
        last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

        page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

        kfree(ra);

        return 0;
}

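/*
 * Cursor for streaming the cache file in or out: @cur walks the page
 * currently mapped at @orig, @index is the next page to map, and
 * @check_crcs is set for every cache except the free-ino cache, whose
 * data is covered by ordinary checksums instead of inlined crcs.
 */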
struct io_ctl {
        void *cur, *orig;
        struct page *page;
        struct page **pages;
        struct btrfs_root *root;
        unsigned long size;
        int index;
        int num_pages;
        unsigned check_crcs:1;
};

static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
                       struct btrfs_root *root)
{
        memset(io_ctl, 0, sizeof(struct io_ctl));
        io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
                PAGE_CACHE_SHIFT;
        io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
                                GFP_NOFS);
        if (!io_ctl->pages)
                return -ENOMEM;
        io_ctl->root = root;
        if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
                io_ctl->check_crcs = 1;
        return 0;
}

static void io_ctl_free(struct io_ctl *io_ctl)
{
        kfree(io_ctl->pages);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
        if (io_ctl->cur) {
                kunmap(io_ctl->page);
                io_ctl->cur = NULL;
                io_ctl->orig = NULL;
        }
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
        BUG_ON(io_ctl->index >= io_ctl->num_pages);
        io_ctl->page = io_ctl->pages[io_ctl->index++];
        io_ctl->cur = kmap(io_ctl->page);
        io_ctl->orig = io_ctl->cur;
        io_ctl->size = PAGE_CACHE_SIZE;
        if (clear)
                memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
        int i;

        io_ctl_unmap_page(io_ctl);

        for (i = 0; i < io_ctl->num_pages; i++) {
                if (io_ctl->pages[i]) {
                        ClearPageChecked(io_ctl->pages[i]);
                        unlock_page(io_ctl->pages[i]);
                        page_cache_release(io_ctl->pages[i]);
                }
        }
}

static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
                                int uptodate)
{
        struct page *page;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int i;

        for (i = 0; i < io_ctl->num_pages; i++) {
                page = find_or_create_page(inode->i_mapping, i, mask);
                if (!page) {
                        io_ctl_drop_pages(io_ctl);
                        return -ENOMEM;
                }
                io_ctl->pages[i] = page;
                if (uptodate && !PageUptodate(page)) {
                        btrfs_readpage(NULL, page);
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                printk(KERN_ERR "btrfs: error reading free "
                                       "space cache\n");
                                io_ctl_drop_pages(io_ctl);
                                return -EIO;
                        }
                }
        }

        for (i = 0; i < io_ctl->num_pages; i++) {
                clear_page_dirty_for_io(io_ctl->pages[i]);
                set_page_extent_mapped(io_ctl->pages[i]);
        }

        return 0;
}

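/*
 * On-disk layout of the first page: an array of one u32 crc per page
 * (when crcs are enabled) followed by a u64 generation; without crcs
 * it is just the generation.  Entry records are packed after that.
 */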
static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
        __le64 *val;

        io_ctl_map_page(io_ctl, 1);

        /*
         * Skip the csum areas.  If we don't check crcs then we just have a
         * 64bit chunk at the front of the first page.
         */
        if (io_ctl->check_crcs) {
                io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
                io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
        } else {
                io_ctl->cur += sizeof(u64);
                io_ctl->size -= sizeof(u64) * 2;
        }

        val = io_ctl->cur;
        *val = cpu_to_le64(generation);
        io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
        __le64 *gen;

        /*
         * Skip the crc area.  If we don't check crcs then we just have a 64bit
         * chunk at the front of the first page.
         */
        if (io_ctl->check_crcs) {
                io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
                io_ctl->size -= sizeof(u64) +
                        (sizeof(u32) * io_ctl->num_pages);
        } else {
                io_ctl->cur += sizeof(u64);
                io_ctl->size -= sizeof(u64) * 2;
        }

        gen = io_ctl->cur;
        if (le64_to_cpu(*gen) != generation) {
                printk_ratelimited(KERN_ERR "btrfs: space cache generation "
                                   "(%Lu) does not match inode (%Lu)\n",
                                   (unsigned long long)le64_to_cpu(*gen),
                                   generation);
                io_ctl_unmap_page(io_ctl);
                return -EIO;
        }
        io_ctl->cur += sizeof(u64);
        return 0;
}

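/*
 * Each page gets its own crc32c, stored in the array at the front of
 * page 0.  Page 0's crc only covers the bytes after that array.
 */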
static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
        u32 *tmp;
        u32 crc = ~(u32)0;
        unsigned offset = 0;

        if (!io_ctl->check_crcs) {
                io_ctl_unmap_page(io_ctl);
                return;
        }

        if (index == 0)
                offset = sizeof(u32) * io_ctl->num_pages;

        crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
                              PAGE_CACHE_SIZE - offset);
        btrfs_csum_final(crc, (char *)&crc);
        io_ctl_unmap_page(io_ctl);
        tmp = kmap(io_ctl->pages[0]);
        tmp += index;
        *tmp = crc;
        kunmap(io_ctl->pages[0]);
}

static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
        u32 *tmp, val;
        u32 crc = ~(u32)0;
        unsigned offset = 0;

        if (!io_ctl->check_crcs) {
                io_ctl_map_page(io_ctl, 0);
                return 0;
        }

        if (index == 0)
                offset = sizeof(u32) * io_ctl->num_pages;

        tmp = kmap(io_ctl->pages[0]);
        tmp += index;
        val = *tmp;
        kunmap(io_ctl->pages[0]);

        io_ctl_map_page(io_ctl, 0);
        crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
                              PAGE_CACHE_SIZE - offset);
        btrfs_csum_final(crc, (char *)&crc);
        if (val != crc) {
                printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
                                   "space cache\n");
                io_ctl_unmap_page(io_ctl);
                return -EIO;
        }

        return 0;
}

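/*
 * Append one extent or bitmap entry record, crc-ing the current page
 * and mapping the next one whenever the page fills up.
 */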
static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
                            void *bitmap)
{
        struct btrfs_free_space_entry *entry;

        if (!io_ctl->cur)
                return -ENOSPC;

        entry = io_ctl->cur;
        entry->offset = cpu_to_le64(offset);
        entry->bytes = cpu_to_le64(bytes);
        entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
                BTRFS_FREE_SPACE_EXTENT;
        io_ctl->cur += sizeof(struct btrfs_free_space_entry);
        io_ctl->size -= sizeof(struct btrfs_free_space_entry);

        if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
                return 0;

        io_ctl_set_crc(io_ctl, io_ctl->index - 1);

        /* No more pages to map */
        if (io_ctl->index >= io_ctl->num_pages)
                return 0;

        /* map the next page */
        io_ctl_map_page(io_ctl, 1);
        return 0;
}

static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
        if (!io_ctl->cur)
                return -ENOSPC;

        /*
         * If we aren't at the start of the current page, unmap this one and
         * map the next one if there is any left.
         */
        if (io_ctl->cur != io_ctl->orig) {
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
                if (io_ctl->index >= io_ctl->num_pages)
                        return -ENOSPC;
                io_ctl_map_page(io_ctl, 0);
        }

        memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
        io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        if (io_ctl->index < io_ctl->num_pages)
                io_ctl_map_page(io_ctl, 0);
        return 0;
}

static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
        /*
         * If we're not on the boundary we know we've modified the page and we
         * need to crc the page.
         */
        if (io_ctl->cur != io_ctl->orig)
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        else
                io_ctl_unmap_page(io_ctl);

        while (io_ctl->index < io_ctl->num_pages) {
                io_ctl_map_page(io_ctl, 1);
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        }
}

static int io_ctl_read_entry(struct io_ctl *io_ctl,
                            struct btrfs_free_space *entry, u8 *type)
{
        struct btrfs_free_space_entry *e;
        int ret;

        if (!io_ctl->cur) {
                ret = io_ctl_check_crc(io_ctl, io_ctl->index);
                if (ret)
                        return ret;
        }

        e = io_ctl->cur;
        entry->offset = le64_to_cpu(e->offset);
        entry->bytes = le64_to_cpu(e->bytes);
        *type = e->type;
        io_ctl->cur += sizeof(struct btrfs_free_space_entry);
        io_ctl->size -= sizeof(struct btrfs_free_space_entry);

        if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
                return 0;

        io_ctl_unmap_page(io_ctl);

        return 0;
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
                              struct btrfs_free_space *entry)
{
        int ret;

        ret = io_ctl_check_crc(io_ctl, io_ctl->index);
        if (ret)
                return ret;

        memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
        io_ctl_unmap_page(io_ctl);

        return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous
 * sections of free space that are split up into separate entries.  This poses
 * a problem for tree logging, which may have allocated across what now looks
 * like two entries, because at runtime those entries would have been merged
 * when the pinned extents were added back to the free space cache.  So run
 * through the space cache that we just loaded and merge contiguous entries.
 * This keeps log replay from blowing up and makes for nicer allocator
 * behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_free_space *e, *prev = NULL;
        struct rb_node *n;

again:
        spin_lock(&ctl->tree_lock);
        for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
                e = rb_entry(n, struct btrfs_free_space, offset_index);
                if (!prev)
                        goto next;
                if (e->bitmap || prev->bitmap)
                        goto next;
                if (prev->offset + prev->bytes == e->offset) {
                        unlink_free_space(ctl, prev);
                        unlink_free_space(ctl, e);
                        prev->bytes += e->bytes;
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        link_free_space(ctl, prev);
                        prev = NULL;
                        spin_unlock(&ctl->tree_lock);
                        goto again;
                }
next:
                prev = e;
        }
        spin_unlock(&ctl->tree_lock);
}

int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                            struct btrfs_free_space_ctl *ctl,
                            struct btrfs_path *path, u64 offset)
{
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct io_ctl io_ctl;
        struct btrfs_key key;
        struct btrfs_free_space *e, *n;
        struct list_head bitmaps;
        u64 num_entries;
        u64 num_bitmaps;
        u64 generation;
        u8 type;
        int ret = 0;

        INIT_LIST_HEAD(&bitmaps);

        /* Nothing in the space cache, goodbye */
        if (!i_size_read(inode))
                return 0;

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return 0;
        else if (ret > 0) {
                btrfs_release_path(path);
                return 0;
        }

        ret = -1;

        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        num_entries = btrfs_free_space_entries(leaf, header);
        num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
        generation = btrfs_free_space_generation(leaf, header);
        btrfs_release_path(path);

        if (BTRFS_I(inode)->generation != generation) {
                printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
                       " not match free space cache generation (%llu)\n",
                       (unsigned long long)BTRFS_I(inode)->generation,
                       (unsigned long long)generation);
                return 0;
        }

        if (!num_entries)
                return 0;

        ret = io_ctl_init(&io_ctl, inode, root);
        if (ret)
                return ret;

        ret = readahead_cache(inode);
        if (ret)
                goto out;

        ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
        if (ret)
                goto out;

        ret = io_ctl_check_crc(&io_ctl, 0);
        if (ret)
                goto free_cache;

        ret = io_ctl_check_generation(&io_ctl, generation);
        if (ret)
                goto free_cache;

        while (num_entries) {
                e = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
                if (!e) {
                        ret = -ENOMEM;
                        goto free_cache;
                }

                ret = io_ctl_read_entry(&io_ctl, e, &type);
                if (ret) {
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        goto free_cache;
                }

                if (!e->bytes) {
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        goto free_cache;
                }

                if (type == BTRFS_FREE_SPACE_EXTENT) {
                        spin_lock(&ctl->tree_lock);
                        ret = link_free_space(ctl, e);
                        spin_unlock(&ctl->tree_lock);
                        if (ret) {
                                printk(KERN_ERR "Duplicate entries in "
                                       "free space cache, dumping\n");
                                kmem_cache_free(btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                } else {
                        BUG_ON(!num_bitmaps);
                        num_bitmaps--;
                        e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
                        if (!e->bitmap) {
                                ret = -ENOMEM;
                                kmem_cache_free(btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                        spin_lock(&ctl->tree_lock);
                        ret = link_free_space(ctl, e);
                        ctl->total_bitmaps++;
                        ctl->op->recalc_thresholds(ctl);
                        spin_unlock(&ctl->tree_lock);
                        if (ret) {
                                printk(KERN_ERR "Duplicate entries in "
                                       "free space cache, dumping\n");
                                kmem_cache_free(btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                        list_add_tail(&e->list, &bitmaps);
                }

                num_entries--;
        }

        io_ctl_unmap_page(&io_ctl);

        /*
         * The bitmap payloads are stored after all the entry records, so we
         * can only read them back once every entry has been added to the
         * cache.
         */
        list_for_each_entry_safe(e, n, &bitmaps, list) {
                list_del_init(&e->list);
                ret = io_ctl_read_bitmap(&io_ctl, e);
                if (ret)
                        goto free_cache;
        }

        io_ctl_drop_pages(&io_ctl);
        merge_space_tree(ctl);
        ret = 1;
out:
        io_ctl_free(&io_ctl);
        return ret;
free_cache:
        io_ctl_drop_pages(&io_ctl);
        __btrfs_remove_free_space_cache(ctl);
        goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
                          struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_root *root = fs_info->tree_root;
        struct inode *inode;
        struct btrfs_path *path;
        int ret = 0;
        bool matched;
        u64 used = btrfs_block_group_used(&block_group->item);

        /*
         * If this block group has been marked to be cleared for one reason or
         * another then we can't trust the on disk cache, so just return.
         */
        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
                spin_unlock(&block_group->lock);
                return 0;
        }
        spin_unlock(&block_group->lock);

        path = btrfs_alloc_path();
        if (!path)
                return 0;
        path->search_commit_root = 1;
        path->skip_locking = 1;

        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode)) {
                btrfs_free_path(path);
                return 0;
        }

        /* We may have converted the inode and made the cache invalid. */
        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
                spin_unlock(&block_group->lock);
                btrfs_free_path(path);
                goto out;
        }
        spin_unlock(&block_group->lock);

        ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
                                      path, block_group->key.objectid);
        btrfs_free_path(path);
        if (ret <= 0)
                goto out;

        spin_lock(&ctl->tree_lock);
        matched = (ctl->free_space == (block_group->key.offset - used -
                                       block_group->bytes_super));
        spin_unlock(&ctl->tree_lock);

        if (!matched) {
                __btrfs_remove_free_space_cache(ctl);
                printk(KERN_ERR "btrfs: block group %llu has the wrong amount "
                       "of free space\n", block_group->key.objectid);
                ret = -1;
        }
out:
        if (ret < 0) {
                /* This cache is bogus, make sure it gets cleared */
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_CLEAR;
                spin_unlock(&block_group->lock);
                ret = 0;

                printk(KERN_ERR "btrfs: failed to load free space cache "
                       "for block group %llu\n", block_group->key.objectid);
        }

        iput(inode);
        return ret;
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root: the root the inode belongs to
 * @ctl: the free space cache we are going to write out
 * @block_group: the block_group for this cache, if it belongs to a block_group
 * @trans: the trans handle
 * @path: the path to use
 * @offset: the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount.  This will return 0 if it was successful in writing the
 * cache out, and -1 if it was not.
 */
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                            struct btrfs_free_space_ctl *ctl,
                            struct btrfs_block_group_cache *block_group,
                            struct btrfs_trans_handle *trans,
                            struct btrfs_path *path, u64 offset)
{
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct rb_node *node;
        struct list_head *pos, *n;
        struct extent_state *cached_state = NULL;
        struct btrfs_free_cluster *cluster = NULL;
        struct extent_io_tree *unpin = NULL;
        struct io_ctl io_ctl;
        struct list_head bitmap_list;
        struct btrfs_key key;
        u64 start, extent_start, extent_end, len;
        int entries = 0;
        int bitmaps = 0;
        int ret;
        int err = -1;

        INIT_LIST_HEAD(&bitmap_list);

        if (!i_size_read(inode))
                return -1;

        ret = io_ctl_init(&io_ctl, inode, root);
        if (ret)
                return -1;

        /* Get the cluster for this block_group if it exists */
        if (block_group && !list_empty(&block_group->cluster_list))
                cluster = list_entry(block_group->cluster_list.next,
                                     struct btrfs_free_cluster,
                                     block_group_list);

        /* Lock all pages first so we can lock the extent safely. */
        ret = io_ctl_prepare_pages(&io_ctl, inode, 0);
        if (ret)
                goto out;

        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         0, &cached_state);

        node = rb_first(&ctl->free_space_offset);
        if (!node && cluster) {
                node = rb_first(&cluster->root);
                cluster = NULL;
        }

        /* Make sure we can fit our crcs into the first page */
        if (io_ctl.check_crcs &&
            (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
                WARN_ON(1);
                goto out_nospc;
        }

        io_ctl_set_generation(&io_ctl, trans->transid);

        /* Write out the extent entries */
        while (node) {
                struct btrfs_free_space *e;

                e = rb_entry(node, struct btrfs_free_space, offset_index);
                entries++;

                ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
                                       e->bitmap);
                if (ret)
                        goto out_nospc;

                if (e->bitmap) {
                        list_add_tail(&e->list, &bitmap_list);
                        bitmaps++;
                }
                node = rb_next(node);
                if (!node && cluster) {
                        node = rb_first(&cluster->root);
                        cluster = NULL;
                }
        }

        /*
         * We want to add any pinned extents to our free space cache
         * so we don't leak the space
         */

        /*
         * We shouldn't have switched the pinned extents yet so this is the
         * right one
         */
        unpin = root->fs_info->pinned_extents;

        if (block_group)
                start = block_group->key.objectid;

        while (block_group && (start < block_group->key.objectid +
                               block_group->key.offset)) {
                ret = find_first_extent_bit(unpin, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY, NULL);
                if (ret) {
                        ret = 0;
                        break;
                }

                /* This pinned extent is out of our range */
                if (extent_start >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                extent_start = max(extent_start, start);
                extent_end = min(block_group->key.objectid +
                                 block_group->key.offset, extent_end + 1);
                len = extent_end - extent_start;

                entries++;
                ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
                if (ret)
                        goto out_nospc;

                start = extent_end;
        }

        /* Write out the bitmaps */
        list_for_each_safe(pos, n, &bitmap_list) {
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);

                ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
                if (ret)
                        goto out_nospc;
                list_del_init(&entry->list);
        }

        /* Zero out the rest of the pages just to make sure */
        io_ctl_zero_remaining_pages(&io_ctl);

        ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
                                0, i_size_read(inode), &cached_state);
        io_ctl_drop_pages(&io_ctl);
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                             i_size_read(inode) - 1, &cached_state, GFP_NOFS);

        if (ret)
                goto out;

        btrfs_wait_ordered_range(inode, 0, (u64)-1);

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0) {
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
                                 GFP_NOFS);
                goto out;
        }
        leaf = path->nodes[0];
        if (ret > 0) {
                struct btrfs_key found_key;
                BUG_ON(!path->slots[0]);
                path->slots[0]--;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
                    found_key.offset != offset) {
                        clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
                                         inode->i_size - 1,
                                         EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
                                         NULL, GFP_NOFS);
                        btrfs_release_path(path);
                        goto out;
                }
        }

        BTRFS_I(inode)->generation = trans->transid;
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        btrfs_set_free_space_entries(leaf, header, entries);
        btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
        btrfs_set_free_space_generation(leaf, header, trans->transid);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        err = 0;
out:
        io_ctl_free(&io_ctl);
        if (err) {
                invalidate_inode_pages2(inode->i_mapping);
                BTRFS_I(inode)->generation = 0;
        }
        btrfs_update_inode(trans, root, inode);
        return err;

out_nospc:
        list_for_each_safe(pos, n, &bitmap_list) {
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);
                list_del_init(&entry->list);
        }
        io_ctl_drop_pages(&io_ctl);
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                             i_size_read(inode) - 1, &cached_state, GFP_NOFS);
        goto out;
}

int btrfs_write_out_cache(struct btrfs_root *root,
                          struct btrfs_trans_handle *trans,
                          struct btrfs_block_group_cache *block_group,
                          struct btrfs_path *path)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct inode *inode;
        int ret = 0;

        root = root->fs_info->tree_root;

        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
                spin_unlock(&block_group->lock);
                return 0;
        }
        spin_unlock(&block_group->lock);

        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode))
                return 0;

        ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
                                      path, block_group->key.objectid);
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_ERROR;
                spin_unlock(&block_group->lock);
                ret = 0;
#ifdef DEBUG
                printk(KERN_ERR "btrfs: failed to write free space cache "
                       "for block group %llu\n", block_group->key.objectid);
#endif
        }

        iput(inode);
        return ret;
}

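/*
 * Bitmap granularity is one bit per @unit bytes (the sector size), so
 * with unit == 4096 an offset 8192 bytes past bitmap_start is bit 2.
 */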
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
                                          u64 offset)
{
        BUG_ON(offset < bitmap_start);
        offset -= bitmap_start;
        return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
        return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
                                   u64 offset)
{
        u64 bitmap_start;
        u64 bytes_per_bitmap;

        bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
        bitmap_start = offset - ctl->start;
        bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
        bitmap_start *= bytes_per_bitmap;
        bitmap_start += ctl->start;

        return bitmap_start;
}

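/*
 * Insert @node into the offset-sorted rbtree.  An extent entry and a
 * bitmap entry may share an offset; the comment below explains why the
 * extent entry must sort first.
 */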
static int tree_insert_offset(struct rb_root *root, u64 offset,
                              struct rb_node *node, int bitmap)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, offset_index);

                if (offset < info->offset) {
                        p = &(*p)->rb_left;
                } else if (offset > info->offset) {
                        p = &(*p)->rb_right;
                } else {
                        /*
                         * we could have a bitmap entry and an extent entry
                         * share the same offset.  If this is the case, we want
                         * the extent entry to always be found first if we do a
                         * linear search through the tree, since we want to have
                         * the quickest allocation time, and allocating from an
                         * extent is faster than allocating from a bitmap.  So
                         * if we're inserting a bitmap and we find an entry at
                         * this offset, we want to go right, or after this entry
                         * logically.  If we are inserting an extent and we've
                         * found a bitmap, we want to go left, or before
                         * logically.
                         */
                        if (bitmap) {
                                if (info->bitmap) {
                                        WARN_ON_ONCE(1);
                                        return -EEXIST;
                                }
                                p = &(*p)->rb_right;
                        } else {
                                if (!info->bitmap) {
                                        WARN_ON_ONCE(1);
                                        return -EEXIST;
                                }
                                p = &(*p)->rb_left;
                        }
                }
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we
 * just want a section that is at least bytes in size and comes at or after
 * the given offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
                   u64 offset, int bitmap_only, int fuzzy)
{
        struct rb_node *n = ctl->free_space_offset.rb_node;
        struct btrfs_free_space *entry, *prev = NULL;

        /* find entry that is closest to the 'offset' */
        while (1) {
                if (!n) {
                        entry = NULL;
                        break;
                }

                entry = rb_entry(n, struct btrfs_free_space, offset_index);
                prev = entry;

                if (offset < entry->offset)
                        n = n->rb_left;
                else if (offset > entry->offset)
                        n = n->rb_right;
                else
                        break;
        }

        if (bitmap_only) {
                if (!entry)
                        return NULL;
                if (entry->bitmap)
                        return entry;

                /*
                 * bitmap entry and extent entry may share same offset,
                 * in that case, bitmap entry comes after extent entry.
                 */
                n = rb_next(n);
                if (!n)
                        return NULL;
                entry = rb_entry(n, struct btrfs_free_space, offset_index);
                if (entry->offset != offset)
                        return NULL;

                WARN_ON(!entry->bitmap);
                return entry;
        } else if (entry) {
                if (entry->bitmap) {
                        /*
                         * if previous extent entry covers the offset,
                         * we should return it instead of the bitmap entry
                         */
                        n = rb_prev(&entry->offset_index);
                        if (n) {
                                prev = rb_entry(n, struct btrfs_free_space,
                                                offset_index);
                                if (!prev->bitmap &&
                                    prev->offset + prev->bytes > offset)
                                        entry = prev;
                        }
                }
                return entry;
        }

        if (!prev)
                return NULL;

        /* find last entry before the 'offset' */
        entry = prev;
        if (entry->offset > offset) {
                n = rb_prev(&entry->offset_index);
                if (n) {
                        entry = rb_entry(n, struct btrfs_free_space,
                                         offset_index);
                        BUG_ON(entry->offset > offset);
                } else {
                        if (fuzzy)
                                return entry;
                        else
                                return NULL;
                }
        }

        if (entry->bitmap) {
                n = rb_prev(&entry->offset_index);
                if (n) {
                        prev = rb_entry(n, struct btrfs_free_space,
                                        offset_index);
                        if (!prev->bitmap &&
                            prev->offset + prev->bytes > offset)
                                return prev;
                }
                if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
                        return entry;
        } else if (entry->offset + entry->bytes > offset)
                return entry;

        if (!fuzzy)
                return NULL;

        while (1) {
                if (entry->bitmap) {
                        if (entry->offset + BITS_PER_BITMAP *
                            ctl->unit > offset)
                                break;
                } else {
                        if (entry->offset + entry->bytes > offset)
                                break;
                }

                n = rb_next(&entry->offset_index);
                if (!n)
                        return NULL;
                entry = rb_entry(n, struct btrfs_free_space, offset_index);
        }
        return entry;
}

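/*
 * __unlink_free_space drops an entry from the rbtree without touching
 * the free space total; unlink_free_space also subtracts its bytes.
 */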
static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
                    struct btrfs_free_space *info)
{
        rb_erase(&info->offset_index, &ctl->free_space_offset);
        ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info)
{
        __unlink_free_space(ctl, info);
        ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info)
{
        int ret = 0;

        BUG_ON(!info->bitmap && !info->bytes);
        ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
                                 &info->offset_index, (info->bitmap != NULL));
        if (ret)
                return ret;

        ctl->free_space += info->bytes;
        ctl->free_extents++;
        return ret;
}

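/*
 * Rebalance how much memory may go to extent entries versus bitmaps so
 * the cache stays near MAX_CACHE_BYTES_PER_GIG for this block group.
 */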
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_block_group_cache *block_group = ctl->private;
        u64 max_bytes;
        u64 bitmap_bytes;
        u64 extent_bytes;
        u64 size = block_group->key.offset;
        u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
        int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

        max_bitmaps = max(max_bitmaps, 1);

        BUG_ON(ctl->total_bitmaps > max_bitmaps);

        /*
         * The goal is to keep the total amount of memory used per 1gb of space
         * at or below 32k, so we need to adjust how much memory we allow to be
         * used by extent based free space tracking
         */
        if (size < 1024 * 1024 * 1024)
                max_bytes = MAX_CACHE_BYTES_PER_GIG;
        else
                max_bytes = MAX_CACHE_BYTES_PER_GIG *
                        div64_u64(size, 1024 * 1024 * 1024);

        /*
         * we want to account for 1 more bitmap than what we have so we can
         * make sure we don't go over our overall goal of
         * MAX_CACHE_BYTES_PER_GIG as we add more bitmaps.
         */
        bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

        if (bitmap_bytes >= max_bytes) {
                ctl->extents_thresh = 0;
                return;
        }

        /*
         * we want the extent entry threshold to always be at most 1/2 the max
         * bytes we can have, or whatever is less than that.
         */
        extent_bytes = max_bytes - bitmap_bytes;
        extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

        ctl->extents_thresh =
                div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}

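/*
 * The __bitmap_clear_bits variant leaves ctl->free_space alone for
 * callers that do their own accounting; bitmap_clear_bits and
 * bitmap_set_bits keep the total in sync with the bitmap.
 */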
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
                                       struct btrfs_free_space *info,
                                       u64 offset, u64 bytes)
{
        unsigned long start, count;

        start = offset_to_bit(info->offset, ctl->unit, offset);
        count = bytes_to_bits(bytes, ctl->unit);
        BUG_ON(start + count > BITS_PER_BITMAP);

        bitmap_clear(info->bitmap, start, count);

        info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info, u64 offset,
                              u64 bytes)
{
        __bitmap_clear_bits(ctl, info, offset, bytes);
        ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
                            struct btrfs_free_space *info, u64 offset,
                            u64 bytes)
{
        unsigned long start, count;

        start = offset_to_bit(info->offset, ctl->unit, offset);
        count = bytes_to_bits(bytes, ctl->unit);
        BUG_ON(start + count > BITS_PER_BITMAP);

        bitmap_set(info->bitmap, start, count);

        info->bytes += bytes;
        ctl->free_space += bytes;
}

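/*
 * Scan @bitmap_info for a run of set bits at least *bytes long starting
 * at or after *offset.  On success both are updated to the run that was
 * found (which may be longer than asked for) and 0 is returned.
 */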
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
                         struct btrfs_free_space *bitmap_info, u64 *offset,
                         u64 *bytes)
{
        unsigned long found_bits = 0;
        unsigned long bits, i;
        unsigned long next_zero;

        i = offset_to_bit(bitmap_info->offset, ctl->unit,
                          max_t(u64, *offset, bitmap_info->offset));
        bits = bytes_to_bits(*bytes, ctl->unit);

        for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
                next_zero = find_next_zero_bit(bitmap_info->bitmap,
                                               BITS_PER_BITMAP, i);
                if ((next_zero - i) >= bits) {
                        found_bits = next_zero - i;
                        break;
                }
                i = next_zero;
        }

        if (found_bits) {
                *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
                *bytes = (u64)(found_bits) * ctl->unit;
                return 0;
        }

        return -1;
}

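/*
 * Walk the offset-ordered entries for the first one that can satisfy
 * *bytes at the requested alignment, searching inside bitmap entries as
 * needed; *offset and *bytes are updated to describe the space found.
 */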
1467static struct btrfs_free_space *
1468find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
1469                unsigned long align)
1470{
1471        struct btrfs_free_space *entry;
1472        struct rb_node *node;
1473        u64 ctl_off;
1474        u64 tmp;
1475        u64 align_off;
1476        int ret;
1477
1478        if (!ctl->free_space_offset.rb_node)
1479                return NULL;
1480
1481        entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
1482        if (!entry)
1483                return NULL;
1484
1485        for (node = &entry->offset_index; node; node = rb_next(node)) {
1486                entry = rb_entry(node, struct btrfs_free_space, offset_index);
1487                if (entry->bytes < *bytes)
1488                        continue;
1489
1490                /* make sure the space returned is big enough
1491                 * to match our requested alignment
1492                 */
1493                if (*bytes >= align) {
1494                        ctl_off = entry->offset - ctl->start;
1495                        tmp = ctl_off + align - 1;
1496                        do_div(tmp, align);
1497                        tmp = tmp * align + ctl->start;
1498                        align_off = tmp - entry->offset;
1499                } else {
1500                        align_off = 0;
1501                        tmp = entry->offset;
1502                }
1503
1504                if (entry->bytes < *bytes + align_off)
1505                        continue;
1506
1507                if (entry->bitmap) {
1508                        ret = search_bitmap(ctl, entry, &tmp, bytes);
1509                        if (!ret) {
1510                                *offset = tmp;
1511                                return entry;
1512                        }
1513                        continue;
1514                }
1515
1516                *offset = tmp;
1517                *bytes = entry->bytes - align_off;
1518                return entry;
1519        }
1520
1521        return NULL;
1522}
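
/*
 * Worked example (hypothetical numbers) of the alignment round-up in
 * find_free_space(): with ctl->start == 0, entry->offset == 12288 and
 * align == 65536, tmp = (12288 + 65535) / 65536 == 1, so the aligned
 * offset is 65536 and align_off == 53248.  Those leading bytes are not
 * lost; btrfs_find_space_for_alloc() re-adds them to the cache.
 */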
1523
1524static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1525                           struct btrfs_free_space *info, u64 offset)
1526{
1527        info->offset = offset_to_bitmap(ctl, offset);
1528        info->bytes = 0;
1529        INIT_LIST_HEAD(&info->list);
1530        link_free_space(ctl, info);
1531        ctl->total_bitmaps++;
1532
1533        ctl->op->recalc_thresholds(ctl);
1534}
1535
1536static void free_bitmap(struct btrfs_free_space_ctl *ctl,
1537                        struct btrfs_free_space *bitmap_info)
1538{
1539        unlink_free_space(ctl, bitmap_info);
1540        kfree(bitmap_info->bitmap);
1541        kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
1542        ctl->total_bitmaps--;
1543        ctl->op->recalc_thresholds(ctl);
1544}
1545
1546static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
1547                              struct btrfs_free_space *bitmap_info,
1548                              u64 *offset, u64 *bytes)
1549{
1550        u64 end;
1551        u64 search_start, search_bytes;
1552        int ret;
1553
1554again:
1555        end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
1556
1557        /*
1558         * We need to search for bits in this bitmap.  Because of how space
1559         * is added, this bitmap may only cover part of the extent, so we
1560         * need to search for as much of it as we can, clear that amount,
1561         * and then go looking for the next portion.
1562         */
1563        search_start = *offset;
1564        search_bytes = ctl->unit;
1565        search_bytes = min(search_bytes, end - search_start + 1);
1566        ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
1567        BUG_ON(ret < 0 || search_start != *offset);
1568
1569        /* We may have found more bits than what we need */
1570        search_bytes = min(search_bytes, *bytes);
1571
1572        /* Cannot clear past the end of the bitmap */
1573        search_bytes = min(search_bytes, end - search_start + 1);
1574
1575        bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
1576        *offset += search_bytes;
1577        *bytes -= search_bytes;
1578
1579        if (*bytes) {
1580                struct rb_node *next = rb_next(&bitmap_info->offset_index);
1581                if (!bitmap_info->bytes)
1582                        free_bitmap(ctl, bitmap_info);
1583
1584                /*
1585                 * no entry after this bitmap, but we still have bytes to
1586                 * remove, so something has gone wrong.
1587                 */
1588                if (!next)
1589                        return -EINVAL;
1590
1591                bitmap_info = rb_entry(next, struct btrfs_free_space,
1592                                       offset_index);
1593
1594                /*
1595                 * if the next entry isn't a bitmap we need to return to let the
1596                 * extent stuff do its work.
1597                 */
1598                if (!bitmap_info->bitmap)
1599                        return -EAGAIN;
1600
1601                /*
1602                 * Ok the next item is a bitmap, but it may not actually hold
1603                 * the information for the rest of this free space stuff, so
1604                 * look for it, and if we don't find it return so we can try
1605                 * everything over again.
1606                 */
1607                search_start = *offset;
1608                search_bytes = ctl->unit;
1609                ret = search_bitmap(ctl, bitmap_info, &search_start,
1610                                    &search_bytes);
1611                if (ret < 0 || search_start != *offset)
1612                        return -EAGAIN;
1613
1614                goto again;
1615        } else if (!bitmap_info->bytes)
1616                free_bitmap(ctl, bitmap_info);
1617
1618        return 0;
1619}
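
/*
 * Example of the loop above (hypothetical numbers): each bitmap covers
 * BITS_PER_BITMAP * ctl->unit bytes, e.g. 128MB with 4K pages and a 4K
 * unit.  Removing a range that straddles two adjacent bitmaps clears
 * the tail of the first, follows rb_next() to the second, and loops via
 * "goto again" until *bytes reaches zero.
 */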
1620
1621static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
1622                               struct btrfs_free_space *info, u64 offset,
1623                               u64 bytes)
1624{
1625        u64 bytes_to_set = 0;
1626        u64 end;
1627
1628        end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1629
1630        bytes_to_set = min(end - offset, bytes);
1631
1632        bitmap_set_bits(ctl, info, offset, bytes_to_set);
1633
1634        return bytes_to_set;
1635
1636}
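
/*
 * Note that add_bytes_to_bitmap() clamps the range to the end of the
 * bitmap's coverage and returns how much it actually set, so a caller
 * adding a span that crosses a bitmap boundary (insert_into_bitmap()
 * below) advances offset by the return value and retries for the rest.
 */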
1637
1638static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1639                      struct btrfs_free_space *info)
1640{
1641        struct btrfs_block_group_cache *block_group = ctl->private;
1642
1643        /*
1644         * If we are below the extents threshold then we can add this as an
1645         * extent, and don't have to deal with the bitmap
1646         */
1647        if (ctl->free_extents < ctl->extents_thresh) {
1648                /*
1649                 * If this block group has some small extents we don't want to
1650                 * to reserve them for larger extents.  However, if we have
1651                 * plenty of cache left then go ahead and add them; no sense in
1652                 * adding the overhead of a bitmap if we don't have to.
1653                 * the overhead of a bitmap if we don't have to.
1654                 */
1655                if (info->bytes <= block_group->sectorsize * 4) {
1656                        if (ctl->free_extents * 2 <= ctl->extents_thresh)
1657                                return false;
1658                } else {
1659                        return false;
1660                }
1661        }
1662
1663        /*
1664         * The original block groups from mkfs can be really small, like 8
1665         * megabytes, so don't bother with a bitmap for those entries.  However
1666         * some block groups can be smaller than what a bitmap would cover but
1667         * are still large enough that they could overflow the 32k memory limit,
1668         * so allow those block groups to still be allowed to have a bitmap
1669         * entry.
1670         */
1671        if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
1672                return false;
1673
1674        return true;
1675}
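
/*
 * Worked example for the heuristic above, assuming 4K pages and a 4K
 * sectorsize: extents of at most 16K (sectorsize * 4) still get plain
 * extent entries while no more than half of the extent slots are used,
 * and since one bitmap spans 128MB, block groups smaller than 64MB
 * ((BITS_PER_BITMAP * unit) >> 1) never get bitmap entries at all.
 */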
1676
1677static struct btrfs_free_space_op free_space_op = {
1678        .recalc_thresholds      = recalculate_thresholds,
1679        .use_bitmap             = use_bitmap,
1680};
1681
1682static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1683                              struct btrfs_free_space *info)
1684{
1685        struct btrfs_free_space *bitmap_info;
1686        struct btrfs_block_group_cache *block_group = NULL;
1687        int added = 0;
1688        u64 bytes, offset, bytes_added;
1689        int ret;
1690
1691        bytes = info->bytes;
1692        offset = info->offset;
1693
1694        if (!ctl->op->use_bitmap(ctl, info))
1695                return 0;
1696
1697        if (ctl->op == &free_space_op)
1698                block_group = ctl->private;
1699again:
1700        /*
1701         * Since we link bitmaps right into the cluster we need to see if we
1702         * have a cluster here, and if so and it has our bitmap we need to add
1703         * the free space to that bitmap.
1704         */
1705        if (block_group && !list_empty(&block_group->cluster_list)) {
1706                struct btrfs_free_cluster *cluster;
1707                struct rb_node *node;
1708                struct btrfs_free_space *entry;
1709
1710                cluster = list_entry(block_group->cluster_list.next,
1711                                     struct btrfs_free_cluster,
1712                                     block_group_list);
1713                spin_lock(&cluster->lock);
1714                node = rb_first(&cluster->root);
1715                if (!node) {
1716                        spin_unlock(&cluster->lock);
1717                        goto no_cluster_bitmap;
1718                }
1719
1720                entry = rb_entry(node, struct btrfs_free_space, offset_index);
1721                if (!entry->bitmap) {
1722                        spin_unlock(&cluster->lock);
1723                        goto no_cluster_bitmap;
1724                }
1725
1726                if (entry->offset == offset_to_bitmap(ctl, offset)) {
1727                        bytes_added = add_bytes_to_bitmap(ctl, entry,
1728                                                          offset, bytes);
1729                        bytes -= bytes_added;
1730                        offset += bytes_added;
1731                }
1732                spin_unlock(&cluster->lock);
1733                if (!bytes) {
1734                        ret = 1;
1735                        goto out;
1736                }
1737        }
1738
1739no_cluster_bitmap:
1740        bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1741                                         1, 0);
1742        if (!bitmap_info) {
1743                BUG_ON(added);
1744                goto new_bitmap;
1745        }
1746
1747        bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
1748        bytes -= bytes_added;
1749        offset += bytes_added;
1750        added = 0;
1751
1752        if (!bytes) {
1753                ret = 1;
1754                goto out;
1755        } else
1756                goto again;
1757
1758new_bitmap:
1759        if (info && info->bitmap) {
1760                add_new_bitmap(ctl, info, offset);
1761                added = 1;
1762                info = NULL;
1763                goto again;
1764        } else {
1765                spin_unlock(&ctl->tree_lock);
1766
1767                /* no pre-allocated info, allocate a new one */
1768                if (!info) {
1769                        info = kmem_cache_zalloc(btrfs_free_space_cachep,
1770                                                 GFP_NOFS);
1771                        if (!info) {
1772                                spin_lock(&ctl->tree_lock);
1773                                ret = -ENOMEM;
1774                                goto out;
1775                        }
1776                }
1777
1778                /* allocate the bitmap */
1779                info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
1780                spin_lock(&ctl->tree_lock);
1781                if (!info->bitmap) {
1782                        ret = -ENOMEM;
1783                        goto out;
1784                }
1785                goto again;
1786        }
1787
1788out:
1789        if (info) {
1790                if (info->bitmap)
1791                        kfree(info->bitmap);
1792                kmem_cache_free(btrfs_free_space_cachep, info);
1793        }
1794
1795        return ret;
1796}
1797
1798static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
1799                          struct btrfs_free_space *info, bool update_stat)
1800{
1801        struct btrfs_free_space *left_info;
1802        struct btrfs_free_space *right_info;
1803        bool merged = false;
1804        u64 offset = info->offset;
1805        u64 bytes = info->bytes;
1806
1807        /*
1808         * first we want to see if there is free space adjacent to the range we
1809         * are adding, if there is remove that struct and add a new one to
1810         * cover the entire range
1811         */
1812        right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
1813        if (right_info && rb_prev(&right_info->offset_index))
1814                left_info = rb_entry(rb_prev(&right_info->offset_index),
1815                                     struct btrfs_free_space, offset_index);
1816        else
1817                left_info = tree_search_offset(ctl, offset - 1, 0, 0);
1818
1819        if (right_info && !right_info->bitmap) {
1820                if (update_stat)
1821                        unlink_free_space(ctl, right_info);
1822                else
1823                        __unlink_free_space(ctl, right_info);
1824                info->bytes += right_info->bytes;
1825                kmem_cache_free(btrfs_free_space_cachep, right_info);
1826                merged = true;
1827        }
1828
1829        if (left_info && !left_info->bitmap &&
1830            left_info->offset + left_info->bytes == offset) {
1831                if (update_stat)
1832                        unlink_free_space(ctl, left_info);
1833                else
1834                        __unlink_free_space(ctl, left_info);
1835                info->offset = left_info->offset;
1836                info->bytes += left_info->bytes;
1837                kmem_cache_free(btrfs_free_space_cachep, left_info);
1838                merged = true;
1839        }
1840
1841        return merged;
1842}
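
/*
 * Merge example (hypothetical offsets): with existing extent entries
 * [4096, 8192) and [12288, 16384), adding [8192, 12288) finds both a
 * right and a left neighbour, unlinks them, and leaves one info spanning
 * [4096, 16384) for the caller to link.  Bitmap entries never merge.
 */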
1843
1844int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
1845                           u64 offset, u64 bytes)
1846{
1847        struct btrfs_free_space *info;
1848        int ret = 0;
1849
1850        info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
1851        if (!info)
1852                return -ENOMEM;
1853
1854        info->offset = offset;
1855        info->bytes = bytes;
1856
1857        spin_lock(&ctl->tree_lock);
1858
1859        if (try_merge_free_space(ctl, info, true))
1860                goto link;
1861
1862        /*
1863         * If there was no extent directly to the left or right of this new
1864         * extent then we know we're going to have to allocate a new extent,
1865         * so before we do that see if we need to drop this into a bitmap.
1866         */
1867        ret = insert_into_bitmap(ctl, info);
1868        if (ret < 0) {
1869                goto out;
1870        } else if (ret) {
1871                ret = 0;
1872                goto out;
1873        }
1874link:
1875        ret = link_free_space(ctl, info);
1876        if (ret)
1877                kmem_cache_free(btrfs_free_space_cachep, info);
1878out:
1879        spin_unlock(&ctl->tree_lock);
1880
1881        if (ret) {
1882                printk(KERN_CRIT "btrfs: unable to add free space: %d\n", ret);
1883                BUG_ON(ret == -EEXIST);
1884        }
1885
1886        return ret;
1887}
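
/*
 * Hypothetical usage sketch: the real entry point elsewhere in the tree
 * is a btrfs_add_free_space() wrapper that passes the block group's ctl.
 * Something along these lines returns a freed 1MB extent to the cache.
 */
static inline int example_return_extent(struct btrfs_block_group_cache *bg,
                                        u64 start)
{
        /* start is an absolute logical byte offset inside the group */
        return __btrfs_add_free_space(bg->free_space_ctl, start,
                                      1024 * 1024);
}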
1888
1889int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
1890                            u64 offset, u64 bytes)
1891{
1892        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1893        struct btrfs_free_space *info;
1894        int ret;
1895        bool re_search = false;
1896
1897        spin_lock(&ctl->tree_lock);
1898
1899again:
1900        ret = 0;
1901        if (!bytes)
1902                goto out_lock;
1903
1904        info = tree_search_offset(ctl, offset, 0, 0);
1905        if (!info) {
1906                /*
1907                 * oops didn't find an extent that matched the space we wanted
1908                 * to remove, look for a bitmap instead
1909                 */
1910                info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1911                                          1, 0);
1912                if (!info) {
1913                        /*
1914                         * If we found a partial bit of our free space in a
1915                         * bitmap but then couldn't find the other part this may
1916                         * be a problem, so WARN about it.
1917                         */
1918                        WARN_ON(re_search);
1919                        goto out_lock;
1920                }
1921        }
1922
1923        re_search = false;
1924        if (!info->bitmap) {
1925                unlink_free_space(ctl, info);
1926                if (offset == info->offset) {
1927                        u64 to_free = min(bytes, info->bytes);
1928
1929                        info->bytes -= to_free;
1930                        info->offset += to_free;
1931                        if (info->bytes) {
1932                                ret = link_free_space(ctl, info);
1933                                WARN_ON(ret);
1934                        } else {
1935                                kmem_cache_free(btrfs_free_space_cachep, info);
1936                        }
1937
1938                        offset += to_free;
1939                        bytes -= to_free;
1940                        goto again;
1941                } else {
1942                        u64 old_end = info->bytes + info->offset;
1943
1944                        info->bytes = offset - info->offset;
1945                        ret = link_free_space(ctl, info);
1946                        WARN_ON(ret);
1947                        if (ret)
1948                                goto out_lock;
1949
1950                        /* Not enough bytes in this entry to satisfy us */
1951                        if (old_end < offset + bytes) {
1952                                bytes -= old_end - offset;
1953                                offset = old_end;
1954                                goto again;
1955                        } else if (old_end == offset + bytes) {
1956                                /* all done */
1957                                goto out_lock;
1958                        }
1959                        spin_unlock(&ctl->tree_lock);
1960
1961                        ret = btrfs_add_free_space(block_group, offset + bytes,
1962                                                   old_end - (offset + bytes));
1963                        WARN_ON(ret);
1964                        goto out;
1965                }
1966        }
1967
1968        ret = remove_from_bitmap(ctl, info, &offset, &bytes);
1969        if (ret == -EAGAIN) {
1970                re_search = true;
1971                goto again;
1972        }
1973        BUG_ON(ret); /* logic error */
1974out_lock:
1975        spin_unlock(&ctl->tree_lock);
1976out:
1977        return ret;
1978}
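
/*
 * Punch-out example (hypothetical numbers): removing [base + 4K, 8K)
 * from a single extent entry covering [base, 16K) trims the entry to
 * the head [base, 4K) and re-adds the tail [base + 12K, 4K) through
 * btrfs_add_free_space(), since old_end exceeds offset + bytes.
 */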
1979
1980void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1981                           u64 bytes)
1982{
1983        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1984        struct btrfs_free_space *info;
1985        struct rb_node *n;
1986        int count = 0;
1987
1988        for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
1989                info = rb_entry(n, struct btrfs_free_space, offset_index);
1990                if (info->bytes >= bytes && !block_group->ro)
1991                        count++;
1992                printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
1993                       (unsigned long long)info->offset,
1994                       (unsigned long long)info->bytes,
1995                       (info->bitmap) ? "yes" : "no");
1996        }
1997        printk(KERN_INFO "block group has cluster?: %s\n",
1998               list_empty(&block_group->cluster_list) ? "no" : "yes");
1999        printk(KERN_INFO "%d blocks of free space at or bigger than %llu "
2000               "bytes\n", count, (unsigned long long)bytes);
2001}
2002
2003void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
2004{
2005        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2006
2007        spin_lock_init(&ctl->tree_lock);
2008        ctl->unit = block_group->sectorsize;
2009        ctl->start = block_group->key.objectid;
2010        ctl->private = block_group;
2011        ctl->op = &free_space_op;
2012
2013        /*
2014         * we only want to have 32k of ram per block group for keeping
2015         * track of free space, and if we pass 1/2 of that we want to
2016         * start converting things over to using bitmaps
2017         */
2018        ctl->extents_thresh = ((1024 * 32) / 2) /
2019                                sizeof(struct btrfs_free_space);
2020}
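
/*
 * Worked example for the initial threshold: half of the 32K budget is
 * 16K.  If, say, sizeof(struct btrfs_free_space) is 64 bytes on a given
 * build, extents_thresh starts at 256, i.e. roughly 256 plain extent
 * entries are allowed before use_bitmap() starts preferring bitmaps.
 */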
2021
2022/*
2023 * for a given cluster, put all of its extents back into the free
2024 * space cache.  If the block group passed doesn't match the block group
2025 * pointed to by the cluster, someone else raced in and freed the
2026 * cluster already.  In that case, we just return without changing anything
2027 */
2028static int
2029__btrfs_return_cluster_to_free_space(
2030                             struct btrfs_block_group_cache *block_group,
2031                             struct btrfs_free_cluster *cluster)
2032{
2033        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2034        struct btrfs_free_space *entry;
2035        struct rb_node *node;
2036
2037        spin_lock(&cluster->lock);
2038        if (cluster->block_group != block_group)
2039                goto out;
2040
2041        cluster->block_group = NULL;
2042        cluster->window_start = 0;
2043        list_del_init(&cluster->block_group_list);
2044
2045        node = rb_first(&cluster->root);
2046        while (node) {
2047                bool bitmap;
2048
2049                entry = rb_entry(node, struct btrfs_free_space, offset_index);
2050                node = rb_next(&entry->offset_index);
2051                rb_erase(&entry->offset_index, &cluster->root);
2052
2053                bitmap = (entry->bitmap != NULL);
2054                if (!bitmap)
2055                        try_merge_free_space(ctl, entry, false);
2056                tree_insert_offset(&ctl->free_space_offset,
2057                                   entry->offset, &entry->offset_index, bitmap);
2058        }
2059        cluster->root = RB_ROOT;
2060
2061out:
2062        spin_unlock(&cluster->lock);
2063        btrfs_put_block_group(block_group);
2064        return 0;
2065}
2066
2067void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
2068{
2069        struct btrfs_free_space *info;
2070        struct rb_node *node;
2071
2072        while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
2073                info = rb_entry(node, struct btrfs_free_space, offset_index);
2074                if (!info->bitmap) {
2075                        unlink_free_space(ctl, info);
2076                        kmem_cache_free(btrfs_free_space_cachep, info);
2077                } else {
2078                        free_bitmap(ctl, info);
2079                }
2080                if (need_resched()) {
2081                        spin_unlock(&ctl->tree_lock);
2082                        cond_resched();
2083                        spin_lock(&ctl->tree_lock);
2084                }
2085        }
2086}
2087
2088void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
2089{
2090        spin_lock(&ctl->tree_lock);
2091        __btrfs_remove_free_space_cache_locked(ctl);
2092        spin_unlock(&ctl->tree_lock);
2093}
2094
2095void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2096{
2097        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2098        struct btrfs_free_cluster *cluster;
2099        struct list_head *head;
2100
2101        spin_lock(&ctl->tree_lock);
2102        while ((head = block_group->cluster_list.next) !=
2103               &block_group->cluster_list) {
2104                cluster = list_entry(head, struct btrfs_free_cluster,
2105                                     block_group_list);
2106
2107                WARN_ON(cluster->block_group != block_group);
2108                __btrfs_return_cluster_to_free_space(block_group, cluster);
2109                if (need_resched()) {
2110                        spin_unlock(&ctl->tree_lock);
2111                        cond_resched();
2112                        spin_lock(&ctl->tree_lock);
2113                }
2114        }
2115        __btrfs_remove_free_space_cache_locked(ctl);
2116        spin_unlock(&ctl->tree_lock);
2117
2118}
2119
2120u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2121                               u64 offset, u64 bytes, u64 empty_size)
2122{
2123        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2124        struct btrfs_free_space *entry = NULL;
2125        u64 bytes_search = bytes + empty_size;
2126        u64 ret = 0;
2127        u64 align_gap = 0;
2128        u64 align_gap_len = 0;
2129
2130        spin_lock(&ctl->tree_lock);
2131        entry = find_free_space(ctl, &offset, &bytes_search,
2132                                block_group->full_stripe_len);
2133        if (!entry)
2134                goto out;
2135
2136        ret = offset;
2137        if (entry->bitmap) {
2138                bitmap_clear_bits(ctl, entry, offset, bytes);
2139                if (!entry->bytes)
2140                        free_bitmap(ctl, entry);
2141        } else {
2143                unlink_free_space(ctl, entry);
2144                align_gap_len = offset - entry->offset;
2145                align_gap = entry->offset;
2146
2147                entry->offset = offset + bytes;
2148                WARN_ON(entry->bytes < bytes + align_gap_len);
2149
2150                entry->bytes -= bytes + align_gap_len;
2151                if (!entry->bytes)
2152                        kmem_cache_free(btrfs_free_space_cachep, entry);
2153                else
2154                        link_free_space(ctl, entry);
2155        }
2156
2157out:
2158        spin_unlock(&ctl->tree_lock);
2159
2160        if (align_gap_len)
2161                __btrfs_add_free_space(ctl, align_gap, align_gap_len);
2162        return ret;
2163}
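
/*
 * Hypothetical caller sketch: carve 64K out of a block group with no
 * empty_size padding.  A return of 0 means nothing suitable was found;
 * on success the cache has already been updated and any bytes skipped
 * for stripe alignment have been handed back to it.
 */
static inline u64 example_alloc_64k(struct btrfs_block_group_cache *bg)
{
        return btrfs_find_space_for_alloc(bg, bg->key.objectid,
                                          64 * 1024, 0);
}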
2164
2165/*
2166 * given a cluster, put all of its extents back into the free space
2167 * cache.  If a block group is passed, this function will only free
2168 * a cluster that belongs to the passed block group.
2169 *
2170 * Otherwise, it'll get a reference on the block group pointed to by the
2171 * cluster and remove the cluster from it.
2172 */
2173int btrfs_return_cluster_to_free_space(
2174                               struct btrfs_block_group_cache *block_group,
2175                               struct btrfs_free_cluster *cluster)
2176{
2177        struct btrfs_free_space_ctl *ctl;
2178        int ret;
2179
2180        /* first, get a safe pointer to the block group */
2181        spin_lock(&cluster->lock);
2182        if (!block_group) {
2183                block_group = cluster->block_group;
2184                if (!block_group) {
2185                        spin_unlock(&cluster->lock);
2186                        return 0;
2187                }
2188        } else if (cluster->block_group != block_group) {
2189                /* someone else has already freed it, don't redo their work */
2190                spin_unlock(&cluster->lock);
2191                return 0;
2192        }
2193        atomic_inc(&block_group->count);
2194        spin_unlock(&cluster->lock);
2195
2196        ctl = block_group->free_space_ctl;
2197
2198        /* now return any extents the cluster had on it */
2199        spin_lock(&ctl->tree_lock);
2200        ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
2201        spin_unlock(&ctl->tree_lock);
2202
2203        /* finally drop our ref */
2204        btrfs_put_block_group(block_group);
2205        return ret;
2206}
2207
2208static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2209                                   struct btrfs_free_cluster *cluster,
2210                                   struct btrfs_free_space *entry,
2211                                   u64 bytes, u64 min_start)
2212{
2213        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2214        int err;
2215        u64 search_start = cluster->window_start;
2216        u64 search_bytes = bytes;
2217        u64 ret = 0;
2218
2219        search_start = min_start;
2220        search_bytes = bytes;
2221
2222        err = search_bitmap(ctl, entry, &search_start, &search_bytes);
2223        if (err)
2224                return 0;
2225
2226        ret = search_start;
2227        __bitmap_clear_bits(ctl, entry, ret, bytes);
2228
2229        return ret;
2230}
2231
2232/*
2233 * given a cluster, try to allocate 'bytes' from it, returns 0
2234 * if it couldn't find anything suitably large, or a logical disk offset
2235 * if things worked out
2236 */
2237u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2238                             struct btrfs_free_cluster *cluster, u64 bytes,
2239                             u64 min_start)
2240{
2241        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2242        struct btrfs_free_space *entry = NULL;
2243        struct rb_node *node;
2244        u64 ret = 0;
2245
2246        spin_lock(&cluster->lock);
2247        if (bytes > cluster->max_size)
2248                goto out;
2249
2250        if (cluster->block_group != block_group)
2251                goto out;
2252
2253        node = rb_first(&cluster->root);
2254        if (!node)
2255                goto out;
2256
2257        entry = rb_entry(node, struct btrfs_free_space, offset_index);
2258        while (1) {
2259                if (entry->bytes < bytes ||
2260                    (!entry->bitmap && entry->offset < min_start)) {
2261                        node = rb_next(&entry->offset_index);
2262                        if (!node)
2263                                break;
2264                        entry = rb_entry(node, struct btrfs_free_space,
2265                                         offset_index);
2266                        continue;
2267                }
2268
2269                if (entry->bitmap) {
2270                        ret = btrfs_alloc_from_bitmap(block_group,
2271                                                      cluster, entry, bytes,
2272                                                      cluster->window_start);
2273                        if (ret == 0) {
2274                                node = rb_next(&entry->offset_index);
2275                                if (!node)
2276                                        break;
2277                                entry = rb_entry(node, struct btrfs_free_space,
2278                                                 offset_index);
2279                                continue;
2280                        }
2281                        cluster->window_start += bytes;
2282                } else {
2283                        ret = entry->offset;
2284
2285                        entry->offset += bytes;
2286                        entry->bytes -= bytes;
2287                }
2288
2289                if (entry->bytes == 0)
2290                        rb_erase(&entry->offset_index, &cluster->root);
2291                break;
2292        }
2293out:
2294        spin_unlock(&cluster->lock);
2295
2296        if (!ret)
2297                return 0;
2298
2299        spin_lock(&ctl->tree_lock);
2300
2301        ctl->free_space -= bytes;
2302        if (entry->bytes == 0) {
2303                ctl->free_extents--;
2304                if (entry->bitmap) {
2305                        kfree(entry->bitmap);
2306                        ctl->total_bitmaps--;
2307                        ctl->op->recalc_thresholds(ctl);
2308                }
2309                kmem_cache_free(btrfs_free_space_cachep, entry);
2310        }
2311
2312        spin_unlock(&ctl->tree_lock);
2313
2314        return ret;
2315}
2316
2317static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2318                                struct btrfs_free_space *entry,
2319                                struct btrfs_free_cluster *cluster,
2320                                u64 offset, u64 bytes,
2321                                u64 cont1_bytes, u64 min_bytes)
2322{
2323        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2324        unsigned long next_zero;
2325        unsigned long i;
2326        unsigned long want_bits;
2327        unsigned long min_bits;
2328        unsigned long found_bits;
2329        unsigned long start = 0;
2330        unsigned long total_found = 0;
2331        int ret;
2332
2333        i = offset_to_bit(entry->offset, ctl->unit,
2334                          max_t(u64, offset, entry->offset));
2335        want_bits = bytes_to_bits(bytes, ctl->unit);
2336        min_bits = bytes_to_bits(min_bytes, ctl->unit);
2337
2338again:
2339        found_bits = 0;
2340        for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
2341                next_zero = find_next_zero_bit(entry->bitmap,
2342                                               BITS_PER_BITMAP, i);
2343                if (next_zero - i >= min_bits) {
2344                        found_bits = next_zero - i;
2345                        break;
2346                }
2347                i = next_zero;
2348        }
2349
2350        if (!found_bits)
2351                return -ENOSPC;
2352
2353        if (!total_found) {
2354                start = i;
2355                cluster->max_size = 0;
2356        }
2357
2358        total_found += found_bits;
2359
2360        if (cluster->max_size < found_bits * ctl->unit)
2361                cluster->max_size = found_bits * ctl->unit;
2362
2363        if (total_found < want_bits || cluster->max_size < cont1_bytes) {
2364                i = next_zero + 1;
2365                goto again;
2366        }
2367
2368        cluster->window_start = start * ctl->unit + entry->offset;
2369        rb_erase(&entry->offset_index, &ctl->free_space_offset);
2370        ret = tree_insert_offset(&cluster->root, entry->offset,
2371                                 &entry->offset_index, 1);
2372        BUG_ON(ret); /* -EEXIST; Logic error */
2373
2374        trace_btrfs_setup_cluster(block_group, cluster,
2375                                  total_found * ctl->unit, 1);
2376        return 0;
2377}
2378
2379/*
2380 * This searches the block group for just extents to fill the cluster with.
2381 * Try to find a cluster with at least bytes total bytes, at least one
2382 * extent of cont1_bytes, and other clusters of at least min_bytes.
2383 */
2384static noinline int
2385setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2386                        struct btrfs_free_cluster *cluster,
2387                        struct list_head *bitmaps, u64 offset, u64 bytes,
2388                        u64 cont1_bytes, u64 min_bytes)
2389{
2390        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2391        struct btrfs_free_space *first = NULL;
2392        struct btrfs_free_space *entry = NULL;
2393        struct btrfs_free_space *last;
2394        struct rb_node *node;
2395        u64 window_start;
2396        u64 window_free;
2397        u64 max_extent;
2398        u64 total_size = 0;
2399
2400        entry = tree_search_offset(ctl, offset, 0, 1);
2401        if (!entry)
2402                return -ENOSPC;
2403
2404        /*
2405         * We don't want bitmaps, so just move along until we find a normal
2406         * extent entry.
2407         */
2408        while (entry->bitmap || entry->bytes < min_bytes) {
2409                if (entry->bitmap && list_empty(&entry->list))
2410                        list_add_tail(&entry->list, bitmaps);
2411                node = rb_next(&entry->offset_index);
2412                if (!node)
2413                        return -ENOSPC;
2414                entry = rb_entry(node, struct btrfs_free_space, offset_index);
2415        }
2416
2417        window_start = entry->offset;
2418        window_free = entry->bytes;
2419        max_extent = entry->bytes;
2420        first = entry;
2421        last = entry;
2422
2423        for (node = rb_next(&entry->offset_index); node;
2424             node = rb_next(&entry->offset_index)) {
2425                entry = rb_entry(node, struct btrfs_free_space, offset_index);
2426
2427                if (entry->bitmap) {
2428                        if (list_empty(&entry->list))
2429                                list_add_tail(&entry->list, bitmaps);
2430                        continue;
2431                }
2432
2433                if (entry->bytes < min_bytes)
2434                        continue;
2435
2436                last = entry;
2437                window_free += entry->bytes;
2438                if (entry->bytes > max_extent)
2439                        max_extent = entry->bytes;
2440        }
2441
2442        if (window_free < bytes || max_extent < cont1_bytes)
2443                return -ENOSPC;
2444
2445        cluster->window_start = first->offset;
2446
2447        node = &first->offset_index;
2448
2449        /*
2450         * now we've found our entries, pull them out of the free space
2451         * cache and put them into the cluster rbtree
2452         */
2453        do {
2454                int ret;
2455
2456                entry = rb_entry(node, struct btrfs_free_space, offset_index);
2457                node = rb_next(&entry->offset_index);
2458                if (entry->bitmap || entry->bytes < min_bytes)
2459                        continue;
2460
2461                rb_erase(&entry->offset_index, &ctl->free_space_offset);
2462                ret = tree_insert_offset(&cluster->root, entry->offset,
2463                                         &entry->offset_index, 0);
2464                total_size += entry->bytes;
2465                BUG_ON(ret); /* -EEXIST; Logic error */
2466        } while (node && entry != last);
2467
2468        cluster->max_size = max_extent;
2469        trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
2470        return 0;
2471}
2472
2473/*
2474 * This specifically looks for bitmaps that may work in the cluster, we assume
2475 * that we have already failed to find extents that will work.
2476 */
2477static noinline int
2478setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2479                     struct btrfs_free_cluster *cluster,
2480                     struct list_head *bitmaps, u64 offset, u64 bytes,
2481                     u64 cont1_bytes, u64 min_bytes)
2482{
2483        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2484        struct btrfs_free_space *entry;
2485        int ret = -ENOSPC;
2486        u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2487
2488        if (ctl->total_bitmaps == 0)
2489                return -ENOSPC;
2490
2491        /*
2492         * The bitmap that covers offset won't be in the list unless offset
2493         * is just its start offset.
2494         */
2495        entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
2496        if (entry->offset != bitmap_offset) {
2497                entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
2498                if (entry && list_empty(&entry->list))
2499                        list_add(&entry->list, bitmaps);
2500        }
2501
2502        list_for_each_entry(entry, bitmaps, list) {
2503                if (entry->bytes < bytes)
2504                        continue;
2505                ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2506                                           bytes, cont1_bytes, min_bytes);
2507                if (!ret)
2508                        return 0;
2509        }
2510
2511        /*
2512         * The bitmaps list has all the bitmaps that record free space
2513         * starting after offset, so no more search is required.
2514         */
2515        return -ENOSPC;
2516}
2517
2518/*
2519 * here we try to find a cluster of blocks in a block group.  The goal
2520 * is to find at least bytes+empty_size.
2521 * We might not find them all in one contiguous area.
2522 *
2523 * returns zero and sets up cluster if things worked out, otherwise
2524 * it returns -ENOSPC
2525 */
2526int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2527                             struct btrfs_root *root,
2528                             struct btrfs_block_group_cache *block_group,
2529                             struct btrfs_free_cluster *cluster,
2530                             u64 offset, u64 bytes, u64 empty_size)
2531{
2532        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2533        struct btrfs_free_space *entry, *tmp;
2534        LIST_HEAD(bitmaps);
2535        u64 min_bytes;
2536        u64 cont1_bytes;
2537        int ret;
2538
2539        /*
2540         * Choose the minimum extent size we'll require for this
2541         * cluster.  For SSD_SPREAD, don't allow any fragmentation.
2542         * For metadata, allow allocations with smaller extents.  For
2543         * data, keep it dense.
2544         */
2545        if (btrfs_test_opt(root, SSD_SPREAD)) {
2546                cont1_bytes = min_bytes = bytes + empty_size;
2547        } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
2548                cont1_bytes = bytes;
2549                min_bytes = block_group->sectorsize;
2550        } else {
2551                cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
2552                min_bytes = block_group->sectorsize;
2553        }
2554
2555        spin_lock(&ctl->tree_lock);
2556
2557        /*
2558         * If we know we don't have enough space to make a cluster don't even
2559         * bother doing all the work to try and find one.
2560         */
2561        if (ctl->free_space < bytes) {
2562                spin_unlock(&ctl->tree_lock);
2563                return -ENOSPC;
2564        }
2565
2566        spin_lock(&cluster->lock);
2567
2568        /* someone already found a cluster, hooray */
2569        if (cluster->block_group) {
2570                ret = 0;
2571                goto out;
2572        }
2573
2574        trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
2575                                 min_bytes);
2576
2577        INIT_LIST_HEAD(&bitmaps);
2578        ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2579                                      bytes + empty_size,
2580                                      cont1_bytes, min_bytes);
2581        if (ret)
2582                ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
2583                                           offset, bytes + empty_size,
2584                                           cont1_bytes, min_bytes);
2585
2586        /* Clear our temporary list */
2587        list_for_each_entry_safe(entry, tmp, &bitmaps, list)
2588                list_del_init(&entry->list);
2589
2590        if (!ret) {
2591                atomic_inc(&block_group->count);
2592                list_add_tail(&cluster->block_group_list,
2593                              &block_group->cluster_list);
2594                cluster->block_group = block_group;
2595        } else {
2596                trace_btrfs_failed_cluster_setup(block_group);
2597        }
2598out:
2599        spin_unlock(&cluster->lock);
2600        spin_unlock(&ctl->tree_lock);
2601
2602        return ret;
2603}
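
/*
 * Hypothetical sketch of a cluster's life cycle as driven by the extent
 * allocator: set one up over a dense region, carve allocations from it,
 * then hand whatever is left back to the per-group cache.  The helper
 * name and sizes are made up and error handling is elided.
 */
static inline void example_cluster_cycle(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root,
                                         struct btrfs_block_group_cache *bg,
                                         struct btrfs_free_cluster *cluster)
{
        u64 addr;

        if (btrfs_find_space_cluster(trans, root, bg, cluster,
                                     bg->key.objectid, 256 * 1024, 0))
                return;         /* -ENOSPC: no suitably dense region */

        /* returns a logical byte address on success, 0 on failure */
        addr = btrfs_alloc_from_cluster(bg, cluster, 128 * 1024,
                                        bg->key.objectid);
        if (addr)
                trace_printk("got 128K at %llu\n",
                             (unsigned long long)addr);

        btrfs_return_cluster_to_free_space(bg, cluster);
}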
2604
2605/*
2606 * simple code to zero out a cluster
2607 */
2608void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2609{
2610        spin_lock_init(&cluster->lock);
2611        spin_lock_init(&cluster->refill_lock);
2612        cluster->root = RB_ROOT;
2613        cluster->max_size = 0;
2614        INIT_LIST_HEAD(&cluster->block_group_list);
2615        cluster->block_group = NULL;
2616}
2617
2618static int do_trimming(struct btrfs_block_group_cache *block_group,
2619                       u64 *total_trimmed, u64 start, u64 bytes,
2620                       u64 reserved_start, u64 reserved_bytes)
2621{
2622        struct btrfs_space_info *space_info = block_group->space_info;
2623        struct btrfs_fs_info *fs_info = block_group->fs_info;
2624        int ret;
2625        int update = 0;
2626        u64 trimmed = 0;
2627
2628        spin_lock(&space_info->lock);
2629        spin_lock(&block_group->lock);
2630        if (!block_group->ro) {
2631                block_group->reserved += reserved_bytes;
2632                space_info->bytes_reserved += reserved_bytes;
2633                update = 1;
2634        }
2635        spin_unlock(&block_group->lock);
2636        spin_unlock(&space_info->lock);
2637
2638        ret = btrfs_error_discard_extent(fs_info->extent_root,
2639                                         start, bytes, &trimmed);
2640        if (!ret)
2641                *total_trimmed += trimmed;
2642
2643        btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
2644
2645        if (update) {
2646                spin_lock(&space_info->lock);
2647                spin_lock(&block_group->lock);
2648                if (block_group->ro)
2649                        space_info->bytes_readonly += reserved_bytes;
2650                block_group->reserved -= reserved_bytes;
2651                space_info->bytes_reserved -= reserved_bytes;
2652                spin_unlock(&space_info->lock);
2653                spin_unlock(&block_group->lock);
2654        }
2655
2656        return ret;
2657}
2658
2659static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
2660                          u64 *total_trimmed, u64 start, u64 end, u64 minlen)
2661{
2662        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2663        struct btrfs_free_space *entry;
2664        struct rb_node *node;
2665        int ret = 0;
2666        u64 extent_start;
2667        u64 extent_bytes;
2668        u64 bytes;
2669
2670        while (start < end) {
2671                spin_lock(&ctl->tree_lock);
2672
2673                if (ctl->free_space < minlen) {
2674                        spin_unlock(&ctl->tree_lock);
2675                        break;
2676                }
2677
2678                entry = tree_search_offset(ctl, start, 0, 1);
2679                if (!entry) {
2680                        spin_unlock(&ctl->tree_lock);
2681                        break;
2682                }
2683
2684                /* skip bitmaps */
2685                while (entry->bitmap) {
2686                        node = rb_next(&entry->offset_index);
2687                        if (!node) {
2688                                spin_unlock(&ctl->tree_lock);
2689                                goto out;
2690                        }
2691                        entry = rb_entry(node, struct btrfs_free_space,
2692                                         offset_index);
2693                }
2694
2695                if (entry->offset >= end) {
2696                        spin_unlock(&ctl->tree_lock);
2697                        break;
2698                }
2699
2700                extent_start = entry->offset;
2701                extent_bytes = entry->bytes;
2702                start = max(start, extent_start);
2703                bytes = min(extent_start + extent_bytes, end) - start;
2704                if (bytes < minlen) {
2705                        spin_unlock(&ctl->tree_lock);
2706                        goto next;
2707                }
2708
2709                unlink_free_space(ctl, entry);
2710                kmem_cache_free(btrfs_free_space_cachep, entry);
2711
2712                spin_unlock(&ctl->tree_lock);
2713
2714                ret = do_trimming(block_group, total_trimmed, start, bytes,
2715                                  extent_start, extent_bytes);
2716                if (ret)
2717                        break;
2718next:
2719                start += bytes;
2720
2721                if (fatal_signal_pending(current)) {
2722                        ret = -ERESTARTSYS;
2723                        break;
2724                }
2725
2726                cond_resched();
2727        }
2728out:
2729        return ret;
2730}
2731
2732static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
2733                        u64 *total_trimmed, u64 start, u64 end, u64 minlen)
2734{
2735        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2736        struct btrfs_free_space *entry;
2737        int ret = 0;
2738        int ret2;
2739        u64 bytes;
2740        u64 offset = offset_to_bitmap(ctl, start);
2741
2742        while (offset < end) {
2743                bool next_bitmap = false;
2744
2745                spin_lock(&ctl->tree_lock);
2746
2747                if (ctl->free_space < minlen) {
2748                        spin_unlock(&ctl->tree_lock);
2749                        break;
2750                }
2751
2752                entry = tree_search_offset(ctl, offset, 1, 0);
2753                if (!entry) {
2754                        spin_unlock(&ctl->tree_lock);
2755                        next_bitmap = true;
2756                        goto next;
2757                }
2758
2759                bytes = minlen;
2760                ret2 = search_bitmap(ctl, entry, &start, &bytes);
2761                if (ret2 || start >= end) {
2762                        spin_unlock(&ctl->tree_lock);
2763                        next_bitmap = true;
2764                        goto next;
2765                }
2766
2767                bytes = min(bytes, end - start);
2768                if (bytes < minlen) {
2769                        spin_unlock(&ctl->tree_lock);
2770                        goto next;
2771                }
2772
2773                bitmap_clear_bits(ctl, entry, start, bytes);
2774                if (entry->bytes == 0)
2775                        free_bitmap(ctl, entry);
2776
2777                spin_unlock(&ctl->tree_lock);
2778
2779                ret = do_trimming(block_group, total_trimmed, start, bytes,
2780                                  start, bytes);
2781                if (ret)
2782                        break;
2783next:
2784                if (next_bitmap) {
2785                        offset += BITS_PER_BITMAP * ctl->unit;
2786                } else {
2787                        start += bytes;
2788                        if (start >= offset + BITS_PER_BITMAP * ctl->unit)
2789                                offset += BITS_PER_BITMAP * ctl->unit;
2790                }
2791
2792                if (fatal_signal_pending(current)) {
2793                        ret = -ERESTARTSYS;
2794                        break;
2795                }
2796
2797                cond_resched();
2798        }
2799
2800        return ret;
2801}
2802
2803int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2804                           u64 *trimmed, u64 start, u64 end, u64 minlen)
2805{
2806        int ret;
2807
2808        *trimmed = 0;
2809
2810        ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
2811        if (ret)
2812                return ret;
2813
2814        ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
2815
2816        return ret;
2817}
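
/*
 * Hypothetical fstrim-style caller: discard every free region of at
 * least 1MB across the whole group.  key.objectid is the group's start
 * and key.offset its length; *trimmed reports the bytes discarded.
 */
static inline int example_trim_group(struct btrfs_block_group_cache *bg,
                                     u64 *trimmed)
{
        return btrfs_trim_block_group(bg, trimmed, bg->key.objectid,
                                      bg->key.objectid + bg->key.offset,
                                      1024 * 1024);
}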
2818
2819/*
2820 * Find the left-most item in the cache tree, and then return the
2821 * smallest inode number in the item.
2822 *
2823 * Note: the returned inode number may not be the smallest one in
2824 * the tree, if the left-most item is a bitmap.
2825 */
2826u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
2827{
2828        struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
2829        struct btrfs_free_space *entry = NULL;
2830        u64 ino = 0;
2831
2832        spin_lock(&ctl->tree_lock);
2833
2834        if (RB_EMPTY_ROOT(&ctl->free_space_offset))
2835                goto out;
2836
2837        entry = rb_entry(rb_first(&ctl->free_space_offset),
2838                         struct btrfs_free_space, offset_index);
2839
2840        if (!entry->bitmap) {
2841                ino = entry->offset;
2842
2843                unlink_free_space(ctl, entry);
2844                entry->offset++;
2845                entry->bytes--;
2846                if (!entry->bytes)
2847                        kmem_cache_free(btrfs_free_space_cachep, entry);
2848                else
2849                        link_free_space(ctl, entry);
2850        } else {
2851                u64 offset = 0;
2852                u64 count = 1;
2853                int ret;
2854
2855                ret = search_bitmap(ctl, entry, &offset, &count);
2856                /* Logic error; Should be empty if it can't find anything */
2857                BUG_ON(ret);
2858
2859                ino = offset;
2860                bitmap_clear_bits(ctl, entry, offset, 1);
2861                if (entry->bytes == 0)
2862                        free_bitmap(ctl, entry);
2863        }
2864out:
2865        spin_unlock(&ctl->tree_lock);
2866
2867        return ino;
2868}
2869
2870struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2871                                    struct btrfs_path *path)
2872{
2873        struct inode *inode = NULL;
2874
2875        spin_lock(&root->cache_lock);
2876        if (root->cache_inode)
2877                inode = igrab(root->cache_inode);
2878        spin_unlock(&root->cache_lock);
2879        if (inode)
2880                return inode;
2881
2882        inode = __lookup_free_space_inode(root, path, 0);
2883        if (IS_ERR(inode))
2884                return inode;
2885
2886        spin_lock(&root->cache_lock);
2887        if (!btrfs_fs_closing(root->fs_info))
2888                root->cache_inode = igrab(inode);
2889        spin_unlock(&root->cache_lock);
2890
2891        return inode;
2892}
2893
2894int create_free_ino_inode(struct btrfs_root *root,
2895                          struct btrfs_trans_handle *trans,
2896                          struct btrfs_path *path)
2897{
2898        return __create_free_space_inode(root, trans, path,
2899                                         BTRFS_FREE_INO_OBJECTID, 0);
2900}
2901
2902int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2903{
2904        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2905        struct btrfs_path *path;
2906        struct inode *inode;
2907        int ret = 0;
2908        u64 root_gen = btrfs_root_generation(&root->root_item);
2909
2910        if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2911                return 0;
2912
2913        /*
2914         * If we're unmounting then just return, since this does a search on the
2915         * normal root and not the commit root and we could deadlock.
2916         */
2917        if (btrfs_fs_closing(fs_info))
2918                return 0;
2919
2920        path = btrfs_alloc_path();
2921        if (!path)
2922                return 0;
2923
2924        inode = lookup_free_ino_inode(root, path);
2925        if (IS_ERR(inode))
2926                goto out;
2927
2928        if (root_gen != BTRFS_I(inode)->generation)
2929                goto out_put;
2930
2931        ret = __load_free_space_cache(root, inode, ctl, path, 0);
2932
2933        if (ret < 0)
2934                printk(KERN_ERR "btrfs: failed to load free ino cache for "
2935                       "root %llu\n", root->root_key.objectid);
2936out_put:
2937        iput(inode);
2938out:
2939        btrfs_free_path(path);
2940        return ret;
2941}
2942
2943int btrfs_write_out_ino_cache(struct btrfs_root *root,
2944                              struct btrfs_trans_handle *trans,
2945                              struct btrfs_path *path)
2946{
2947        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2948        struct inode *inode;
2949        int ret;
2950
2951        if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2952                return 0;
2953
2954        inode = lookup_free_ino_inode(root, path);
2955        if (IS_ERR(inode))
2956                return 0;
2957
2958        ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
2959        if (ret) {
2960                btrfs_delalloc_release_metadata(inode, inode->i_size);
2961#ifdef DEBUG
2962                printk(KERN_ERR "btrfs: failed to write free ino cache "
2963                       "for root %llu\n", root->root_key.objectid);
2964#endif
2965        }
2966
2967        iput(inode);
2968        return ret;
2969}
2970