/* linux/fs/btrfs/extent_map.c */
   1#include <linux/err.h>
   2#include <linux/slab.h>
   3#include <linux/module.h>
   4#include <linux/spinlock.h>
   5#include <linux/hardirq.h>
   6#include "ctree.h"
   7#include "extent_map.h"
   8
   9
  10static struct kmem_cache *extent_map_cache;
  11
  12int __init extent_map_init(void)
  13{
  14        extent_map_cache = kmem_cache_create("extent_map",
  15                        sizeof(struct extent_map), 0,
  16                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
  17        if (!extent_map_cache)
  18                return -ENOMEM;
  19        return 0;
  20}
  21
  22void extent_map_exit(void)
  23{
  24        if (extent_map_cache)
  25                kmem_cache_destroy(extent_map_cache);
  26}
  27
  28/**
  29 * extent_map_tree_init - initialize extent map tree
  30 * @tree:               tree to initialize
  31 * @mask:               flags for memory allocations during tree operations
  32 *
  33 * Initialize the extent tree @tree.  Should be called for each new inode
  34 * or other user of the extent_map interface.
  35 */
  36void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
  37{
  38        tree->map = RB_ROOT;
  39        rwlock_init(&tree->lock);
  40}
  41
  42/**
  43 * alloc_extent_map - allocate new extent map structure
  44 * @mask:       memory allocation flags
  45 *
  46 * Allocate a new extent_map structure.  The new structure is
  47 * returned with a reference count of one and needs to be
  48 * freed using free_extent_map()
  49 */
  50struct extent_map *alloc_extent_map(gfp_t mask)
  51{
  52        struct extent_map *em;
  53        em = kmem_cache_alloc(extent_map_cache, mask);
  54        if (!em)
  55                return NULL;
  56        em->in_tree = 0;
  57        em->flags = 0;
  58        em->compress_type = BTRFS_COMPRESS_NONE;
  59        atomic_set(&em->refs, 1);
  60        return em;
  61}
  62
  63/**
  64 * free_extent_map - drop reference count of an extent_map
  65 * @em:         extent map beeing releasead
  66 *
  67 * Drops the reference out on @em by one and free the structure
  68 * if the reference count hits zero.
  69 */
  70void free_extent_map(struct extent_map *em)
  71{
  72        if (!em)
  73                return;
  74        WARN_ON(atomic_read(&em->refs) == 0);
  75        if (atomic_dec_and_test(&em->refs)) {
  76                WARN_ON(em->in_tree);
  77                kmem_cache_free(extent_map_cache, em);
  78        }
  79}
  80
  81static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
  82                                   struct rb_node *node)
  83{
  84        struct rb_node **p = &root->rb_node;
  85        struct rb_node *parent = NULL;
  86        struct extent_map *entry;
  87
  88        while (*p) {
  89                parent = *p;
  90                entry = rb_entry(parent, struct extent_map, rb_node);
  91
  92                WARN_ON(!entry->in_tree);
  93
  94                if (offset < entry->start)
  95                        p = &(*p)->rb_left;
  96                else if (offset >= extent_map_end(entry))
  97                        p = &(*p)->rb_right;
  98                else
  99                        return parent;
 100        }
 101
 102        entry = rb_entry(node, struct extent_map, rb_node);
 103        entry->in_tree = 1;
 104        rb_link_node(node, parent, p);
 105        rb_insert_color(node, root);
 106        return NULL;
 107}
 108
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 *
 * Returns the node whose range contains @offset, or NULL on a miss.
 * On a miss, *prev_ret is set to the first in-order node ending after
 * @offset (or NULL) and *next_ret to the last in-order node starting
 * at or before @offset (or NULL), so callers can look at neighbors.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	/* standard rbtree descent; remember the last node visited */
	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;	/* exact hit: entry contains offset */
	}

	if (prev_ret) {
		orig_prev = prev;
		/*
		 * Walk forward past nodes that end at or before @offset.
		 * rb_entry() on a NULL prev is only address arithmetic;
		 * the loop condition checks prev before dereferencing.
		 */
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		/* walk backward past nodes that start after @offset */
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
 158
 159/* check to see if two extent_map structs are adjacent and safe to merge */
 160static int mergable_maps(struct extent_map *prev, struct extent_map *next)
 161{
 162        if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
 163                return 0;
 164
 165        /*
 166         * don't merge compressed extents, we need to know their
 167         * actual size
 168         */
 169        if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
 170                return 0;
 171
 172        if (extent_map_end(prev) == next->start &&
 173            prev->flags == next->flags &&
 174            prev->bdev == next->bdev &&
 175            ((next->block_start == EXTENT_MAP_HOLE &&
 176              prev->block_start == EXTENT_MAP_HOLE) ||
 177             (next->block_start == EXTENT_MAP_INLINE &&
 178              prev->block_start == EXTENT_MAP_INLINE) ||
 179             (next->block_start == EXTENT_MAP_DELALLOC &&
 180              prev->block_start == EXTENT_MAP_DELALLOC) ||
 181             (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
 182              next->block_start == extent_map_block_end(prev)))) {
 183                return 1;
 184        }
 185        return 0;
 186}
 187
/*
 * unpin_extent_cache - unpin a cached extent and merge it with neighbors
 * @tree:	tree holding the extent
 * @start:	logical start of the extent (must match the map exactly)
 * @len:	length of the extent
 *
 * Clears EXTENT_FLAG_PINNED on the map at @start, then attempts the
 * backward/forward merges that were deferred while the extent was
 * pinned (mergable_maps() refuses pinned extents).  Takes tree->lock
 * for writing internally.  Always returns 0.
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	/* callers are expected to unpin exactly the range they pinned */
	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* try a backward merge with the previous map, absorbing it into em */
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	/* and a forward merge with the next map */
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		/*
		 * NOTE(review): adds merge->len rather than merge->block_len
		 * — assumes len == block_len for uncompressed (mergable)
		 * extents; verify against extent_map invariants.
		 */
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}

	/* drop the reference taken by lookup_extent_mapping() */
	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;

}
 237
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 *
 * The caller is expected to hold tree->lock for writing (this function
 * takes no locks itself).  Returns 0 on success or -EEXIST if the
 * range is already mapped.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	/* refuse to insert over any mapping that intersects the range */
	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		/* tree_insert found an overlap the lookup missed; bail out */
		ret = -EEXIST;
		goto out;
	}
	/* the tree now holds its own reference on em */
	atomic_inc(&em->refs);
	/* try a backward merge, absorbing the previous map into em */
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	/* and a forward merge with the next map */
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		/*
		 * NOTE(review): adds merge->len rather than merge->block_len
		 * — assumes len == block_len for uncompressed (mergable)
		 * extents; verify against extent_map invariants.
		 */
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}
 295
 296/* simple helper to do math around the end of an extent, handling wrap */
 297static u64 range_end(u64 start, u64 len)
 298{
 299        if (start + len < start)
 300                return (u64)-1;
 301        return start + len;
 302}
 303
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 *
 * A reference is taken on the returned map; drop it with
 * free_extent_map().  The caller is expected to hold tree->lock.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	/* no exact hit: check whether a neighbor still intersects the range */
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	/*
	 * NOTE(review): __tree_search() only returns a node or NULL, so
	 * this branch looks unreachable — kept as defensive code.
	 */
	if (IS_ERR(rb_node)) {
		em = ERR_CAST(rb_node);
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}
 355
 356/**
 357 * search_extent_mapping - find a nearby extent map
 358 * @tree:       tree to lookup in
 359 * @start:      byte offset to start the search
 360 * @len:        length of the lookup range
 361 *
 362 * Find and return the first extent_map struct in @tree that intersects the
 363 * [start, len] range.
 364 *
 365 * If one can't be found, any nearby extent may be returned
 366 */
 367struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
 368                                         u64 start, u64 len)
 369{
 370        struct extent_map *em;
 371        struct rb_node *rb_node;
 372        struct rb_node *prev = NULL;
 373        struct rb_node *next = NULL;
 374
 375        rb_node = __tree_search(&tree->map, start, &prev, &next);
 376        if (!rb_node && prev) {
 377                em = rb_entry(prev, struct extent_map, rb_node);
 378                goto found;
 379        }
 380        if (!rb_node && next) {
 381                em = rb_entry(next, struct extent_map, rb_node);
 382                goto found;
 383        }
 384        if (!rb_node) {
 385                em = NULL;
 386                goto out;
 387        }
 388        if (IS_ERR(rb_node)) {
 389                em = ERR_CAST(rb_node);
 390                goto out;
 391        }
 392        em = rb_entry(rb_node, struct extent_map, rb_node);
 393        goto found;
 394
 395        em = NULL;
 396        goto out;
 397
 398found:
 399        atomic_inc(&em->refs);
 400out:
 401        return em;
 402}
 403
 404/**
 405 * remove_extent_mapping - removes an extent_map from the extent tree
 406 * @tree:       extent tree to remove from
 407 * @em:         extent map beeing removed
 408 *
 409 * Removes @em from @tree.  No reference counts are dropped, and no checks
 410 * are done to see if the range is in use
 411 */
 412int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
 413{
 414        int ret = 0;
 415
 416        WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
 417        rb_erase(&em->rb_node, &tree->map);
 418        em->in_tree = 0;
 419        return ret;
 420}
 421