linux/fs/btrfs/extent_map.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
        extent_map_cache = kmem_cache_create("btrfs_extent_map",
                        sizeof(struct extent_map), 0,
                        SLAB_MEM_SPREAD, NULL);
        if (!extent_map_cache)
                return -ENOMEM;
        return 0;
}

void __cold extent_map_exit(void)
{
        kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:               tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
        tree->map = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&tree->modified_extents);
        rwlock_init(&tree->lock);
}

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
        struct extent_map *em;
        em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
        if (!em)
                return NULL;
        RB_CLEAR_NODE(&em->rb_node);
        em->flags = 0;
        em->compress_type = BTRFS_COMPRESS_NONE;
        em->generation = 0;
        refcount_set(&em->refs, 1);
        INIT_LIST_HEAD(&em->list);
        return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:         extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
        if (!em)
                return;
        WARN_ON(refcount_read(&em->refs) == 0);
        if (refcount_dec_and_test(&em->refs)) {
                WARN_ON(extent_map_in_tree(em));
                WARN_ON(!list_empty(&em->list));
                if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
                        kfree(em->map_lookup);
                kmem_cache_free(extent_map_cache, em);
        }
}

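/*
 * Example lifecycle (illustrative sketch; actual callers vary):
 *
 *        em = alloc_extent_map();              // refs == 1
 *        if (!em)
 *                return -ENOMEM;
 *        // ... fill in em->start, em->len, em->block_start ...
 *        refcount_inc(&em->refs);              // another holder takes a ref
 *        ...
 *        free_extent_map(em);                  // drop one reference
 *        free_extent_map(em);                  // last reference frees em
 */
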
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
        if (start + len < start)
                return (u64)-1;
        return start + len;
}

static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct extent_map *entry = NULL;
        struct rb_node *orig_parent = NULL;
        u64 end = range_end(em->start, em->len);
        bool leftmost = true;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct extent_map, rb_node);

                if (em->start < entry->start) {
                        p = &(*p)->rb_left;
                } else if (em->start >= extent_map_end(entry)) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return -EEXIST;
                }
        }

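        /*
         * The descent above only proves that @em does not overlap the node
         * where the search stopped.  Scan forward and backward from the
         * insertion point and bail out with -EEXIST if the new range
         * [em->start, end) overlaps either neighbour, so the tree never
         * ends up holding overlapping extent maps.
         */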
        orig_parent = parent;
        while (parent && em->start >= extent_map_end(entry)) {
                parent = rb_next(parent);
                entry = rb_entry(parent, struct extent_map, rb_node);
        }
        if (parent)
                if (end > entry->start && em->start < extent_map_end(entry))
                        return -EEXIST;

        parent = orig_parent;
        entry = rb_entry(parent, struct extent_map, rb_node);
        while (parent && em->start < entry->start) {
                parent = rb_prev(parent);
                entry = rb_entry(parent, struct extent_map, rb_node);
        }
        if (parent)
                if (end > entry->start && em->start < extent_map_end(entry))
                        return -EEXIST;

        rb_link_node(&em->rb_node, orig_parent, p);
        rb_insert_color_cached(&em->rb_node, root, leftmost);
        return 0;
}

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
                                     struct rb_node **prev_ret,
                                     struct rb_node **next_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct extent_map *entry;
        struct extent_map *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct extent_map, rb_node);
                prev = n;
                prev_entry = entry;

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset >= extent_map_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }

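        /*
         * No extent contains @offset.  The loops below pick the closest
         * neighbours: despite the parameter names, *prev_ret is set to the
         * first extent that starts after @offset (or NULL), and *next_ret to
         * the last extent that ends at or before @offset (or NULL).
         * __lookup_extent_mapping() tries *prev_ret first.
         */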
        if (prev_ret) {
                orig_prev = prev;
                while (prev && offset >= extent_map_end(prev_entry)) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct extent_map, rb_node);
                while (prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
        if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
                return 0;

        /*
         * don't merge compressed extents, we need to know their
         * actual size
         */
        if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
                return 0;

        if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
            test_bit(EXTENT_FLAG_LOGGING, &next->flags))
                return 0;

        /*
         * We don't want to merge stuff that hasn't been written to the log yet
         * since it may not reflect exactly what is on disk, and that would be
         * bad.
         */
        if (!list_empty(&prev->list) || !list_empty(&next->list))
                return 0;

        ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
               prev->block_start != EXTENT_MAP_DELALLOC);

        if (prev->map_lookup || next->map_lookup)
                ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
                       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));

        if (extent_map_end(prev) == next->start &&
            prev->flags == next->flags &&
            prev->map_lookup == next->map_lookup &&
            ((next->block_start == EXTENT_MAP_HOLE &&
              prev->block_start == EXTENT_MAP_HOLE) ||
             (next->block_start == EXTENT_MAP_INLINE &&
              prev->block_start == EXTENT_MAP_INLINE) ||
             (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
              next->block_start == extent_map_block_end(prev)))) {
                return 1;
        }
        return 0;
}

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
        struct extent_map *merge = NULL;
        struct rb_node *rb;

        /*
         * We can't modify an extent map that is in the tree and that is being
         * used by another task, as it can cause that other task to see it in
         * inconsistent state during the merging. We always have 1 reference for
         * the tree and 1 for this task (which is unpinning the extent map or
         * clearing the logging flag), so anything > 2 means it's being used by
         * other tasks too.
         */
        if (refcount_read(&em->refs) > 2)
                return;

        if (em->start != 0) {
                rb = rb_prev(&em->rb_node);
                if (rb)
                        merge = rb_entry(rb, struct extent_map, rb_node);
                if (rb && mergable_maps(merge, em)) {
                        em->start = merge->start;
                        em->orig_start = merge->orig_start;
                        em->len += merge->len;
                        em->block_len += merge->block_len;
                        em->block_start = merge->block_start;
                        em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
                        em->mod_start = merge->mod_start;
                        em->generation = max(em->generation, merge->generation);

                        rb_erase_cached(&merge->rb_node, &tree->map);
                        RB_CLEAR_NODE(&merge->rb_node);
                        free_extent_map(merge);
                }
        }

        rb = rb_next(&em->rb_node);
        if (rb)
                merge = rb_entry(rb, struct extent_map, rb_node);
        if (rb && mergable_maps(em, merge)) {
                em->len += merge->len;
                em->block_len += merge->block_len;
                rb_erase_cached(&merge->rb_node, &tree->map);
                RB_CLEAR_NODE(&merge->rb_node);
                em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
                em->generation = max(em->generation, merge->generation);
                free_extent_map(merge);
        }
}

/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:       tree to unpin the extent in
 * @start:      logical offset in the file
 * @len:        length of the extent
 * @gen:        generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
                       u64 gen)
{
        int ret = 0;
        struct extent_map *em;
        bool prealloc = false;

        write_lock(&tree->lock);
        em = lookup_extent_mapping(tree, start, len);

        WARN_ON(!em || em->start != start);

        if (!em)
                goto out;

        em->generation = gen;
        clear_bit(EXTENT_FLAG_PINNED, &em->flags);
        em->mod_start = em->start;
        em->mod_len = em->len;

        if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
                prealloc = true;
                clear_bit(EXTENT_FLAG_FILLING, &em->flags);
        }

        try_merge_map(tree, em);

        if (prealloc) {
                em->mod_start = em->start;
                em->mod_len = em->len;
        }

        free_extent_map(em);
out:
        write_unlock(&tree->lock);
        return ret;
}
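
/*
 * Example call pattern (illustrative sketch; the real call sites live in
 * other files):
 *
 *        // once the ordered I/O for [file_offset, file_offset + num_bytes)
 *        // has reached disk and the file extent item has been inserted:
 *        unpin_extent_cache(&inode->extent_tree, file_offset, num_bytes,
 *                           trans->transid);
 *
 * Here "inode" stands for a struct btrfs_inode and "trans" for the running
 * transaction; both names are placeholders.
 */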

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
        clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
        if (extent_map_in_tree(em))
                try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
                                        struct extent_map *em,
                                        int modified)
{
        refcount_inc(&em->refs);
        em->mod_start = em->start;
        em->mod_len = em->len;

        if (modified)
                list_move(&em->list, &tree->modified_extents);
        else
                try_merge_map(tree, em);
}

static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
        struct map_lookup *map = em->map_lookup;
        u64 stripe_size = em->orig_block_len;
        int i;

        for (i = 0; i < map->num_stripes; i++) {
                struct btrfs_bio_stripe *stripe = &map->stripes[i];
                struct btrfs_device *device = stripe->dev;

                set_extent_bits_nowait(&device->alloc_state, stripe->physical,
                                 stripe->physical + stripe_size - 1, bits);
        }
}

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
        struct map_lookup *map = em->map_lookup;
        u64 stripe_size = em->orig_block_len;
        int i;

        for (i = 0; i < map->num_stripes; i++) {
                struct btrfs_bio_stripe *stripe = &map->stripes[i];
                struct btrfs_device *device = stripe->dev;

                __clear_extent_bit(&device->alloc_state, stripe->physical,
                                   stripe->physical + stripe_size - 1, bits,
                                   0, 0, NULL, GFP_NOWAIT, NULL);
        }
}

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:       tree to insert new map in
 * @em:         map to insert
 * @modified:   indicate whether the extent map should be added to the tree's
 *              list of modified extents
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
                       struct extent_map *em, int modified)
{
        int ret = 0;

        lockdep_assert_held_write(&tree->lock);

        ret = tree_insert(&tree->map, em);
        if (ret)
                goto out;

        setup_extent_mapping(tree, em, modified);
        if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
                extent_map_device_set_bits(em, CHUNK_ALLOCATED);
                extent_map_device_clear_bits(em, CHUNK_TRIMMED);
        }
out:
        return ret;
}
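
/*
 * Example usage (illustrative sketch; "em_tree" and "em" are placeholders):
 *
 *        write_lock(&em_tree->lock);
 *        ret = add_extent_mapping(em_tree, em, 0);
 *        write_unlock(&em_tree->lock);
 *        // on success the tree holds its own reference to em; the caller
 *        // still drops its original reference with free_extent_map(em)
 */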

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
                        u64 start, u64 len, int strict)
{
        struct extent_map *em;
        struct rb_node *rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *next = NULL;
        u64 end = range_end(start, len);

        rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next);
        if (!rb_node) {
                if (prev)
                        rb_node = prev;
                else if (next)
                        rb_node = next;
                else
                        return NULL;
        }

        em = rb_entry(rb_node, struct extent_map, rb_node);

        if (strict && !(end > em->start && start < extent_map_end(em)))
                return NULL;

        refcount_inc(&em->refs);
        return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:       tree to lookup in
 * @start:      byte offset to start the search
 * @len:        length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len)
{
        return __lookup_extent_mapping(tree, start, len, 1);
}
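
/*
 * Example usage (illustrative sketch; "em_tree" is a placeholder):
 *
 *        read_lock(&em_tree->lock);
 *        em = lookup_extent_mapping(em_tree, start, len);
 *        read_unlock(&em_tree->lock);
 *        if (em) {
 *                // use em->start, em->len, em->block_start, ...
 *                free_extent_map(em);
 *        }
 */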

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:       tree to lookup in
 * @start:      byte offset to start the search
 * @len:        length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len)
{
        return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:       extent tree to remove from
 * @em:         extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
        WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
        rb_erase_cached(&em->rb_node, &tree->map);
        if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
                list_del_init(&em->list);
        if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
                extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
        RB_CLEAR_NODE(&em->rb_node);
}
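
/*
 * Example usage (illustrative sketch): because no reference is dropped here,
 * callers that are done with the map drop the tree's reference themselves:
 *
 *        write_lock(&em_tree->lock);
 *        remove_extent_mapping(em_tree, em);
 *        write_unlock(&em_tree->lock);
 *        free_extent_map(em);        // drop the reference the tree held
 */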

void replace_extent_mapping(struct extent_map_tree *tree,
                            struct extent_map *cur,
                            struct extent_map *new,
                            int modified)
{
        WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
        ASSERT(extent_map_in_tree(cur));
        if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
                list_del_init(&cur->list);
        rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
        RB_CLEAR_NODE(&cur->rb_node);

        setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(struct extent_map *em)
{
        struct rb_node *next;

        next = rb_next(&em->rb_node);
        if (!next)
                return NULL;
        return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
        struct rb_node *prev;

        prev = rb_prev(&em->rb_node);
        if (!prev)
                return NULL;
        return container_of(prev, struct extent_map, rb_node);
}

/*
 * Helper for btrfs_get_extent().  Given an existing extent in the tree (the
 * extent nearest to map_start) and an extent that we want to insert, deal
 * with the overlap and insert the best-fitting new extent into the tree.
 */
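/*
 * Worked example: if @em covers [4K, 20K), the previous extent in the tree
 * ends at 8K and the next one starts at 16K, then @em is trimmed to the free
 * gap [8K, 16K) and, for regular extents, block_start is advanced by the 4K
 * cut from the front before the trimmed map is inserted.
 */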
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
                                         struct extent_map *existing,
                                         struct extent_map *em,
                                         u64 map_start)
{
        struct extent_map *prev;
        struct extent_map *next;
        u64 start;
        u64 end;
        u64 start_diff;

        BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

        if (existing->start > map_start) {
                next = existing;
                prev = prev_extent_map(next);
        } else {
                prev = existing;
                next = next_extent_map(prev);
        }

        start = prev ? extent_map_end(prev) : em->start;
        start = max_t(u64, start, em->start);
        end = next ? next->start : extent_map_end(em);
        end = min_t(u64, end, extent_map_end(em));
        start_diff = start - em->start;
        em->start = start;
        em->len = end - start;
        if (em->block_start < EXTENT_MAP_LAST_BYTE &&
            !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
                em->block_start += start_diff;
                em->block_len = em->len;
        }
        return add_extent_mapping(em_tree, em, 0);
}

/**
 * btrfs_add_extent_mapping - add extent mapping into em_tree
 * @fs_info:    used for tracepoint
 * @em_tree:    the extent tree into which we want to insert the extent mapping
 * @em_in:      extent we are inserting
 * @start:      start of the logical range btrfs_get_extent() is requesting
 * @len:        length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start+len),
 * but they must overlap.
 *
 * Insert @em_in into @em_tree. In case there is an overlapping range, handle
 * the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merging the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
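/*
 * Note on ownership (summary of the code below): the caller must hold the
 * write lock on @em_tree.  On return, *em_in may point to a different,
 * pre-existing extent map (case a), which the caller releases with
 * free_extent_map() as usual, or be set to NULL if a merge attempt failed.
 */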
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
                             struct extent_map_tree *em_tree,
                             struct extent_map **em_in, u64 start, u64 len)
{
        int ret;
        struct extent_map *em = *em_in;

        ret = add_extent_mapping(em_tree, em, 0);
        /*
         * It is possible that someone inserted the extent into the tree
         * while we had the lock dropped.  It is also possible that
         * an overlapping map exists in the tree.
         */
        if (ret == -EEXIST) {
                struct extent_map *existing;

                ret = 0;

                existing = search_extent_mapping(em_tree, start, len);

                trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

                /*
                 * existing will always be non-NULL, since there must be
                 * an extent causing the -EEXIST.
                 */
                if (start >= existing->start &&
                    start < extent_map_end(existing)) {
                        free_extent_map(em);
                        *em_in = existing;
                        ret = 0;
                } else {
                        u64 orig_start = em->start;
                        u64 orig_len = em->len;

                        /*
                         * The existing extent map is the one nearest to
                         * the [start, start + len) range which overlaps
                         */
                        ret = merge_extent_mapping(em_tree, existing,
                                                   em, start);
                        if (ret) {
                                free_extent_map(em);
                                *em_in = NULL;
                                WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
                                          ret, existing->start, existing->len,
                                          orig_start, orig_len);
                        }
                        free_extent_map(existing);
                }
        }

        ASSERT(ret == 0 || ret == -EEXIST);
        return ret;
}