linux/fs/btrfs/delayed-ref.c
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * Insert a new head ref into the head ref rbtree.  Returns the existing
 * entry if one is already present for the same bytenr, NULL otherwise.
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

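/*
 * Try to take the mutex protecting a delayed ref head.  If the trylock
 * fails we temporarily drop the delayed_refs spinlock, sleep on the mutex
 * and then re-check that the head is still in the tree.  Returns 0 with
 * the mutex held, or -EAGAIN if the head went away while we were blocked
 * and the caller has to look it up again.
 */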
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

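/*
 * Remove a delayed ref from the tracking structures: head refs are erased
 * from the href rbtree, normal refs are unlinked from their head's ref_list.
 * Drops one reference on the node and updates the entry counters.
 */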
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

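/*
 * Walk the ref_list of a head and try to fold other refs into @ref.
 * Refs at or above @seq, of a different type, or pointing at a different
 * root/parent are skipped.  Returns true when @ref itself was dropped
 * (merged into another entry or its ref_mod reached zero), which tells
 * the caller to restart its scan of the list.
 */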
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}

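/*
 * Merge delayed refs queued on a locked head so that add/drop pairs cancel
 * out before they are run.  Refs at or above the lowest sequence number
 * still pinned in tree_mod_seq_list are left alone so that backref walking
 * keeps seeing them.  Data refs are skipped; only metadata is merged here.
 */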
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}

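/*
 * Returns 1 if the given sequence number is at or above the lowest sequence
 * number still pinned in fs_info->tree_mod_seq_list, which means the
 * corresponding delayed refs must be held back for now; returns 0 otherwise.
 */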
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

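/*
 * Pick the next delayed ref head to run, starting at run_delayed_start and
 * wrapping around to the start of the rbtree once.  Heads that are already
 * being processed are skipped.  The returned head is marked as processing
 * and run_delayed_start is advanced past it; NULL means there is nothing
 * left to do right now.
 */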
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node at the tail of the list, or merge it with
 * the current tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr.
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * Update the reference mod on the head to reflect this new operation.
	 * Only the existing-head case needs the lock, because that head could
	 * be under processing right now; for refs we just added we know we're
	 * a-ok.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative one, or vice
	 * versa, we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *qexisting;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type  = 0;
	ref->action  = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
							     qrecord);
		if (qexisting)
			kfree(qrecord);
	}

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated;
	 * here we free a ref that was allocated by the caller.  Follow the
	 * existing practice for now, but this needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (fs_info->quota_enabled && is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, 0, 0, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (fs_info->quota_enabled && is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

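/*
 * Record qgroup reserved space against an already queued delayed ref head
 * so the reservation can be released when that head is run.  Returns -ENOENT
 * if no head exists for the given bytenr, and does nothing when quotas are
 * disabled or the root is not a filesystem tree.
 */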
int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
				     struct btrfs_trans_handle *trans,
				     u64 ref_root, u64 bytenr, u64 num_bytes)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *ref_head;
	int ret = 0;

	if (!fs_info->quota_enabled || !is_fstree(ref_root))
		return 0;

	delayed_refs = &trans->transaction->delayed_refs;

	spin_lock(&delayed_refs->lock);
	ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
	if (!ref_head) {
		ret = -ENOENT;
		goto out;
	}
	WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
	ref_head->qgroup_ref_root = ref_root;
	ref_head->qgroup_reserved = num_bytes;
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

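/*
 * Queue an extent_op (key and/or flags update) for the given extent by
 * adding a delayed ref head with the BTRFS_UPDATE_DELAYED_HEAD action;
 * no reference count change is recorded for it.
 */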
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if any were found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}