linux/fs/btrfs/delayed-ref.c
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1)
{
        if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
                      struct btrfs_delayed_ref_node *ref1)
{
        if (ref1->bytenr < ref2->bytenr)
                return -1;
        if (ref1->bytenr > ref2->bytenr)
                return 1;
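        /*
         * the head node sorts after every other ref that shares its
         * bytenr, so callers can find all of an extent's refs by
         * walking backwards from its head
         */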
        if (ref1->is_head && ref2->is_head)
                return 0;
        if (ref2->is_head)
                return -1;
        if (ref1->is_head)
                return 1;
        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
                return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
                                      btrfs_delayed_node_to_tree_ref(ref1));
        } else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
                   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
                return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
                                      btrfs_delayed_node_to_data_ref(ref1));
        }
        BUG();
        return 0;
}

/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
                                                  struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        struct btrfs_delayed_ref_node *ins;
        int cmp;

        ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 rb_node);

                cmp = comp_entry(entry, ins);
                if (cmp < 0)
                        p = &(*p)->rb_left;
                else if (cmp > 0)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If last is non-NULL, it is set to the last entry examined during the
 * search, even when no head entry is found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                                  u64 bytenr,
                                  struct btrfs_delayed_ref_node **last)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_delayed_ref_node *entry;
        int cmp;

        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
                WARN_ON(!entry->in_tree);
                if (last)
                        *last = entry;

                if (bytenr < entry->bytenr)
                        cmp = -1;
                else if (bytenr > entry->bytenr)
                        cmp = 1;
                else if (!btrfs_delayed_ref_is_head(entry))
                        cmp = 1;
                else
                        cmp = 0;

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return entry;
        }
        return NULL;
}

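/*
 * take the mutex on a delayed ref head so its refs can be processed.
 * The caller must hold delayed_refs->lock; if the mutex is contended,
 * that spinlock is dropped and retaken around the blocking mutex_lock().
 * Returns 0 with the head locked, or -EAGAIN if the head was run and
 * removed from the tree while we waited.
 */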
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}

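/*
 * build a cluster of at most 32 delayed ref heads for processing,
 * beginning at the head at or near 'start' and walking forward in
 * bytenr order.  Heads that already sit on another cluster list end
 * the cluster early so the extents we process stay close together on
 * disk.  Returns 0 if any heads were added to 'cluster' and 1 if no
 * unclustered heads were found at all.
 */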
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 start)
{
        int count = 0;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *node;
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_head *head;

        delayed_refs = &trans->transaction->delayed_refs;
        if (start == 0) {
                node = rb_first(&delayed_refs->root);
        } else {
                ref = NULL;
                find_ref_head(&delayed_refs->root, start, &ref);
                if (ref) {
                        struct btrfs_delayed_ref_node *tmp;

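                        /*
                         * find_ref_head may have left us on an entry
                         * beyond the start point; walk backwards so
                         * the scan below doesn't skip any heads at or
                         * after start
                         */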
                        node = rb_prev(&ref->rb_node);
                        while (node) {
                                tmp = rb_entry(node,
                                               struct btrfs_delayed_ref_node,
                                               rb_node);
                                if (tmp->bytenr < start)
                                        break;
                                ref = tmp;
                                node = rb_prev(&ref->rb_node);
                        }
                        node = &ref->rb_node;
                } else
                        node = rb_first(&delayed_refs->root);
        }
again:
        while (node && count < 32) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                if (btrfs_delayed_ref_is_head(ref)) {
                        head = btrfs_delayed_node_to_head(ref);
                        if (list_empty(&head->cluster)) {
                                list_add_tail(&head->cluster, cluster);
                                delayed_refs->run_delayed_start =
                                        head->node.bytenr;
                                count++;

                                WARN_ON(delayed_refs->num_heads_ready == 0);
                                delayed_refs->num_heads_ready--;
                        } else if (count) {
                                /* the goal of the clustering is to find extents
                                 * that are likely to end up in the same extent
                                 * leaf on disk.  So, we don't want them spread
                                 * all over the tree.  Stop now if we've hit
                                 * a head that was already in use
                                 */
                                break;
                        }
                }
                node = rb_next(node);
        }
        if (count) {
                return 0;
        } else if (start) {
                /*
                 * we've gone to the end of the rbtree without finding any
                 * clusters.  start from the beginning and try again
                 */
                start = 0;
                node = rb_first(&delayed_refs->root);
                goto again;
        }
        return 1;
}

/*
 * This checks to see if there are any delayed refs in the
 * btree for a given bytenr.  It returns one if it finds any
 * and zero otherwise.
 *
 * If it only finds a head node, it returns 0.
 *
 * The idea is to use this when deciding if you can safely delete an
 * extent from the extent allocation tree.  There may be a pending
 * ref in the rbtree that adds or removes references, so as long as this
 * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
 * allocation tree.
 */
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *prev_node;
        int ret = 0;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
        if (ref) {
                prev_node = rb_prev(&ref->rb_node);
                if (!prev_node)
                        goto out;
                ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
                               rb_node);
                if (ref->bytenr == bytenr)
                        ret = 1;
        }
out:
        spin_unlock(&delayed_refs->lock);
        return ret;
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
                    struct btrfs_delayed_ref_root *delayed_refs,
                    struct btrfs_delayed_ref_node *existing,
                    struct btrfs_delayed_ref_node *update)
{
        if (update->action != existing->action) {
                /*
                 * this is effectively undoing either an add or a
                 * drop.  We decrement the ref_mod, and if it goes
                 * down to zero we just delete the entry without
                 * ever changing the extent allocation tree.
                 */
                existing->ref_mod--;
                if (existing->ref_mod == 0) {
                        rb_erase(&existing->rb_node,
                                 &delayed_refs->root);
                        existing->in_tree = 0;
                        btrfs_put_delayed_ref(existing);
                        delayed_refs->num_entries--;
                        if (trans->delayed_ref_updates)
                                trans->delayed_ref_updates--;
                } else {
                        WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
        } else {
                WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                        existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
                /*
                 * the action on the existing ref matches
                 * the action on the ref we're trying to add.
                 * Bump the ref_mod by one so the backref that
                 * is eventually added/removed has the correct
                 * reference count
                 */
                existing->ref_mod += update->ref_mod;
        }
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        if (ref->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = 1;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = 1;
                        }
                        kfree(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation
         */
        existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
                                        struct btrfs_delayed_ref_node *ref,
                                        u64 bytenr, u64 num_bytes,
                                        int action, int is_data)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type  = 0;
        ref->action  = 0;
        ref->is_head = 1;
        ref->in_tree = 1;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;

        INIT_LIST_HEAD(&head_ref->cluster);
        mutex_init(&head_ref->mutex);

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_head_ref(existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kfree(ref);
        } else {
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
        return 0;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
                                         u64 ref_root, int level, int action)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        if (parent) {
                full_ref->parent = parent;
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        } else {
                full_ref->root = ref_root;
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        }
        full_ref->level = level;

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_ref(trans, delayed_refs, existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kfree(ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
        return 0;
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
                                         u64 ref_root, u64 owner, u64 offset,
                                         int action)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        if (parent) {
                full_ref->parent = parent;
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        } else {
                full_ref->root = ref_root;
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        full_ref->objectid = owner;
        full_ref->offset = offset;

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_ref(trans, delayed_refs, existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kfree(ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
        return 0;
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root,  int level, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmalloc(sizeof(*ref), GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref) {
                kfree(ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
                                   action, 0);
        BUG_ON(ret);

        ret = add_delayed_tree_ref(trans, &ref->node, bytenr, num_bytes,
                                   parent, ref_root, level, action);
        BUG_ON(ret);
        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmalloc(sizeof(*ref), GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref) {
                kfree(ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
                                   action, 1);
        BUG_ON(ret);

        ret = add_delayed_data_ref(trans, &ref->node, bytenr, num_bytes,
                                   parent, ref_root, owner, offset, action);
        BUG_ON(ret);
        spin_unlock(&delayed_refs->lock);
        return 0;
}

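/*
 * record an extent_op (a pending key or flags update) against the head
 * node for this extent.  The op rides along on the delayed ref head and
 * is applied when the head is run, without adding or dropping any refs.
 */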
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        ret = add_delayed_ref_head(trans, &head_ref->node, bytenr,
                                   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                                   extent_op->is_data);
        BUG_ON(ret);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
        if (ref)
                return btrfs_delayed_node_to_head(ref);
        return NULL;
}

/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 *
 * The main point of this call is to add and remove a backreference in a single
 * shot, taking the lock only once, and only searching for the head node once.
 *
 * It is the same as doing a ref add and delete in two separate calls.
 */
#if 0
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
                          u64 bytenr, u64 num_bytes, u64 orig_parent,
                          u64 parent, u64 orig_ref_root, u64 ref_root,
                          u64 orig_ref_generation, u64 ref_generation,
                          u64 owner_objectid, int pin)
{
        struct btrfs_delayed_ref *ref;
        struct btrfs_delayed_ref *old_ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        ref = kmalloc(sizeof(*ref), GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
        if (!old_ref) {
                kfree(ref);
                return -ENOMEM;
        }

        /*
         * the parent = 0 case comes from cases where we don't actually
         * know the parent yet.  It will get updated later via an add/drop
         * pair.
         */
        if (parent == 0)
                parent = bytenr;
        if (orig_parent == 0)
                orig_parent = bytenr;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref) {
                kfree(ref);
                kfree(old_ref);
                return -ENOMEM;
        }
        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
                                      (u64)-1, 0, 0, 0,
                                      BTRFS_UPDATE_DELAYED_HEAD, 0);
        BUG_ON(ret);

        ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
                                      parent, ref_root, ref_generation,
                                      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
        BUG_ON(ret);

        ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
                                      orig_parent, orig_ref_root,
                                      orig_ref_generation, owner_objectid,
                                      BTRFS_DROP_DELAYED_REF, pin);
        BUG_ON(ret);
        spin_unlock(&delayed_refs->lock);
        return 0;
}
#endif