linux/fs/btrfs/delayed-inode.c
   1/*
   2 * Copyright (C) 2011 Fujitsu.  All rights reserved.
   3 * Written by Miao Xie <miaox@cn.fujitsu.com>
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public
   7 * License v2 as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  12 * General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public
  15 * License along with this program; if not, write to the
  16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   17 * Boston, MA 02111-1307, USA.
  18 */
  19
  20#include <linux/slab.h>
  21#include "delayed-inode.h"
  22#include "disk-io.h"
  23#include "transaction.h"
  24#include "ctree.h"
  25
  26#define BTRFS_DELAYED_WRITEBACK         512
  27#define BTRFS_DELAYED_BACKGROUND        128
  28#define BTRFS_DELAYED_BATCH             16
  29
  30static struct kmem_cache *delayed_node_cache;
  31
  32int __init btrfs_delayed_inode_init(void)
  33{
  34        delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
  35                                        sizeof(struct btrfs_delayed_node),
  36                                        0,
  37                                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
  38                                        NULL);
  39        if (!delayed_node_cache)
  40                return -ENOMEM;
  41        return 0;
  42}
  43
  44void btrfs_delayed_inode_exit(void)
  45{
  46        if (delayed_node_cache)
  47                kmem_cache_destroy(delayed_node_cache);
  48}
  49
  50static inline void btrfs_init_delayed_node(
  51                                struct btrfs_delayed_node *delayed_node,
  52                                struct btrfs_root *root, u64 inode_id)
  53{
  54        delayed_node->root = root;
  55        delayed_node->inode_id = inode_id;
  56        atomic_set(&delayed_node->refs, 0);
  57        delayed_node->count = 0;
  58        delayed_node->flags = 0;
  59        delayed_node->ins_root = RB_ROOT;
  60        delayed_node->del_root = RB_ROOT;
  61        mutex_init(&delayed_node->mutex);
  62        delayed_node->index_cnt = 0;
  63        INIT_LIST_HEAD(&delayed_node->n_list);
  64        INIT_LIST_HEAD(&delayed_node->p_list);
  65        delayed_node->bytes_reserved = 0;
  66        memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
  67}
  68
  69static inline int btrfs_is_continuous_delayed_item(
  70                                        struct btrfs_delayed_item *item1,
  71                                        struct btrfs_delayed_item *item2)
  72{
  73        if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
  74            item1->key.objectid == item2->key.objectid &&
  75            item1->key.type == item2->key.type &&
  76            item1->key.offset + 1 == item2->key.offset)
  77                return 1;
  78        return 0;
  79}
  80
  81static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
  82                                                        struct btrfs_root *root)
  83{
  84        return root->fs_info->delayed_root;
  85}
  86
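     /*
      * Look up the delayed node cached for this inode: check the inode itself
      * first, then the root's radix tree.  A reference is taken on the node
      * that is returned; NULL means no delayed node exists yet.
      */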
  87static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
  88{
  89        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
  90        struct btrfs_root *root = btrfs_inode->root;
  91        u64 ino = btrfs_ino(inode);
  92        struct btrfs_delayed_node *node;
  93
  94        node = ACCESS_ONCE(btrfs_inode->delayed_node);
  95        if (node) {
  96                atomic_inc(&node->refs);
  97                return node;
  98        }
  99
 100        spin_lock(&root->inode_lock);
 101        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
 102        if (node) {
 103                if (btrfs_inode->delayed_node) {
 104                        atomic_inc(&node->refs);        /* can be accessed */
 105                        BUG_ON(btrfs_inode->delayed_node != node);
 106                        spin_unlock(&root->inode_lock);
 107                        return node;
 108                }
 109                btrfs_inode->delayed_node = node;
 110                /* can be accessed and cached in the inode */
 111                atomic_add(2, &node->refs);
 112                spin_unlock(&root->inode_lock);
 113                return node;
 114        }
 115        spin_unlock(&root->inode_lock);
 116
 117        return NULL;
 118}
 119
  120/* Will return either the node or ERR_PTR(-ENOMEM) */
 121static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 122                                                        struct inode *inode)
 123{
 124        struct btrfs_delayed_node *node;
 125        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
 126        struct btrfs_root *root = btrfs_inode->root;
 127        u64 ino = btrfs_ino(inode);
 128        int ret;
 129
 130again:
 131        node = btrfs_get_delayed_node(inode);
 132        if (node)
 133                return node;
 134
 135        node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
 136        if (!node)
 137                return ERR_PTR(-ENOMEM);
 138        btrfs_init_delayed_node(node, root, ino);
 139
 140        /* cached in the btrfs inode and can be accessed */
 141        atomic_add(2, &node->refs);
 142
 143        ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
 144        if (ret) {
 145                kmem_cache_free(delayed_node_cache, node);
 146                return ERR_PTR(ret);
 147        }
 148
 149        spin_lock(&root->inode_lock);
 150        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 151        if (ret == -EEXIST) {
 152                kmem_cache_free(delayed_node_cache, node);
 153                spin_unlock(&root->inode_lock);
 154                radix_tree_preload_end();
 155                goto again;
 156        }
 157        btrfs_inode->delayed_node = node;
 158        spin_unlock(&root->inode_lock);
 159        radix_tree_preload_end();
 160
 161        return node;
 162}
 163
 164/*
 165 * Call it when holding delayed_node->mutex
 166 *
 167 * If mod = 1, add this node into the prepared list.
 168 */
 169static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 170                                     struct btrfs_delayed_node *node,
 171                                     int mod)
 172{
 173        spin_lock(&root->lock);
 174        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 175                if (!list_empty(&node->p_list))
 176                        list_move_tail(&node->p_list, &root->prepare_list);
 177                else if (mod)
 178                        list_add_tail(&node->p_list, &root->prepare_list);
 179        } else {
 180                list_add_tail(&node->n_list, &root->node_list);
 181                list_add_tail(&node->p_list, &root->prepare_list);
 182                atomic_inc(&node->refs);        /* inserted into list */
 183                root->nodes++;
 184                set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 185        }
 186        spin_unlock(&root->lock);
 187}
 188
 189/* Call it when holding delayed_node->mutex */
 190static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 191                                       struct btrfs_delayed_node *node)
 192{
 193        spin_lock(&root->lock);
 194        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 195                root->nodes--;
 196                atomic_dec(&node->refs);        /* not in the list */
 197                list_del_init(&node->n_list);
 198                if (!list_empty(&node->p_list))
 199                        list_del_init(&node->p_list);
 200                clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 201        }
 202        spin_unlock(&root->lock);
 203}
 204
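     /*
      * Return the first node on the delayed root's node list with a reference
      * held, or NULL if the list is empty.  The caller must drop the reference.
      */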
 205static struct btrfs_delayed_node *btrfs_first_delayed_node(
 206                        struct btrfs_delayed_root *delayed_root)
 207{
 208        struct list_head *p;
 209        struct btrfs_delayed_node *node = NULL;
 210
 211        spin_lock(&delayed_root->lock);
 212        if (list_empty(&delayed_root->node_list))
 213                goto out;
 214
 215        p = delayed_root->node_list.next;
 216        node = list_entry(p, struct btrfs_delayed_node, n_list);
 217        atomic_inc(&node->refs);
 218out:
 219        spin_unlock(&delayed_root->lock);
 220
 221        return node;
 222}
 223
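     /*
      * Return the node that follows @node on the delayed root's node list,
      * with a reference held.  If @node is no longer on the list, start from
      * the head of the list.  Returns NULL when there is no next node.
      */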
 224static struct btrfs_delayed_node *btrfs_next_delayed_node(
 225                                                struct btrfs_delayed_node *node)
 226{
 227        struct btrfs_delayed_root *delayed_root;
 228        struct list_head *p;
 229        struct btrfs_delayed_node *next = NULL;
 230
 231        delayed_root = node->root->fs_info->delayed_root;
 232        spin_lock(&delayed_root->lock);
 233        if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 234                /* not in the list */
 235                if (list_empty(&delayed_root->node_list))
 236                        goto out;
 237                p = delayed_root->node_list.next;
 238        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
 239                goto out;
 240        else
 241                p = node->n_list.next;
 242
 243        next = list_entry(p, struct btrfs_delayed_node, n_list);
 244        atomic_inc(&next->refs);
 245out:
 246        spin_unlock(&delayed_root->lock);
 247
 248        return next;
 249}
 250
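     /*
      * Drop one reference on the delayed node.  If it still has pending items
      * it is (re)queued, otherwise it is dequeued; once the last reference is
      * gone the node is removed from the radix tree and freed.
      */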
 251static void __btrfs_release_delayed_node(
 252                                struct btrfs_delayed_node *delayed_node,
 253                                int mod)
 254{
 255        struct btrfs_delayed_root *delayed_root;
 256
 257        if (!delayed_node)
 258                return;
 259
 260        delayed_root = delayed_node->root->fs_info->delayed_root;
 261
 262        mutex_lock(&delayed_node->mutex);
 263        if (delayed_node->count)
 264                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
 265        else
 266                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 267        mutex_unlock(&delayed_node->mutex);
 268
 269        if (atomic_dec_and_test(&delayed_node->refs)) {
 270                struct btrfs_root *root = delayed_node->root;
 271                spin_lock(&root->inode_lock);
 272                if (atomic_read(&delayed_node->refs) == 0) {
 273                        radix_tree_delete(&root->delayed_nodes_tree,
 274                                          delayed_node->inode_id);
 275                        kmem_cache_free(delayed_node_cache, delayed_node);
 276                }
 277                spin_unlock(&root->inode_lock);
 278        }
 279}
 280
 281static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
 282{
 283        __btrfs_release_delayed_node(node, 0);
 284}
 285
 286static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 287                                        struct btrfs_delayed_root *delayed_root)
 288{
 289        struct list_head *p;
 290        struct btrfs_delayed_node *node = NULL;
 291
 292        spin_lock(&delayed_root->lock);
 293        if (list_empty(&delayed_root->prepare_list))
 294                goto out;
 295
 296        p = delayed_root->prepare_list.next;
 297        list_del_init(p);
 298        node = list_entry(p, struct btrfs_delayed_node, p_list);
 299        atomic_inc(&node->refs);
 300out:
 301        spin_unlock(&delayed_root->lock);
 302
 303        return node;
 304}
 305
 306static inline void btrfs_release_prepared_delayed_node(
 307                                        struct btrfs_delayed_node *node)
 308{
 309        __btrfs_release_delayed_node(node, 1);
 310}
 311
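     /* Allocate a delayed item with room for @data_len bytes of item data. */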
 312static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 313{
 314        struct btrfs_delayed_item *item;
 315        item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
 316        if (item) {
 317                item->data_len = data_len;
 318                item->ins_or_del = 0;
 319                item->bytes_reserved = 0;
 320                item->delayed_node = NULL;
 321                atomic_set(&item->refs, 1);
 322        }
 323        return item;
 324}
 325
 326/*
  327 * __btrfs_lookup_delayed_item - look up a delayed item by key
  328 * @root:         the rb-root (insertion or deletion tree) to search
  329 * @key:          the key to look up
  330 * @prev:         used to store the prev item if the right item isn't found
  331 * @next:         used to store the next item if the right item isn't found
  332 *
  333 * Note: if the exact item is not found, NULL is returned and, when requested,
  334 * the previous and next items are stored in @prev and @next.
  335 */
 336static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
 337                                struct rb_root *root,
 338                                struct btrfs_key *key,
 339                                struct btrfs_delayed_item **prev,
 340                                struct btrfs_delayed_item **next)
 341{
 342        struct rb_node *node, *prev_node = NULL;
 343        struct btrfs_delayed_item *delayed_item = NULL;
 344        int ret = 0;
 345
 346        node = root->rb_node;
 347
 348        while (node) {
 349                delayed_item = rb_entry(node, struct btrfs_delayed_item,
 350                                        rb_node);
 351                prev_node = node;
 352                ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
 353                if (ret < 0)
 354                        node = node->rb_right;
 355                else if (ret > 0)
 356                        node = node->rb_left;
 357                else
 358                        return delayed_item;
 359        }
 360
 361        if (prev) {
 362                if (!prev_node)
 363                        *prev = NULL;
 364                else if (ret < 0)
 365                        *prev = delayed_item;
 366                else if ((node = rb_prev(prev_node)) != NULL) {
 367                        *prev = rb_entry(node, struct btrfs_delayed_item,
 368                                         rb_node);
 369                } else
 370                        *prev = NULL;
 371        }
 372
 373        if (next) {
 374                if (!prev_node)
 375                        *next = NULL;
 376                else if (ret > 0)
 377                        *next = delayed_item;
 378                else if ((node = rb_next(prev_node)) != NULL) {
 379                        *next = rb_entry(node, struct btrfs_delayed_item,
 380                                         rb_node);
 381                } else
 382                        *next = NULL;
 383        }
 384        return NULL;
 385}
 386
 387static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
 388                                        struct btrfs_delayed_node *delayed_node,
 389                                        struct btrfs_key *key)
 390{
 391        struct btrfs_delayed_item *item;
 392
 393        item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
 394                                           NULL, NULL);
 395        return item;
 396}
 397
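     /*
      * Insert @ins into the insertion or deletion rb-tree of @delayed_node,
      * depending on @action.  Returns -EEXIST if an item with the same key is
      * already queued.
      */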
 398static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
 399                                    struct btrfs_delayed_item *ins,
 400                                    int action)
 401{
 402        struct rb_node **p, *node;
 403        struct rb_node *parent_node = NULL;
 404        struct rb_root *root;
 405        struct btrfs_delayed_item *item;
 406        int cmp;
 407
 408        if (action == BTRFS_DELAYED_INSERTION_ITEM)
 409                root = &delayed_node->ins_root;
 410        else if (action == BTRFS_DELAYED_DELETION_ITEM)
 411                root = &delayed_node->del_root;
 412        else
 413                BUG();
 414        p = &root->rb_node;
 415        node = &ins->rb_node;
 416
 417        while (*p) {
 418                parent_node = *p;
 419                item = rb_entry(parent_node, struct btrfs_delayed_item,
 420                                 rb_node);
 421
 422                cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
 423                if (cmp < 0)
 424                        p = &(*p)->rb_right;
 425                else if (cmp > 0)
 426                        p = &(*p)->rb_left;
 427                else
 428                        return -EEXIST;
 429        }
 430
 431        rb_link_node(node, parent_node, p);
 432        rb_insert_color(node, root);
 433        ins->delayed_node = delayed_node;
 434        ins->ins_or_del = action;
 435
 436        if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
 437            action == BTRFS_DELAYED_INSERTION_ITEM &&
 438            ins->key.offset >= delayed_node->index_cnt)
  439                delayed_node->index_cnt = ins->key.offset + 1;
 440
 441        delayed_node->count++;
 442        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
 443        return 0;
 444}
 445
 446static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
 447                                              struct btrfs_delayed_item *item)
 448{
 449        return __btrfs_add_delayed_item(node, item,
 450                                        BTRFS_DELAYED_INSERTION_ITEM);
 451}
 452
 453static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 454                                             struct btrfs_delayed_item *item)
 455{
 456        return __btrfs_add_delayed_item(node, item,
 457                                        BTRFS_DELAYED_DELETION_ITEM);
 458}
 459
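     /*
      * Account for one completed delayed item and wake up any waiters once the
      * backlog drops below the background threshold or a batch has finished.
      */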
 460static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 461{
 462        int seq = atomic_inc_return(&delayed_root->items_seq);
 463        if ((atomic_dec_return(&delayed_root->items) <
 464            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
 465            waitqueue_active(&delayed_root->wait))
 466                wake_up(&delayed_root->wait);
 467}
 468
 469static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 470{
 471        struct rb_root *root;
 472        struct btrfs_delayed_root *delayed_root;
 473
 474        delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
 475
 476        BUG_ON(!delayed_root);
 477        BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
 478               delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
 479
 480        if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
 481                root = &delayed_item->delayed_node->ins_root;
 482        else
 483                root = &delayed_item->delayed_node->del_root;
 484
 485        rb_erase(&delayed_item->rb_node, root);
 486        delayed_item->delayed_node->count--;
 487
 488        finish_one_item(delayed_root);
 489}
 490
 491static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 492{
 493        if (item) {
 494                __btrfs_remove_delayed_item(item);
 495                if (atomic_dec_and_test(&item->refs))
 496                        kfree(item);
 497        }
 498}
 499
 500static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 501                                        struct btrfs_delayed_node *delayed_node)
 502{
 503        struct rb_node *p;
 504        struct btrfs_delayed_item *item = NULL;
 505
 506        p = rb_first(&delayed_node->ins_root);
 507        if (p)
 508                item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 509
 510        return item;
 511}
 512
 513static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 514                                        struct btrfs_delayed_node *delayed_node)
 515{
 516        struct rb_node *p;
 517        struct btrfs_delayed_item *item = NULL;
 518
 519        p = rb_first(&delayed_node->del_root);
 520        if (p)
 521                item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 522
 523        return item;
 524}
 525
 526static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 527                                                struct btrfs_delayed_item *item)
 528{
 529        struct rb_node *p;
 530        struct btrfs_delayed_item *next = NULL;
 531
 532        p = rb_next(&item->rb_node);
 533        if (p)
 534                next = rb_entry(p, struct btrfs_delayed_item, rb_node);
 535
 536        return next;
 537}
 538
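     /*
      * Reserve metadata space for one delayed item by migrating it from the
      * transaction's block reserve into the fs-wide delayed block reserve.
      * Does nothing if the transaction has no bytes reserved.
      */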
 539static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 540                                               struct btrfs_root *root,
 541                                               struct btrfs_delayed_item *item)
 542{
 543        struct btrfs_block_rsv *src_rsv;
 544        struct btrfs_block_rsv *dst_rsv;
 545        u64 num_bytes;
 546        int ret;
 547
 548        if (!trans->bytes_reserved)
 549                return 0;
 550
 551        src_rsv = trans->block_rsv;
 552        dst_rsv = &root->fs_info->delayed_block_rsv;
 553
 554        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
 555        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
 556        if (!ret) {
 557                trace_btrfs_space_reservation(root->fs_info, "delayed_item",
 558                                              item->key.objectid,
 559                                              num_bytes, 1);
 560                item->bytes_reserved = num_bytes;
 561        }
 562
 563        return ret;
 564}
 565
 566static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 567                                                struct btrfs_delayed_item *item)
 568{
 569        struct btrfs_block_rsv *rsv;
 570
 571        if (!item->bytes_reserved)
 572                return;
 573
 574        rsv = &root->fs_info->delayed_block_rsv;
 575        trace_btrfs_space_reservation(root->fs_info, "delayed_item",
 576                                      item->key.objectid, item->bytes_reserved,
 577                                      0);
 578        btrfs_block_rsv_release(root, rsv,
 579                                item->bytes_reserved);
 580}
 581
 582static int btrfs_delayed_inode_reserve_metadata(
 583                                        struct btrfs_trans_handle *trans,
 584                                        struct btrfs_root *root,
 585                                        struct inode *inode,
 586                                        struct btrfs_delayed_node *node)
 587{
 588        struct btrfs_block_rsv *src_rsv;
 589        struct btrfs_block_rsv *dst_rsv;
 590        u64 num_bytes;
 591        int ret;
 592        bool release = false;
 593
 594        src_rsv = trans->block_rsv;
 595        dst_rsv = &root->fs_info->delayed_block_rsv;
 596
 597        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
 598
 599        /*
  600         * btrfs_dirty_inode will update the inode under btrfs_join_transaction,
  601         * which does not reserve space, for the sake of speed.  This is a problem
  602         * since we still need to reserve space for this update, so try to reserve
  603         * the space here.
  604         *
  605         * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
  606         * we're already accounted for.
 607         */
 608        if (!src_rsv || (!trans->bytes_reserved &&
 609                         src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
 610                ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
 611                                          BTRFS_RESERVE_NO_FLUSH);
 612                /*
 613                 * Since we're under a transaction reserve_metadata_bytes could
 614                 * try to commit the transaction which will make it return
 615                 * EAGAIN to make us stop the transaction we have, so return
 616                 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
 617                 */
 618                if (ret == -EAGAIN)
 619                        ret = -ENOSPC;
 620                if (!ret) {
 621                        node->bytes_reserved = num_bytes;
 622                        trace_btrfs_space_reservation(root->fs_info,
 623                                                      "delayed_inode",
 624                                                      btrfs_ino(inode),
 625                                                      num_bytes, 1);
 626                }
 627                return ret;
 628        } else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
 629                spin_lock(&BTRFS_I(inode)->lock);
 630                if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
 631                                       &BTRFS_I(inode)->runtime_flags)) {
 632                        spin_unlock(&BTRFS_I(inode)->lock);
 633                        release = true;
 634                        goto migrate;
 635                }
 636                spin_unlock(&BTRFS_I(inode)->lock);
 637
 638                /* Ok we didn't have space pre-reserved.  This shouldn't happen
 639                 * too often but it can happen if we do delalloc to an existing
 640                 * inode which gets dirtied because of the time update, and then
 641                 * isn't touched again until after the transaction commits and
 642                 * then we try to write out the data.  First try to be nice and
 643                 * reserve something strictly for us.  If not be a pain and try
 644                 * to steal from the delalloc block rsv.
 645                 */
 646                ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
 647                                          BTRFS_RESERVE_NO_FLUSH);
 648                if (!ret)
 649                        goto out;
 650
 651                ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
 652                if (!WARN_ON(ret))
 653                        goto out;
 654
 655                /*
 656                 * Ok this is a problem, let's just steal from the global rsv
 657                 * since this really shouldn't happen that often.
 658                 */
 659                ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
 660                                              dst_rsv, num_bytes);
 661                goto out;
 662        }
 663
 664migrate:
 665        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
 666
 667out:
 668        /*
 669         * Migrate only takes a reservation, it doesn't touch the size of the
  670         * block_rsv.  This is to simplify things for callers that don't normally
  671         * have anything migrated from their block rsv.  If they go to release their
 672         * reservation, that will decrease the size as well, so if migrate
 673         * reduced size we'd end up with a negative size.  But for the
 674         * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
 675         * but we could in fact do this reserve/migrate dance several times
 676         * between the time we did the original reservation and we'd clean it
 677         * up.  So to take care of this, release the space for the meta
 678         * reservation here.  I think it may be time for a documentation page on
  679         * how block rsvs work.
 680         */
 681        if (!ret) {
 682                trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
 683                                              btrfs_ino(inode), num_bytes, 1);
 684                node->bytes_reserved = num_bytes;
 685        }
 686
 687        if (release) {
 688                trace_btrfs_space_reservation(root->fs_info, "delalloc",
 689                                              btrfs_ino(inode), num_bytes, 0);
 690                btrfs_block_rsv_release(root, src_rsv, num_bytes);
 691        }
 692
 693        return ret;
 694}
 695
 696static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
 697                                                struct btrfs_delayed_node *node)
 698{
 699        struct btrfs_block_rsv *rsv;
 700
 701        if (!node->bytes_reserved)
 702                return;
 703
 704        rsv = &root->fs_info->delayed_block_rsv;
 705        trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
 706                                      node->inode_id, node->bytes_reserved, 0);
 707        btrfs_block_rsv_release(root, rsv,
 708                                node->bytes_reserved);
 709        node->bytes_reserved = 0;
 710}
 711
 712/*
  713 * This helper inserts a batch of continuous items into the same leaf,
  714 * limited by the free space of the leaf.
 715 */
 716static int btrfs_batch_insert_items(struct btrfs_root *root,
 717                                    struct btrfs_path *path,
 718                                    struct btrfs_delayed_item *item)
 719{
 720        struct btrfs_delayed_item *curr, *next;
 721        int free_space;
 722        int total_data_size = 0, total_size = 0;
 723        struct extent_buffer *leaf;
 724        char *data_ptr;
 725        struct btrfs_key *keys;
 726        u32 *data_size;
 727        struct list_head head;
 728        int slot;
 729        int nitems;
 730        int i;
 731        int ret = 0;
 732
 733        BUG_ON(!path->nodes[0]);
 734
 735        leaf = path->nodes[0];
 736        free_space = btrfs_leaf_free_space(root, leaf);
 737        INIT_LIST_HEAD(&head);
 738
 739        next = item;
 740        nitems = 0;
 741
 742        /*
  743         * count the number of continuous items that we can insert in one batch
 744         */
 745        while (total_size + next->data_len + sizeof(struct btrfs_item) <=
 746               free_space) {
 747                total_data_size += next->data_len;
 748                total_size += next->data_len + sizeof(struct btrfs_item);
 749                list_add_tail(&next->tree_list, &head);
 750                nitems++;
 751
 752                curr = next;
 753                next = __btrfs_next_delayed_item(curr);
 754                if (!next)
 755                        break;
 756
 757                if (!btrfs_is_continuous_delayed_item(curr, next))
 758                        break;
 759        }
 760
 761        if (!nitems) {
 762                ret = 0;
 763                goto out;
 764        }
 765
 766        /*
  767         * we need to allocate some memory, but doing so might cause the task
  768         * to sleep, so we set all locked nodes in the path to blocking locks
 769         * first.
 770         */
 771        btrfs_set_path_blocking(path);
 772
 773        keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 774        if (!keys) {
 775                ret = -ENOMEM;
 776                goto out;
 777        }
 778
 779        data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
 780        if (!data_size) {
 781                ret = -ENOMEM;
 782                goto error;
 783        }
 784
 785        /* get keys of all the delayed items */
 786        i = 0;
 787        list_for_each_entry(next, &head, tree_list) {
 788                keys[i] = next->key;
 789                data_size[i] = next->data_len;
 790                i++;
 791        }
 792
  793        /* reset all the locked nodes in the path to spinning locks. */
 794        btrfs_clear_path_blocking(path, NULL, 0);
 795
 796        /* insert the keys of the items */
 797        setup_items_for_insert(root, path, keys, data_size,
 798                               total_data_size, total_size, nitems);
 799
 800        /* insert the dir index items */
 801        slot = path->slots[0];
 802        list_for_each_entry_safe(curr, next, &head, tree_list) {
 803                data_ptr = btrfs_item_ptr(leaf, slot, char);
 804                write_extent_buffer(leaf, &curr->data,
 805                                    (unsigned long)data_ptr,
 806                                    curr->data_len);
 807                slot++;
 808
 809                btrfs_delayed_item_release_metadata(root, curr);
 810
 811                list_del(&curr->tree_list);
 812                btrfs_release_delayed_item(curr);
 813        }
 814
 815error:
 816        kfree(data_size);
 817        kfree(keys);
 818out:
 819        return ret;
 820}
 821
 822/*
  823 * This helper handles simple insertions that don't need to extend an existing
  824 * item for new data, such as directory name index insertion and inode insertion.
 825 */
 826static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 827                                     struct btrfs_root *root,
 828                                     struct btrfs_path *path,
 829                                     struct btrfs_delayed_item *delayed_item)
 830{
 831        struct extent_buffer *leaf;
 832        char *ptr;
 833        int ret;
 834
 835        ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
 836                                      delayed_item->data_len);
 837        if (ret < 0 && ret != -EEXIST)
 838                return ret;
 839
 840        leaf = path->nodes[0];
 841
 842        ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 843
 844        write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
 845                            delayed_item->data_len);
 846        btrfs_mark_buffer_dirty(leaf);
 847
 848        btrfs_delayed_item_release_metadata(root, delayed_item);
 849        return 0;
 850}
 851
 852/*
  853 * We insert one item first, then, if there are continuous items following it,
  854 * we try to insert them into the same leaf.
 855 */
 856static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
 857                                      struct btrfs_path *path,
 858                                      struct btrfs_root *root,
 859                                      struct btrfs_delayed_node *node)
 860{
 861        struct btrfs_delayed_item *curr, *prev;
 862        int ret = 0;
 863
 864do_again:
 865        mutex_lock(&node->mutex);
 866        curr = __btrfs_first_delayed_insertion_item(node);
 867        if (!curr)
 868                goto insert_end;
 869
 870        ret = btrfs_insert_delayed_item(trans, root, path, curr);
 871        if (ret < 0) {
 872                btrfs_release_path(path);
 873                goto insert_end;
 874        }
 875
 876        prev = curr;
 877        curr = __btrfs_next_delayed_item(prev);
 878        if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
 879                /* insert the continuous items into the same leaf */
 880                path->slots[0]++;
 881                btrfs_batch_insert_items(root, path, curr);
 882        }
 883        btrfs_release_delayed_item(prev);
 884        btrfs_mark_buffer_dirty(path->nodes[0]);
 885
 886        btrfs_release_path(path);
 887        mutex_unlock(&node->mutex);
 888        goto do_again;
 889
 890insert_end:
 891        mutex_unlock(&node->mutex);
 892        return ret;
 893}
 894
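     /*
      * Delete, in one btrfs_del_items() call, the run of leaf items starting
      * at path->slots[0] that match the queued deletion items beginning with
      * @item, then release those delayed items.
      */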
 895static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 896                                    struct btrfs_root *root,
 897                                    struct btrfs_path *path,
 898                                    struct btrfs_delayed_item *item)
 899{
 900        struct btrfs_delayed_item *curr, *next;
 901        struct extent_buffer *leaf;
 902        struct btrfs_key key;
 903        struct list_head head;
 904        int nitems, i, last_item;
 905        int ret = 0;
 906
 907        BUG_ON(!path->nodes[0]);
 908
 909        leaf = path->nodes[0];
 910
 911        i = path->slots[0];
 912        last_item = btrfs_header_nritems(leaf) - 1;
 913        if (i > last_item)
 914                return -ENOENT; /* FIXME: Is errno suitable? */
 915
 916        next = item;
 917        INIT_LIST_HEAD(&head);
 918        btrfs_item_key_to_cpu(leaf, &key, i);
 919        nitems = 0;
 920        /*
  921         * count the number of dir index items that we can delete in one batch
 922         */
 923        while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
 924                list_add_tail(&next->tree_list, &head);
 925                nitems++;
 926
 927                curr = next;
 928                next = __btrfs_next_delayed_item(curr);
 929                if (!next)
 930                        break;
 931
 932                if (!btrfs_is_continuous_delayed_item(curr, next))
 933                        break;
 934
 935                i++;
 936                if (i > last_item)
 937                        break;
 938                btrfs_item_key_to_cpu(leaf, &key, i);
 939        }
 940
 941        if (!nitems)
 942                return 0;
 943
 944        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
 945        if (ret)
 946                goto out;
 947
 948        list_for_each_entry_safe(curr, next, &head, tree_list) {
 949                btrfs_delayed_item_release_metadata(root, curr);
 950                list_del(&curr->tree_list);
 951                btrfs_release_delayed_item(curr);
 952        }
 953
 954out:
 955        return ret;
 956}
 957
 958static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
 959                                      struct btrfs_path *path,
 960                                      struct btrfs_root *root,
 961                                      struct btrfs_delayed_node *node)
 962{
 963        struct btrfs_delayed_item *curr, *prev;
 964        int ret = 0;
 965
 966do_again:
 967        mutex_lock(&node->mutex);
 968        curr = __btrfs_first_delayed_deletion_item(node);
 969        if (!curr)
 970                goto delete_fail;
 971
 972        ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
 973        if (ret < 0)
 974                goto delete_fail;
 975        else if (ret > 0) {
 976                /*
  977                 * can't find the tree item this delayed item points to, so the
  978                 * delayed item is invalid, just drop it.
 979                 */
 980                prev = curr;
 981                curr = __btrfs_next_delayed_item(prev);
 982                btrfs_release_delayed_item(prev);
 983                ret = 0;
 984                btrfs_release_path(path);
 985                if (curr) {
 986                        mutex_unlock(&node->mutex);
 987                        goto do_again;
 988                } else
 989                        goto delete_fail;
 990        }
 991
 992        btrfs_batch_delete_items(trans, root, path, curr);
 993        btrfs_release_path(path);
 994        mutex_unlock(&node->mutex);
 995        goto do_again;
 996
 997delete_fail:
 998        btrfs_release_path(path);
 999        mutex_unlock(&node->mutex);
1000        return ret;
1001}
1002
1003static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
1004{
1005        struct btrfs_delayed_root *delayed_root;
1006
1007        if (delayed_node &&
1008            test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1009                BUG_ON(!delayed_node->root);
1010                clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1011                delayed_node->count--;
1012
1013                delayed_root = delayed_node->root->fs_info->delayed_root;
1014                finish_one_item(delayed_root);
1015        }
1016}
1017
1018static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
1019{
1020        struct btrfs_delayed_root *delayed_root;
1021
1022        ASSERT(delayed_node->root);
1023        clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1024        delayed_node->count--;
1025
1026        delayed_root = delayed_node->root->fs_info->delayed_root;
1027        finish_one_item(delayed_root);
1028}
1029
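     /*
      * Write the cached inode item back into the fs tree and, if the node has
      * BTRFS_DELAYED_NODE_DEL_IREF set, delete the remaining INODE_REF/EXTREF
      * item.  The caller must hold node->mutex.
      */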
1030static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1031                                        struct btrfs_root *root,
1032                                        struct btrfs_path *path,
1033                                        struct btrfs_delayed_node *node)
1034{
1035        struct btrfs_key key;
1036        struct btrfs_inode_item *inode_item;
1037        struct extent_buffer *leaf;
1038        int mod;
1039        int ret;
1040
1041        key.objectid = node->inode_id;
1042        btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1043        key.offset = 0;
1044
1045        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1046                mod = -1;
1047        else
1048                mod = 1;
1049
1050        ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1051        if (ret > 0) {
1052                btrfs_release_path(path);
1053                return -ENOENT;
1054        } else if (ret < 0) {
1055                return ret;
1056        }
1057
1058        leaf = path->nodes[0];
1059        inode_item = btrfs_item_ptr(leaf, path->slots[0],
1060                                    struct btrfs_inode_item);
1061        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1062                            sizeof(struct btrfs_inode_item));
1063        btrfs_mark_buffer_dirty(leaf);
1064
1065        if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1066                goto no_iref;
1067
1068        path->slots[0]++;
1069        if (path->slots[0] >= btrfs_header_nritems(leaf))
1070                goto search;
1071again:
1072        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1073        if (key.objectid != node->inode_id)
1074                goto out;
1075
1076        if (key.type != BTRFS_INODE_REF_KEY &&
1077            key.type != BTRFS_INODE_EXTREF_KEY)
1078                goto out;
1079
1080        /*
 1081         * Delayed iref deletion is for an inode that has only one link,
 1082         * so there is only one iref.  The case where several irefs share
 1083         * the same item does not exist.
1084         */
1085        btrfs_del_item(trans, root, path);
1086out:
1087        btrfs_release_delayed_iref(node);
1088no_iref:
1089        btrfs_release_path(path);
1090err_out:
1091        btrfs_delayed_inode_release_metadata(root, node);
1092        btrfs_release_delayed_inode(node);
1093
1094        return ret;
1095
1096search:
1097        btrfs_release_path(path);
1098
1099        btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
1100        key.offset = -1;
1101        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1102        if (ret < 0)
1103                goto err_out;
1104        ASSERT(ret);
1105
1106        ret = 0;
1107        leaf = path->nodes[0];
1108        path->slots[0]--;
1109        goto again;
1110}
1111
1112static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1113                                             struct btrfs_root *root,
1114                                             struct btrfs_path *path,
1115                                             struct btrfs_delayed_node *node)
1116{
1117        int ret;
1118
1119        mutex_lock(&node->mutex);
1120        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1121                mutex_unlock(&node->mutex);
1122                return 0;
1123        }
1124
1125        ret = __btrfs_update_delayed_inode(trans, root, path, node);
1126        mutex_unlock(&node->mutex);
1127        return ret;
1128}
1129
1130static inline int
1131__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1132                                   struct btrfs_path *path,
1133                                   struct btrfs_delayed_node *node)
1134{
1135        int ret;
1136
1137        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1138        if (ret)
1139                return ret;
1140
1141        ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1142        if (ret)
1143                return ret;
1144
1145        ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1146        return ret;
1147}
1148
1149/*
1150 * Called when committing the transaction.
1151 * Returns 0 on success.
 1152 * Returns < 0 on error, with the transaction aborted and any outstanding
 1153 * delayed items cleaned up.
1154 */
1155static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1156                                     struct btrfs_root *root, int nr)
1157{
1158        struct btrfs_delayed_root *delayed_root;
1159        struct btrfs_delayed_node *curr_node, *prev_node;
1160        struct btrfs_path *path;
1161        struct btrfs_block_rsv *block_rsv;
1162        int ret = 0;
1163        bool count = (nr > 0);
1164
1165        if (trans->aborted)
1166                return -EIO;
1167
1168        path = btrfs_alloc_path();
1169        if (!path)
1170                return -ENOMEM;
1171        path->leave_spinning = 1;
1172
1173        block_rsv = trans->block_rsv;
1174        trans->block_rsv = &root->fs_info->delayed_block_rsv;
1175
1176        delayed_root = btrfs_get_delayed_root(root);
1177
1178        curr_node = btrfs_first_delayed_node(delayed_root);
1179        while (curr_node && (!count || (count && nr--))) {
1180                ret = __btrfs_commit_inode_delayed_items(trans, path,
1181                                                         curr_node);
1182                if (ret) {
1183                        btrfs_release_delayed_node(curr_node);
1184                        curr_node = NULL;
1185                        btrfs_abort_transaction(trans, root, ret);
1186                        break;
1187                }
1188
1189                prev_node = curr_node;
1190                curr_node = btrfs_next_delayed_node(curr_node);
1191                btrfs_release_delayed_node(prev_node);
1192        }
1193
1194        if (curr_node)
1195                btrfs_release_delayed_node(curr_node);
1196        btrfs_free_path(path);
1197        trans->block_rsv = block_rsv;
1198
1199        return ret;
1200}
1201
1202int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1203                            struct btrfs_root *root)
1204{
1205        return __btrfs_run_delayed_items(trans, root, -1);
1206}
1207
1208int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
1209                               struct btrfs_root *root, int nr)
1210{
1211        return __btrfs_run_delayed_items(trans, root, nr);
1212}
1213
1214int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1215                                     struct inode *inode)
1216{
1217        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1218        struct btrfs_path *path;
1219        struct btrfs_block_rsv *block_rsv;
1220        int ret;
1221
1222        if (!delayed_node)
1223                return 0;
1224
1225        mutex_lock(&delayed_node->mutex);
1226        if (!delayed_node->count) {
1227                mutex_unlock(&delayed_node->mutex);
1228                btrfs_release_delayed_node(delayed_node);
1229                return 0;
1230        }
1231        mutex_unlock(&delayed_node->mutex);
1232
1233        path = btrfs_alloc_path();
1234        if (!path) {
1235                btrfs_release_delayed_node(delayed_node);
1236                return -ENOMEM;
1237        }
1238        path->leave_spinning = 1;
1239
1240        block_rsv = trans->block_rsv;
1241        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1242
1243        ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1244
1245        btrfs_release_delayed_node(delayed_node);
1246        btrfs_free_path(path);
1247        trans->block_rsv = block_rsv;
1248
1249        return ret;
1250}
1251
1252int btrfs_commit_inode_delayed_inode(struct inode *inode)
1253{
1254        struct btrfs_trans_handle *trans;
1255        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1256        struct btrfs_path *path;
1257        struct btrfs_block_rsv *block_rsv;
1258        int ret;
1259
1260        if (!delayed_node)
1261                return 0;
1262
1263        mutex_lock(&delayed_node->mutex);
1264        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1265                mutex_unlock(&delayed_node->mutex);
1266                btrfs_release_delayed_node(delayed_node);
1267                return 0;
1268        }
1269        mutex_unlock(&delayed_node->mutex);
1270
1271        trans = btrfs_join_transaction(delayed_node->root);
1272        if (IS_ERR(trans)) {
1273                ret = PTR_ERR(trans);
1274                goto out;
1275        }
1276
1277        path = btrfs_alloc_path();
1278        if (!path) {
1279                ret = -ENOMEM;
1280                goto trans_out;
1281        }
1282        path->leave_spinning = 1;
1283
1284        block_rsv = trans->block_rsv;
1285        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1286
1287        mutex_lock(&delayed_node->mutex);
1288        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1289                ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1290                                                   path, delayed_node);
1291        else
1292                ret = 0;
1293        mutex_unlock(&delayed_node->mutex);
1294
1295        btrfs_free_path(path);
1296        trans->block_rsv = block_rsv;
1297trans_out:
1298        btrfs_end_transaction(trans, delayed_node->root);
1299        btrfs_btree_balance_dirty(delayed_node->root);
1300out:
1301        btrfs_release_delayed_node(delayed_node);
1302
1303        return ret;
1304}
1305
1306void btrfs_remove_delayed_node(struct inode *inode)
1307{
1308        struct btrfs_delayed_node *delayed_node;
1309
1310        delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
1311        if (!delayed_node)
1312                return;
1313
1314        BTRFS_I(inode)->delayed_node = NULL;
1315        btrfs_release_delayed_node(delayed_node);
1316}
1317
1318struct btrfs_async_delayed_work {
1319        struct btrfs_delayed_root *delayed_root;
1320        int nr;
1321        struct btrfs_work work;
1322};
1323
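     /*
      * Background work function: flush prepared delayed nodes one at a time
      * until the item backlog drops below half the background threshold or,
      * if async_work->nr is non-zero, that many nodes have been flushed.
      */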
1324static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1325{
1326        struct btrfs_async_delayed_work *async_work;
1327        struct btrfs_delayed_root *delayed_root;
1328        struct btrfs_trans_handle *trans;
1329        struct btrfs_path *path;
1330        struct btrfs_delayed_node *delayed_node = NULL;
1331        struct btrfs_root *root;
1332        struct btrfs_block_rsv *block_rsv;
1333        int total_done = 0;
1334
1335        async_work = container_of(work, struct btrfs_async_delayed_work, work);
1336        delayed_root = async_work->delayed_root;
1337
1338        path = btrfs_alloc_path();
1339        if (!path)
1340                goto out;
1341
1342again:
1343        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
1344                goto free_path;
1345
1346        delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1347        if (!delayed_node)
1348                goto free_path;
1349
1350        path->leave_spinning = 1;
1351        root = delayed_node->root;
1352
1353        trans = btrfs_join_transaction(root);
1354        if (IS_ERR(trans))
1355                goto release_path;
1356
1357        block_rsv = trans->block_rsv;
1358        trans->block_rsv = &root->fs_info->delayed_block_rsv;
1359
1360        __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1361
1362        trans->block_rsv = block_rsv;
1363        btrfs_end_transaction(trans, root);
1364        btrfs_btree_balance_dirty_nodelay(root);
1365
1366release_path:
1367        btrfs_release_path(path);
1368        total_done++;
1369
1370        btrfs_release_prepared_delayed_node(delayed_node);
1371        if (async_work->nr == 0 || total_done < async_work->nr)
1372                goto again;
1373
1374free_path:
1375        btrfs_free_path(path);
1376out:
1377        wake_up(&delayed_root->wait);
1378        kfree(async_work);
1379}
1380
1381
1382static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1383                                     struct btrfs_root *root, int nr)
1384{
1385        struct btrfs_async_delayed_work *async_work;
1386
1387        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1388                return 0;
1389
1390        async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1391        if (!async_work)
1392                return -ENOMEM;
1393
1394        async_work->delayed_root = delayed_root;
1395        btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root,
1396                        NULL, NULL);
1397        async_work->nr = nr;
1398
1399        btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work);
1400        return 0;
1401}
1402
1403void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
1404{
1405        struct btrfs_delayed_root *delayed_root;
1406        delayed_root = btrfs_get_delayed_root(root);
1407        WARN_ON(btrfs_first_delayed_node(delayed_root));
1408}
1409
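     /*
      * Helper for btrfs_balance_delayed_items(): tell a waiter whether enough
      * delayed items have been flushed since it recorded @seq.
      */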
1410static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1411{
1412        int val = atomic_read(&delayed_root->items_seq);
1413
1414        if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1415                return 1;
1416
1417        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1418                return 1;
1419
1420        return 0;
1421}
1422
1423void btrfs_balance_delayed_items(struct btrfs_root *root)
1424{
1425        struct btrfs_delayed_root *delayed_root;
1426
1427        delayed_root = btrfs_get_delayed_root(root);
1428
1429        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1430                return;
1431
1432        if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1433                int seq;
1434                int ret;
1435
1436                seq = atomic_read(&delayed_root->items_seq);
1437
1438                ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
1439                if (ret)
1440                        return;
1441
1442                wait_event_interruptible(delayed_root->wait,
1443                                         could_end_wait(delayed_root, seq));
1444                return;
1445        }
1446
1447        btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
1448}
1449
1450/* Will return 0 or -ENOMEM */
1451int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1452                                   struct btrfs_root *root, const char *name,
1453                                   int name_len, struct inode *dir,
1454                                   struct btrfs_disk_key *disk_key, u8 type,
1455                                   u64 index)
1456{
1457        struct btrfs_delayed_node *delayed_node;
1458        struct btrfs_delayed_item *delayed_item;
1459        struct btrfs_dir_item *dir_item;
1460        int ret;
1461
1462        delayed_node = btrfs_get_or_create_delayed_node(dir);
1463        if (IS_ERR(delayed_node))
1464                return PTR_ERR(delayed_node);
1465
1466        delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1467        if (!delayed_item) {
1468                ret = -ENOMEM;
1469                goto release_node;
1470        }
1471
1472        delayed_item->key.objectid = btrfs_ino(dir);
1473        btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
1474        delayed_item->key.offset = index;
1475
1476        dir_item = (struct btrfs_dir_item *)delayed_item->data;
1477        dir_item->location = *disk_key;
1478        btrfs_set_stack_dir_transid(dir_item, trans->transid);
1479        btrfs_set_stack_dir_data_len(dir_item, 0);
1480        btrfs_set_stack_dir_name_len(dir_item, name_len);
1481        btrfs_set_stack_dir_type(dir_item, type);
1482        memcpy((char *)(dir_item + 1), name, name_len);
1483
1484        ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
1485        /*
 1486         * we reserved enough space when we started a new transaction,
 1487         * so a metadata reservation failure is impossible here
1488         */
1489        BUG_ON(ret);
1490
1491
1492        mutex_lock(&delayed_node->mutex);
1493        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1494        if (unlikely(ret)) {
 1495                btrfs_err(root->fs_info, "error adding delayed dir index item (name: %.*s) "
 1496                                "into the insertion tree of the delayed node "
 1497                                "(root id: %llu, inode id: %llu, errno: %d)",
1498                                name_len, name, delayed_node->root->objectid,
1499                                delayed_node->inode_id, ret);
1500                BUG();
1501        }
1502        mutex_unlock(&delayed_node->mutex);
1503
1504release_node:
1505        btrfs_release_delayed_node(delayed_node);
1506        return ret;
1507}
1508
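/*
 * If the dir index being deleted is still queued as a delayed insertion,
 * the two operations cancel each other out: drop the queued item together
 * with its metadata reservation and return 0.  Return 1 when no matching
 * insertion item exists, so the caller must queue a real deletion item.
 */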
1509static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
1510                                               struct btrfs_delayed_node *node,
1511                                               struct btrfs_key *key)
1512{
1513        struct btrfs_delayed_item *item;
1514
1515        mutex_lock(&node->mutex);
1516        item = __btrfs_lookup_delayed_insertion_item(node, key);
1517        if (!item) {
1518                mutex_unlock(&node->mutex);
1519                return 1;
1520        }
1521
1522        btrfs_delayed_item_release_metadata(root, item);
1523        btrfs_release_delayed_item(item);
1524        mutex_unlock(&node->mutex);
1525        return 0;
1526}
1527
1528int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1529                                   struct btrfs_root *root, struct inode *dir,
1530                                   u64 index)
1531{
1532        struct btrfs_delayed_node *node;
1533        struct btrfs_delayed_item *item;
1534        struct btrfs_key item_key;
1535        int ret;
1536
1537        node = btrfs_get_or_create_delayed_node(dir);
1538        if (IS_ERR(node))
1539                return PTR_ERR(node);
1540
1541        item_key.objectid = btrfs_ino(dir);
1542        btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
1543        item_key.offset = index;
1544
1545        ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
1546        if (!ret)
1547                goto end;
1548
1549        item = btrfs_alloc_delayed_item(0);
1550        if (!item) {
1551                ret = -ENOMEM;
1552                goto end;
1553        }
1554
1555        item->key = item_key;
1556
1557        ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
1558        /*
1559         * We reserved enough space when we started the transaction, so
1560         * a metadata reservation failure here is impossible.
1561         */
1562        BUG_ON(ret);
1563
1564        mutex_lock(&node->mutex);
1565        ret = __btrfs_add_delayed_deletion_item(node, item);
1566        if (unlikely(ret)) {
1567                btrfs_err(root->fs_info, "error adding delayed dir index item (index: %llu) "
1568                                "into the deletion tree of the delayed node "
1569                                "(root id: %llu, inode id: %llu, errno: %d)",
1570                                index, node->root->objectid, node->inode_id,
1571                                ret);
1572                BUG();
1573        }
1574        mutex_unlock(&node->mutex);
1575end:
1576        btrfs_release_delayed_node(node);
1577        return ret;
1578}
1579
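/*
 * Copy the directory index count cached in the delayed node back into the
 * in-memory btrfs inode.  Returns -ENOENT if the inode has no delayed node
 * and -EINVAL if the delayed node has not recorded an index count yet.
 */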
1580int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1581{
1582        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1583
1584        if (!delayed_node)
1585                return -ENOENT;
1586
1587        /*
1588         * Since we hold the i_mutex of this directory, no new directory
1589         * index can be added to the delayed node and index_cnt cannot be
1590         * updated under us, so we need not lock the delayed node.
1591         */
1592        if (!delayed_node->index_cnt) {
1593                btrfs_release_delayed_node(delayed_node);
1594                return -EINVAL;
1595        }
1596
1597        BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
1598        btrfs_release_delayed_node(delayed_node);
1599        return 0;
1600}
1601
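/*
 * Grab a reference on every pending insertion and deletion item of the
 * inode's delayed node and put them on the caller-supplied lists; the
 * readdir code walks these lists alongside the on-disk dir items.
 */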
1602void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
1603                             struct list_head *del_list)
1604{
1605        struct btrfs_delayed_node *delayed_node;
1606        struct btrfs_delayed_item *item;
1607
1608        delayed_node = btrfs_get_delayed_node(inode);
1609        if (!delayed_node)
1610                return;
1611
1612        mutex_lock(&delayed_node->mutex);
1613        item = __btrfs_first_delayed_insertion_item(delayed_node);
1614        while (item) {
1615                atomic_inc(&item->refs);
1616                list_add_tail(&item->readdir_list, ins_list);
1617                item = __btrfs_next_delayed_item(item);
1618        }
1619
1620        item = __btrfs_first_delayed_deletion_item(delayed_node);
1621        while (item) {
1622                atomic_inc(&item->refs);
1623                list_add_tail(&item->readdir_list, del_list);
1624                item = __btrfs_next_delayed_item(item);
1625        }
1626        mutex_unlock(&delayed_node->mutex);
1627        /*
1628         * This delayed node is still cached in the btrfs inode, so refs
1629         * must be > 1 here, and we need not check whether it is about to
1630         * be freed.
1631         *
1632         * Besides that, this function is only used for readdir, and no
1633         * delayed items are inserted or deleted during that time, so we
1634         * need not requeue or dequeue this delayed node either.
1635         */
1636        atomic_dec(&delayed_node->refs);
1637}
1638
1639void btrfs_put_delayed_items(struct list_head *ins_list,
1640                             struct list_head *del_list)
1641{
1642        struct btrfs_delayed_item *curr, *next;
1643
1644        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1645                list_del(&curr->readdir_list);
1646                if (atomic_dec_and_test(&curr->refs))
1647                        kfree(curr);
1648        }
1649
1650        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1651                list_del(&curr->readdir_list);
1652                if (atomic_dec_and_test(&curr->refs))
1653                        kfree(curr);
1654        }
1655}
1656
1657int btrfs_should_delete_dir_index(struct list_head *del_list,
1658                                  u64 index)
1659{
1660        struct btrfs_delayed_item *curr, *next;
1661        int ret;
1662
1663        if (list_empty(del_list))
1664                return 0;
1665
1666        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1667                if (curr->key.offset > index)
1668                        break;
1669
1670                list_del(&curr->readdir_list);
1671                ret = (curr->key.offset == index);
1672
1673                if (atomic_dec_and_test(&curr->refs))
1674                        kfree(curr);
1675
1676                if (ret)
1677                        return 1;
1678                else
1679                        continue;
1680        }
1681        return 0;
1682}
1683
1684/*
1685 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1686 * and emit it via @ctx; returns 1 once dir_emit() reports the buffer is full.
1687 */
1688int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1689                                    struct list_head *ins_list)
1690{
1691        struct btrfs_dir_item *di;
1692        struct btrfs_delayed_item *curr, *next;
1693        struct btrfs_key location;
1694        char *name;
1695        int name_len;
1696        int over = 0;
1697        unsigned char d_type;
1698
1699        if (list_empty(ins_list))
1700                return 0;
1701
1702        /*
1703         * The data of a delayed item is never changed once it is queued,
1704         * so we need not lock the items.  And since we hold the i_mutex of
1705         * the directory, nobody can delete any directory index right now.
1706         */
1707        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1708                list_del(&curr->readdir_list);
1709
1710                if (curr->key.offset < ctx->pos) {
1711                        if (atomic_dec_and_test(&curr->refs))
1712                                kfree(curr);
1713                        continue;
1714                }
1715
1716                ctx->pos = curr->key.offset;
1717
1718                di = (struct btrfs_dir_item *)curr->data;
1719                name = (char *)(di + 1);
1720                name_len = btrfs_stack_dir_name_len(di);
1721
1722                d_type = btrfs_filetype_table[di->type];
1723                btrfs_disk_key_to_cpu(&location, &di->location);
1724
1725                over = !dir_emit(ctx, name, name_len,
1726                               location.objectid, d_type);
1727
1728                if (atomic_dec_and_test(&curr->refs))
1729                        kfree(curr);
1730
1731                if (over)
1732                        return 1;
1733        }
1734        return 0;
1735}
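
/*
 * A minimal sketch, not taken from this file, of how a readdir
 * implementation is expected to combine the four helpers above; the
 * function name and the surrounding loop are illustrative assumptions
 * about the caller (the real user lives in the readdir path of inode.c),
 * not part of the delayed-inode API itself.
 */
#if 0
static int example_readdir(struct inode *dir, struct dir_context *ctx)
{
        LIST_HEAD(ins_list);
        LIST_HEAD(del_list);
        int ret;

        /* Pin every pending delayed insertion/deletion of this directory. */
        btrfs_get_delayed_items(dir, &ins_list, &del_list);

        /*
         * While walking the on-disk DIR_INDEX items, an index that has a
         * pending delayed deletion must be skipped:
         *
         *      if (btrfs_should_delete_dir_index(&del_list, index))
         *              continue;
         */

        /* Emit the entries that so far only exist as delayed insertions. */
        ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);

        /* Drop the references taken by btrfs_get_delayed_items(). */
        btrfs_put_delayed_items(&ins_list, &del_list);
        return ret;
}
#endif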
1736
1737static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1738                                  struct btrfs_inode_item *inode_item,
1739                                  struct inode *inode)
1740{
1741        btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1742        btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1743        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1744        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1745        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1746        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1747        btrfs_set_stack_inode_generation(inode_item,
1748                                         BTRFS_I(inode)->generation);
1749        btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
1750        btrfs_set_stack_inode_transid(inode_item, trans->transid);
1751        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1752        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1753        btrfs_set_stack_inode_block_group(inode_item, 0);
1754
1755        btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
1756                                     inode->i_atime.tv_sec);
1757        btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
1758                                      inode->i_atime.tv_nsec);
1759
1760        btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
1761                                     inode->i_mtime.tv_sec);
1762        btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
1763                                      inode->i_mtime.tv_nsec);
1764
1765        btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
1766                                     inode->i_ctime.tv_sec);
1767        btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
1768                                      inode->i_ctime.tv_nsec);
1769}
1770
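/*
 * If the delayed node still holds a dirty copy of the inode item, fill the
 * VFS inode from that cached copy instead of reading the item from the
 * b-tree.  *rdev is reported separately because i_rdev itself is cleared
 * here.  Returns -ENOENT when nothing is cached for this inode.
 */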
1771int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1772{
1773        struct btrfs_delayed_node *delayed_node;
1774        struct btrfs_inode_item *inode_item;
1775        struct btrfs_timespec *tspec;
1776
1777        delayed_node = btrfs_get_delayed_node(inode);
1778        if (!delayed_node)
1779                return -ENOENT;
1780
1781        mutex_lock(&delayed_node->mutex);
1782        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1783                mutex_unlock(&delayed_node->mutex);
1784                btrfs_release_delayed_node(delayed_node);
1785                return -ENOENT;
1786        }
1787
1788        inode_item = &delayed_node->inode_item;
1789
1790        i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1791        i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1792        btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
1793        inode->i_mode = btrfs_stack_inode_mode(inode_item);
1794        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1795        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1796        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1797        inode->i_version = btrfs_stack_inode_sequence(inode_item);
1798        inode->i_rdev = 0;
1799        *rdev = btrfs_stack_inode_rdev(inode_item);
1800        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1801
1802        tspec = btrfs_inode_atime(inode_item);
1803        inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
1804        inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1805
1806        tspec = btrfs_inode_mtime(inode_item);
1807        inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
1808        inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1809
1810        tspec = btrfs_inode_ctime(inode_item);
1811        inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
1812        inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1813
1814        inode->i_generation = BTRFS_I(inode)->generation;
1815        BTRFS_I(inode)->index_cnt = (u64)-1;
1816
1817        mutex_unlock(&delayed_node->mutex);
1818        btrfs_release_delayed_node(delayed_node);
1819        return 0;
1820}
1821
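/*
 * Record an inode update in the delayed node.  The first update reserves
 * metadata space, sets BTRFS_DELAYED_NODE_INODE_DIRTY and bumps the item
 * counters; later updates simply refresh the cached inode_item copy.
 */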
1822int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1823                               struct btrfs_root *root, struct inode *inode)
1824{
1825        struct btrfs_delayed_node *delayed_node;
1826        int ret = 0;
1827
1828        delayed_node = btrfs_get_or_create_delayed_node(inode);
1829        if (IS_ERR(delayed_node))
1830                return PTR_ERR(delayed_node);
1831
1832        mutex_lock(&delayed_node->mutex);
1833        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1834                fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1835                goto release_node;
1836        }
1837
1838        ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
1839                                                   delayed_node);
1840        if (ret)
1841                goto release_node;
1842
1843        fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1844        set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1845        delayed_node->count++;
1846        atomic_inc(&root->fs_info->delayed_root->items);
1847release_node:
1848        mutex_unlock(&delayed_node->mutex);
1849        btrfs_release_delayed_node(delayed_node);
1850        return ret;
1851}
1852
1853int btrfs_delayed_delete_inode_ref(struct inode *inode)
1854{
1855        struct btrfs_delayed_node *delayed_node;
1856
1857        delayed_node = btrfs_get_or_create_delayed_node(inode);
1858        if (IS_ERR(delayed_node))
1859                return PTR_ERR(delayed_node);
1860
1861        /*
1862         * We don't reserve space for the inode ref deletion because:
1863         * - We ONLY do async inode ref deletion for an inode that has only
1864         *   one link (i_nlink == 1), which means there is only one inode ref.
1865         *   In most cases the inode ref and the inode item are in the same
1866         *   leaf, and we deal with them at the same time.
1867         *   Since we are sure we will reserve space for the inode item, it
1868         *   is unnecessary to reserve extra space for the inode ref deletion.
1869         * - If the inode ref and the inode item are not in the same leaf,
1870         *   we need not worry about ENOSPC either, because we reserve much
1871         *   more space for the inode update than it actually needs.
1872         * - In the worst case we can steal some space from the global
1873         *   reservation, but that is very rare.
1874         */
1875        mutex_lock(&delayed_node->mutex);
1876        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1877                goto release_node;
1878
1879        set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1880        delayed_node->count++;
1881        atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
1882release_node:
1883        mutex_unlock(&delayed_node->mutex);
1884        btrfs_release_delayed_node(delayed_node);
1885        return 0;
1886}
1887
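/*
 * Throw away everything queued on a delayed node: release the metadata
 * reservations and the queued insertion/deletion items, any pending inode
 * ref deletion and the dirty inode item.  Used by the teardown helpers
 * below, which drop the delayed work instead of running it.
 */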
1888static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1889{
1890        struct btrfs_root *root = delayed_node->root;
1891        struct btrfs_delayed_item *curr_item, *prev_item;
1892
1893        mutex_lock(&delayed_node->mutex);
1894        curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1895        while (curr_item) {
1896                btrfs_delayed_item_release_metadata(root, curr_item);
1897                prev_item = curr_item;
1898                curr_item = __btrfs_next_delayed_item(prev_item);
1899                btrfs_release_delayed_item(prev_item);
1900        }
1901
1902        curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1903        while (curr_item) {
1904                btrfs_delayed_item_release_metadata(root, curr_item);
1905                prev_item = curr_item;
1906                curr_item = __btrfs_next_delayed_item(prev_item);
1907                btrfs_release_delayed_item(prev_item);
1908        }
1909
1910        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1911                btrfs_release_delayed_iref(delayed_node);
1912
1913        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1914                btrfs_delayed_inode_release_metadata(root, delayed_node);
1915                btrfs_release_delayed_inode(delayed_node);
1916        }
1917        mutex_unlock(&delayed_node->mutex);
1918}
1919
1920void btrfs_kill_delayed_inode_items(struct inode *inode)
1921{
1922        struct btrfs_delayed_node *delayed_node;
1923
1924        delayed_node = btrfs_get_delayed_node(inode);
1925        if (!delayed_node)
1926                return;
1927
1928        __btrfs_kill_delayed_node(delayed_node);
1929        btrfs_release_delayed_node(delayed_node);
1930}
1931
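/*
 * Walk the root's radix tree of delayed nodes in gang-lookup batches of up
 * to ARRAY_SIZE(delayed_nodes) entries, take a reference on each node
 * under inode_lock, then kill and release the nodes outside the lock.  The
 * next lookup resumes just past the last inode id seen.
 */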
1932void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1933{
1934        u64 inode_id = 0;
1935        struct btrfs_delayed_node *delayed_nodes[8];
1936        int i, n;
1937
1938        while (1) {
1939                spin_lock(&root->inode_lock);
1940                n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1941                                           (void **)delayed_nodes, inode_id,
1942                                           ARRAY_SIZE(delayed_nodes));
1943                if (!n) {
1944                        spin_unlock(&root->inode_lock);
1945                        break;
1946                }
1947
1948                inode_id = delayed_nodes[n - 1]->inode_id + 1;
1949
1950                for (i = 0; i < n; i++)
1951                        atomic_inc(&delayed_nodes[i]->refs);
1952                spin_unlock(&root->inode_lock);
1953
1954                for (i = 0; i < n; i++) {
1955                        __btrfs_kill_delayed_node(delayed_nodes[i]);
1956                        btrfs_release_delayed_node(delayed_nodes[i]);
1957                }
1958        }
1959}
1960
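/*
 * Kill every delayed node that is still queued on the fs-wide delayed
 * root, dropping its pending items rather than running them.  Intended for
 * teardown of a transaction that will never be committed.
 */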
1961void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
1962{
1963        struct btrfs_delayed_root *delayed_root;
1964        struct btrfs_delayed_node *curr_node, *prev_node;
1965
1966        delayed_root = btrfs_get_delayed_root(root);
1967
1968        curr_node = btrfs_first_delayed_node(delayed_root);
1969        while (curr_node) {
1970                __btrfs_kill_delayed_node(curr_node);
1971
1972                prev_node = curr_node;
1973                curr_node = btrfs_next_delayed_node(curr_node);
1974                btrfs_release_delayed_node(prev_node);
1975        }
1976}
1977
1978