linux/fs/btrfs/delayed-inode.c
   1/*
   2 * Copyright (C) 2011 Fujitsu.  All rights reserved.
   3 * Written by Miao Xie <miaox@cn.fujitsu.com>
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public
   7 * License v2 as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  12 * General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public
  15 * License along with this program; if not, write to the
  16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   17 * Boston, MA 02111-1307, USA.
  18 */
  19
  20#include <linux/slab.h>
  21#include "delayed-inode.h"
  22#include "disk-io.h"
  23#include "transaction.h"
  24#include "ctree.h"
  25
  26#define BTRFS_DELAYED_WRITEBACK         512
  27#define BTRFS_DELAYED_BACKGROUND        128
  28#define BTRFS_DELAYED_BATCH             16
  29
  30static struct kmem_cache *delayed_node_cache;
  31
  32int __init btrfs_delayed_inode_init(void)
  33{
  34        delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
  35                                        sizeof(struct btrfs_delayed_node),
  36                                        0,
  37                                        SLAB_MEM_SPREAD,
  38                                        NULL);
  39        if (!delayed_node_cache)
  40                return -ENOMEM;
  41        return 0;
  42}
  43
  44void btrfs_delayed_inode_exit(void)
  45{
  46        kmem_cache_destroy(delayed_node_cache);
  47}
  48
  49static inline void btrfs_init_delayed_node(
  50                                struct btrfs_delayed_node *delayed_node,
  51                                struct btrfs_root *root, u64 inode_id)
  52{
  53        delayed_node->root = root;
  54        delayed_node->inode_id = inode_id;
  55        atomic_set(&delayed_node->refs, 0);
  56        delayed_node->ins_root = RB_ROOT;
  57        delayed_node->del_root = RB_ROOT;
  58        mutex_init(&delayed_node->mutex);
  59        INIT_LIST_HEAD(&delayed_node->n_list);
  60        INIT_LIST_HEAD(&delayed_node->p_list);
  61}
  62
  63static inline int btrfs_is_continuous_delayed_item(
  64                                        struct btrfs_delayed_item *item1,
  65                                        struct btrfs_delayed_item *item2)
  66{
  67        if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
  68            item1->key.objectid == item2->key.objectid &&
  69            item1->key.type == item2->key.type &&
  70            item1->key.offset + 1 == item2->key.offset)
  71                return 1;
  72        return 0;
  73}
  74
  75static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
  76                                                        struct btrfs_root *root)
  77{
  78        return root->fs_info->delayed_root;
  79}
  80
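/*
 * Look up the delayed node of an inode: try the pointer cached in the
 * btrfs_inode first, then fall back to the per-root radix tree.  A
 * reference is taken on the node returned to the caller (plus a second
 * one if the node gets cached in the inode here for the first time).
 */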
  81static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
  82{
  83        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
  84        struct btrfs_root *root = btrfs_inode->root;
  85        u64 ino = btrfs_ino(inode);
  86        struct btrfs_delayed_node *node;
  87
  88        node = ACCESS_ONCE(btrfs_inode->delayed_node);
  89        if (node) {
  90                atomic_inc(&node->refs);
  91                return node;
  92        }
  93
  94        spin_lock(&root->inode_lock);
  95        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
  96        if (node) {
  97                if (btrfs_inode->delayed_node) {
  98                        atomic_inc(&node->refs);        /* can be accessed */
  99                        BUG_ON(btrfs_inode->delayed_node != node);
 100                        spin_unlock(&root->inode_lock);
 101                        return node;
 102                }
 103                btrfs_inode->delayed_node = node;
 104                /* can be accessed and cached in the inode */
 105                atomic_add(2, &node->refs);
 106                spin_unlock(&root->inode_lock);
 107                return node;
 108        }
 109        spin_unlock(&root->inode_lock);
 110
 111        return NULL;
 112}
 113
 114/* Will return either the node or PTR_ERR(-ENOMEM) */
 115static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 116                                                        struct inode *inode)
 117{
 118        struct btrfs_delayed_node *node;
 119        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
 120        struct btrfs_root *root = btrfs_inode->root;
 121        u64 ino = btrfs_ino(inode);
 122        int ret;
 123
 124again:
 125        node = btrfs_get_delayed_node(inode);
 126        if (node)
 127                return node;
 128
 129        node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
 130        if (!node)
 131                return ERR_PTR(-ENOMEM);
 132        btrfs_init_delayed_node(node, root, ino);
 133
 134        /* cached in the btrfs inode and can be accessed */
 135        atomic_add(2, &node->refs);
 136
 137        ret = radix_tree_preload(GFP_NOFS);
 138        if (ret) {
 139                kmem_cache_free(delayed_node_cache, node);
 140                return ERR_PTR(ret);
 141        }
 142
 143        spin_lock(&root->inode_lock);
 144        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 145        if (ret == -EEXIST) {
 146                spin_unlock(&root->inode_lock);
 147                kmem_cache_free(delayed_node_cache, node);
 148                radix_tree_preload_end();
 149                goto again;
 150        }
 151        btrfs_inode->delayed_node = node;
 152        spin_unlock(&root->inode_lock);
 153        radix_tree_preload_end();
 154
 155        return node;
 156}
 157
 158/*
 159 * Call it when holding delayed_node->mutex
 160 *
 161 * If mod = 1, add this node into the prepared list.
 162 */
 163static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 164                                     struct btrfs_delayed_node *node,
 165                                     int mod)
 166{
 167        spin_lock(&root->lock);
 168        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 169                if (!list_empty(&node->p_list))
 170                        list_move_tail(&node->p_list, &root->prepare_list);
 171                else if (mod)
 172                        list_add_tail(&node->p_list, &root->prepare_list);
 173        } else {
 174                list_add_tail(&node->n_list, &root->node_list);
 175                list_add_tail(&node->p_list, &root->prepare_list);
 176                atomic_inc(&node->refs);        /* inserted into list */
 177                root->nodes++;
 178                set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 179        }
 180        spin_unlock(&root->lock);
 181}
 182
 183/* Call it when holding delayed_node->mutex */
 184static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 185                                       struct btrfs_delayed_node *node)
 186{
 187        spin_lock(&root->lock);
 188        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 189                root->nodes--;
 190                atomic_dec(&node->refs);        /* not in the list */
 191                list_del_init(&node->n_list);
 192                if (!list_empty(&node->p_list))
 193                        list_del_init(&node->p_list);
 194                clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 195        }
 196        spin_unlock(&root->lock);
 197}
 198
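/*
 * Grab the first delayed node on the root's node list, taking a
 * reference on it.  Returns NULL if the list is empty.
 */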
 199static struct btrfs_delayed_node *btrfs_first_delayed_node(
 200                        struct btrfs_delayed_root *delayed_root)
 201{
 202        struct list_head *p;
 203        struct btrfs_delayed_node *node = NULL;
 204
 205        spin_lock(&delayed_root->lock);
 206        if (list_empty(&delayed_root->node_list))
 207                goto out;
 208
 209        p = delayed_root->node_list.next;
 210        node = list_entry(p, struct btrfs_delayed_node, n_list);
 211        atomic_inc(&node->refs);
 212out:
 213        spin_unlock(&delayed_root->lock);
 214
 215        return node;
 216}
 217
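/*
 * Return the delayed node that follows @node on the node list (or the
 * head of the list if @node has already been dequeued), with a
 * reference held.  Returns NULL at the end of the list.
 */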
 218static struct btrfs_delayed_node *btrfs_next_delayed_node(
 219                                                struct btrfs_delayed_node *node)
 220{
 221        struct btrfs_delayed_root *delayed_root;
 222        struct list_head *p;
 223        struct btrfs_delayed_node *next = NULL;
 224
 225        delayed_root = node->root->fs_info->delayed_root;
 226        spin_lock(&delayed_root->lock);
 227        if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 228                /* not in the list */
 229                if (list_empty(&delayed_root->node_list))
 230                        goto out;
 231                p = delayed_root->node_list.next;
 232        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
 233                goto out;
 234        else
 235                p = node->n_list.next;
 236
 237        next = list_entry(p, struct btrfs_delayed_node, n_list);
 238        atomic_inc(&next->refs);
 239out:
 240        spin_unlock(&delayed_root->lock);
 241
 242        return next;
 243}
 244
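/*
 * Drop a reference on a delayed node.  The node is re-queued or
 * dequeued depending on whether it still has pending items; once the
 * last reference is gone it is removed from the radix tree and freed.
 */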
 245static void __btrfs_release_delayed_node(
 246                                struct btrfs_delayed_node *delayed_node,
 247                                int mod)
 248{
 249        struct btrfs_delayed_root *delayed_root;
 250
 251        if (!delayed_node)
 252                return;
 253
 254        delayed_root = delayed_node->root->fs_info->delayed_root;
 255
 256        mutex_lock(&delayed_node->mutex);
 257        if (delayed_node->count)
 258                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
 259        else
 260                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 261        mutex_unlock(&delayed_node->mutex);
 262
 263        if (atomic_dec_and_test(&delayed_node->refs)) {
 264                bool free = false;
 265                struct btrfs_root *root = delayed_node->root;
 266                spin_lock(&root->inode_lock);
 267                if (atomic_read(&delayed_node->refs) == 0) {
 268                        radix_tree_delete(&root->delayed_nodes_tree,
 269                                          delayed_node->inode_id);
 270                        free = true;
 271                }
 272                spin_unlock(&root->inode_lock);
 273                if (free)
 274                        kmem_cache_free(delayed_node_cache, delayed_node);
 275        }
 276}
 277
 278static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
 279{
 280        __btrfs_release_delayed_node(node, 0);
 281}
 282
 283static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 284                                        struct btrfs_delayed_root *delayed_root)
 285{
 286        struct list_head *p;
 287        struct btrfs_delayed_node *node = NULL;
 288
 289        spin_lock(&delayed_root->lock);
 290        if (list_empty(&delayed_root->prepare_list))
 291                goto out;
 292
 293        p = delayed_root->prepare_list.next;
 294        list_del_init(p);
 295        node = list_entry(p, struct btrfs_delayed_node, p_list);
 296        atomic_inc(&node->refs);
 297out:
 298        spin_unlock(&delayed_root->lock);
 299
 300        return node;
 301}
 302
 303static inline void btrfs_release_prepared_delayed_node(
 304                                        struct btrfs_delayed_node *node)
 305{
 306        __btrfs_release_delayed_node(node, 1);
 307}
 308
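/*
 * Allocate a delayed item with room for @data_len bytes of item data
 * placed directly after the struct.  The caller owns the single
 * initial reference.
 */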
 309static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 310{
 311        struct btrfs_delayed_item *item;
 312        item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
 313        if (item) {
 314                item->data_len = data_len;
 315                item->ins_or_del = 0;
 316                item->bytes_reserved = 0;
 317                item->delayed_node = NULL;
 318                atomic_set(&item->refs, 1);
 319        }
 320        return item;
 321}
 322
 323/*
 324 * __btrfs_lookup_delayed_item - look up the delayed item by key
  325 * @root:         the rb-tree root to search (insertion or deletion tree)
 326 * @key:          the key to look up
 327 * @prev:         used to store the prev item if the right item isn't found
 328 * @next:         used to store the next item if the right item isn't found
 329 *
  330 * Note: if the exact item is not found, NULL is returned and the previous
  331 * and next items are stored in @prev and @next.
 332 */
 333static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
 334                                struct rb_root *root,
 335                                struct btrfs_key *key,
 336                                struct btrfs_delayed_item **prev,
 337                                struct btrfs_delayed_item **next)
 338{
 339        struct rb_node *node, *prev_node = NULL;
 340        struct btrfs_delayed_item *delayed_item = NULL;
 341        int ret = 0;
 342
 343        node = root->rb_node;
 344
 345        while (node) {
 346                delayed_item = rb_entry(node, struct btrfs_delayed_item,
 347                                        rb_node);
 348                prev_node = node;
 349                ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
 350                if (ret < 0)
 351                        node = node->rb_right;
 352                else if (ret > 0)
 353                        node = node->rb_left;
 354                else
 355                        return delayed_item;
 356        }
 357
 358        if (prev) {
 359                if (!prev_node)
 360                        *prev = NULL;
 361                else if (ret < 0)
 362                        *prev = delayed_item;
 363                else if ((node = rb_prev(prev_node)) != NULL) {
 364                        *prev = rb_entry(node, struct btrfs_delayed_item,
 365                                         rb_node);
 366                } else
 367                        *prev = NULL;
 368        }
 369
 370        if (next) {
 371                if (!prev_node)
 372                        *next = NULL;
 373                else if (ret > 0)
 374                        *next = delayed_item;
 375                else if ((node = rb_next(prev_node)) != NULL) {
 376                        *next = rb_entry(node, struct btrfs_delayed_item,
 377                                         rb_node);
 378                } else
 379                        *next = NULL;
 380        }
 381        return NULL;
 382}
 383
 384static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
 385                                        struct btrfs_delayed_node *delayed_node,
 386                                        struct btrfs_key *key)
 387{
 388        struct btrfs_delayed_item *item;
 389
 390        item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
 391                                           NULL, NULL);
 392        return item;
 393}
 394
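/*
 * Link a delayed item into the insertion or deletion rb-tree of the
 * delayed node, ordered by item key.  Also bumps the per-node count
 * and the global delayed item counter, and keeps index_cnt up to date
 * for dir index insertions.
 */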
 395static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
 396                                    struct btrfs_delayed_item *ins,
 397                                    int action)
 398{
 399        struct rb_node **p, *node;
 400        struct rb_node *parent_node = NULL;
 401        struct rb_root *root;
 402        struct btrfs_delayed_item *item;
 403        int cmp;
 404
 405        if (action == BTRFS_DELAYED_INSERTION_ITEM)
 406                root = &delayed_node->ins_root;
 407        else if (action == BTRFS_DELAYED_DELETION_ITEM)
 408                root = &delayed_node->del_root;
 409        else
 410                BUG();
 411        p = &root->rb_node;
 412        node = &ins->rb_node;
 413
 414        while (*p) {
 415                parent_node = *p;
 416                item = rb_entry(parent_node, struct btrfs_delayed_item,
 417                                 rb_node);
 418
 419                cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
 420                if (cmp < 0)
 421                        p = &(*p)->rb_right;
 422                else if (cmp > 0)
 423                        p = &(*p)->rb_left;
 424                else
 425                        return -EEXIST;
 426        }
 427
 428        rb_link_node(node, parent_node, p);
 429        rb_insert_color(node, root);
 430        ins->delayed_node = delayed_node;
 431        ins->ins_or_del = action;
 432
 433        if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
 434            action == BTRFS_DELAYED_INSERTION_ITEM &&
 435            ins->key.offset >= delayed_node->index_cnt)
 436                        delayed_node->index_cnt = ins->key.offset + 1;
 437
 438        delayed_node->count++;
 439        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
 440        return 0;
 441}
 442
 443static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
 444                                              struct btrfs_delayed_item *item)
 445{
 446        return __btrfs_add_delayed_item(node, item,
 447                                        BTRFS_DELAYED_INSERTION_ITEM);
 448}
 449
 450static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 451                                             struct btrfs_delayed_item *item)
 452{
 453        return __btrfs_add_delayed_item(node, item,
 454                                        BTRFS_DELAYED_DELETION_ITEM);
 455}
 456
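/*
 * Account one completed delayed item and wake up waiters once the
 * backlog drops below the background threshold or a full batch has
 * been processed.
 */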
 457static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 458{
 459        int seq = atomic_inc_return(&delayed_root->items_seq);
 460
 461        /*
 462         * atomic_dec_return implies a barrier for waitqueue_active
 463         */
 464        if ((atomic_dec_return(&delayed_root->items) <
 465            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
 466            waitqueue_active(&delayed_root->wait))
 467                wake_up(&delayed_root->wait);
 468}
 469
 470static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 471{
 472        struct rb_root *root;
 473        struct btrfs_delayed_root *delayed_root;
 474
 475        delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
 476
 477        BUG_ON(!delayed_root);
 478        BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
 479               delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
 480
 481        if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
 482                root = &delayed_item->delayed_node->ins_root;
 483        else
 484                root = &delayed_item->delayed_node->del_root;
 485
 486        rb_erase(&delayed_item->rb_node, root);
 487        delayed_item->delayed_node->count--;
 488
 489        finish_one_item(delayed_root);
 490}
 491
 492static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 493{
 494        if (item) {
 495                __btrfs_remove_delayed_item(item);
 496                if (atomic_dec_and_test(&item->refs))
 497                        kfree(item);
 498        }
 499}
 500
 501static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 502                                        struct btrfs_delayed_node *delayed_node)
 503{
 504        struct rb_node *p;
 505        struct btrfs_delayed_item *item = NULL;
 506
 507        p = rb_first(&delayed_node->ins_root);
 508        if (p)
 509                item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 510
 511        return item;
 512}
 513
 514static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 515                                        struct btrfs_delayed_node *delayed_node)
 516{
 517        struct rb_node *p;
 518        struct btrfs_delayed_item *item = NULL;
 519
 520        p = rb_first(&delayed_node->del_root);
 521        if (p)
 522                item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 523
 524        return item;
 525}
 526
 527static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 528                                                struct btrfs_delayed_item *item)
 529{
 530        struct rb_node *p;
 531        struct btrfs_delayed_item *next = NULL;
 532
 533        p = rb_next(&item->rb_node);
 534        if (p)
 535                next = rb_entry(p, struct btrfs_delayed_item, rb_node);
 536
 537        return next;
 538}
 539
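/*
 * Reserve metadata space for one delayed item by migrating the space
 * for a single metadata insertion from the transaction's block reserve
 * into the global delayed_block_rsv.  Does nothing if the transaction
 * reserved no bytes.
 */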
 540static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 541                                               struct btrfs_root *root,
 542                                               struct btrfs_delayed_item *item)
 543{
 544        struct btrfs_block_rsv *src_rsv;
 545        struct btrfs_block_rsv *dst_rsv;
 546        u64 num_bytes;
 547        int ret;
 548
 549        if (!trans->bytes_reserved)
 550                return 0;
 551
 552        src_rsv = trans->block_rsv;
 553        dst_rsv = &root->fs_info->delayed_block_rsv;
 554
 555        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
 556        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 557        if (!ret) {
 558                trace_btrfs_space_reservation(root->fs_info, "delayed_item",
 559                                              item->key.objectid,
 560                                              num_bytes, 1);
 561                item->bytes_reserved = num_bytes;
 562        }
 563
 564        return ret;
 565}
 566
 567static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 568                                                struct btrfs_delayed_item *item)
 569{
 570        struct btrfs_block_rsv *rsv;
 571
 572        if (!item->bytes_reserved)
 573                return;
 574
 575        rsv = &root->fs_info->delayed_block_rsv;
 576        trace_btrfs_space_reservation(root->fs_info, "delayed_item",
 577                                      item->key.objectid, item->bytes_reserved,
 578                                      0);
 579        btrfs_block_rsv_release(root, rsv,
 580                                item->bytes_reserved);
 581}
 582
 583static int btrfs_delayed_inode_reserve_metadata(
 584                                        struct btrfs_trans_handle *trans,
 585                                        struct btrfs_root *root,
 586                                        struct inode *inode,
 587                                        struct btrfs_delayed_node *node)
 588{
 589        struct btrfs_block_rsv *src_rsv;
 590        struct btrfs_block_rsv *dst_rsv;
 591        u64 num_bytes;
 592        int ret;
 593        bool release = false;
 594
 595        src_rsv = trans->block_rsv;
 596        dst_rsv = &root->fs_info->delayed_block_rsv;
 597
 598        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
 599
 600        /*
 601         * If our block_rsv is the delalloc block reserve then check and see if
 602         * we have our extra reservation for updating the inode.  If not fall
 603         * through and try to reserve space quickly.
 604         *
 605         * We used to try and steal from the delalloc block rsv or the global
 606         * reserve, but we'd steal a full reservation, which isn't kind.  We are
 607         * here through delalloc which means we've likely just cowed down close
 608         * to the leaf that contains the inode, so we would steal less just
 609         * doing the fallback inode update, so if we do end up having to steal
 610         * from the global block rsv we hopefully only steal one or two blocks
 611         * worth which is less likely to hurt us.
 612         */
 613        if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
 614                spin_lock(&BTRFS_I(inode)->lock);
 615                if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
 616                                       &BTRFS_I(inode)->runtime_flags))
 617                        release = true;
 618                else
 619                        src_rsv = NULL;
 620                spin_unlock(&BTRFS_I(inode)->lock);
 621        }
 622
 623        /*
 624         * btrfs_dirty_inode will update the inode under btrfs_join_transaction
 625         * which doesn't reserve space for speed.  This is a problem since we
 626         * still need to reserve space for this update, so try to reserve the
 627         * space.
 628         *
 629         * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
 630         * we're accounted for.
 631         */
 632        if (!src_rsv || (!trans->bytes_reserved &&
 633                         src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
 634                ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
 635                                          BTRFS_RESERVE_NO_FLUSH);
 636                /*
 637                 * Since we're under a transaction reserve_metadata_bytes could
 638                 * try to commit the transaction which will make it return
 639                 * EAGAIN to make us stop the transaction we have, so return
 640                 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
 641                 */
 642                if (ret == -EAGAIN)
 643                        ret = -ENOSPC;
 644                if (!ret) {
 645                        node->bytes_reserved = num_bytes;
 646                        trace_btrfs_space_reservation(root->fs_info,
 647                                                      "delayed_inode",
 648                                                      btrfs_ino(inode),
 649                                                      num_bytes, 1);
 650                }
 651                return ret;
 652        }
 653
 654        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 655
 656        /*
 657         * Migrate only takes a reservation, it doesn't touch the size of the
 658         * block_rsv.  This is to simplify people who don't normally have things
 659         * migrated from their block rsv.  If they go to release their
 660         * reservation, that will decrease the size as well, so if migrate
 661         * reduced size we'd end up with a negative size.  But for the
 662         * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
 663         * but we could in fact do this reserve/migrate dance several times
 664         * between the time we did the original reservation and we'd clean it
 665         * up.  So to take care of this, release the space for the meta
 666         * reservation here.  I think it may be time for a documentation page on
  667         * how block rsvs work.
 668         */
 669        if (!ret) {
 670                trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
 671                                              btrfs_ino(inode), num_bytes, 1);
 672                node->bytes_reserved = num_bytes;
 673        }
 674
 675        if (release) {
 676                trace_btrfs_space_reservation(root->fs_info, "delalloc",
 677                                              btrfs_ino(inode), num_bytes, 0);
 678                btrfs_block_rsv_release(root, src_rsv, num_bytes);
 679        }
 680
 681        return ret;
 682}
 683
 684static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
 685                                                struct btrfs_delayed_node *node)
 686{
 687        struct btrfs_block_rsv *rsv;
 688
 689        if (!node->bytes_reserved)
 690                return;
 691
 692        rsv = &root->fs_info->delayed_block_rsv;
 693        trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
 694                                      node->inode_id, node->bytes_reserved, 0);
 695        btrfs_block_rsv_release(root, rsv,
 696                                node->bytes_reserved);
 697        node->bytes_reserved = 0;
 698}
 699
 700/*
  701 * This helper inserts a batch of contiguous items into the same leaf, as
  702 * many as fit into the leaf's free space.
 703 */
 704static int btrfs_batch_insert_items(struct btrfs_root *root,
 705                                    struct btrfs_path *path,
 706                                    struct btrfs_delayed_item *item)
 707{
 708        struct btrfs_delayed_item *curr, *next;
 709        int free_space;
 710        int total_data_size = 0, total_size = 0;
 711        struct extent_buffer *leaf;
 712        char *data_ptr;
 713        struct btrfs_key *keys;
 714        u32 *data_size;
 715        struct list_head head;
 716        int slot;
 717        int nitems;
 718        int i;
 719        int ret = 0;
 720
 721        BUG_ON(!path->nodes[0]);
 722
 723        leaf = path->nodes[0];
 724        free_space = btrfs_leaf_free_space(root, leaf);
 725        INIT_LIST_HEAD(&head);
 726
 727        next = item;
 728        nitems = 0;
 729
 730        /*
  731         * count the number of contiguous items that we can insert in one batch
 732         */
 733        while (total_size + next->data_len + sizeof(struct btrfs_item) <=
 734               free_space) {
 735                total_data_size += next->data_len;
 736                total_size += next->data_len + sizeof(struct btrfs_item);
 737                list_add_tail(&next->tree_list, &head);
 738                nitems++;
 739
 740                curr = next;
 741                next = __btrfs_next_delayed_item(curr);
 742                if (!next)
 743                        break;
 744
 745                if (!btrfs_is_continuous_delayed_item(curr, next))
 746                        break;
 747        }
 748
 749        if (!nitems) {
 750                ret = 0;
 751                goto out;
 752        }
 753
 754        /*
  755         * we need to allocate some memory, and the allocation might make the
  756         * task sleep, so set all locked nodes in the path to blocking locks
  757         * first.
 758         */
 759        btrfs_set_path_blocking(path);
 760
 761        keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 762        if (!keys) {
 763                ret = -ENOMEM;
 764                goto out;
 765        }
 766
 767        data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
 768        if (!data_size) {
 769                ret = -ENOMEM;
 770                goto error;
 771        }
 772
 773        /* get keys of all the delayed items */
 774        i = 0;
 775        list_for_each_entry(next, &head, tree_list) {
 776                keys[i] = next->key;
 777                data_size[i] = next->data_len;
 778                i++;
 779        }
 780
  781        /* reset all the locked nodes in the path to spinning locks. */
 782        btrfs_clear_path_blocking(path, NULL, 0);
 783
 784        /* insert the keys of the items */
 785        setup_items_for_insert(root, path, keys, data_size,
 786                               total_data_size, total_size, nitems);
 787
 788        /* insert the dir index items */
 789        slot = path->slots[0];
 790        list_for_each_entry_safe(curr, next, &head, tree_list) {
 791                data_ptr = btrfs_item_ptr(leaf, slot, char);
 792                write_extent_buffer(leaf, &curr->data,
 793                                    (unsigned long)data_ptr,
 794                                    curr->data_len);
 795                slot++;
 796
 797                btrfs_delayed_item_release_metadata(root, curr);
 798
 799                list_del(&curr->tree_list);
 800                btrfs_release_delayed_item(curr);
 801        }
 802
 803error:
 804        kfree(data_size);
 805        kfree(keys);
 806out:
 807        return ret;
 808}
 809
 810/*
  811 * This helper handles simple insertions that don't need to extend an existing
  812 * item with new data, such as directory name index and inode item insertions.
 813 */
 814static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 815                                     struct btrfs_root *root,
 816                                     struct btrfs_path *path,
 817                                     struct btrfs_delayed_item *delayed_item)
 818{
 819        struct extent_buffer *leaf;
 820        char *ptr;
 821        int ret;
 822
 823        ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
 824                                      delayed_item->data_len);
 825        if (ret < 0 && ret != -EEXIST)
 826                return ret;
 827
 828        leaf = path->nodes[0];
 829
 830        ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 831
 832        write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
 833                            delayed_item->data_len);
 834        btrfs_mark_buffer_dirty(leaf);
 835
 836        btrfs_delayed_item_release_metadata(root, delayed_item);
 837        return 0;
 838}
 839
 840/*
  841 * We insert one item first, and if contiguous items follow it, we try to
  842 * insert them into the same leaf.
 843 */
 844static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
 845                                      struct btrfs_path *path,
 846                                      struct btrfs_root *root,
 847                                      struct btrfs_delayed_node *node)
 848{
 849        struct btrfs_delayed_item *curr, *prev;
 850        int ret = 0;
 851
 852do_again:
 853        mutex_lock(&node->mutex);
 854        curr = __btrfs_first_delayed_insertion_item(node);
 855        if (!curr)
 856                goto insert_end;
 857
 858        ret = btrfs_insert_delayed_item(trans, root, path, curr);
 859        if (ret < 0) {
 860                btrfs_release_path(path);
 861                goto insert_end;
 862        }
 863
 864        prev = curr;
 865        curr = __btrfs_next_delayed_item(prev);
 866        if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
 867                /* insert the continuous items into the same leaf */
 868                path->slots[0]++;
 869                btrfs_batch_insert_items(root, path, curr);
 870        }
 871        btrfs_release_delayed_item(prev);
 872        btrfs_mark_buffer_dirty(path->nodes[0]);
 873
 874        btrfs_release_path(path);
 875        mutex_unlock(&node->mutex);
 876        goto do_again;
 877
 878insert_end:
 879        mutex_unlock(&node->mutex);
 880        return ret;
 881}
 882
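/*
 * Delete a run of consecutive dir index items from the leaf @path
 * currently points at, matching the leaf entries against the delayed
 * deletion items that start at @item.
 */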
 883static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 884                                    struct btrfs_root *root,
 885                                    struct btrfs_path *path,
 886                                    struct btrfs_delayed_item *item)
 887{
 888        struct btrfs_delayed_item *curr, *next;
 889        struct extent_buffer *leaf;
 890        struct btrfs_key key;
 891        struct list_head head;
 892        int nitems, i, last_item;
 893        int ret = 0;
 894
 895        BUG_ON(!path->nodes[0]);
 896
 897        leaf = path->nodes[0];
 898
 899        i = path->slots[0];
 900        last_item = btrfs_header_nritems(leaf) - 1;
 901        if (i > last_item)
 902                return -ENOENT; /* FIXME: Is errno suitable? */
 903
 904        next = item;
 905        INIT_LIST_HEAD(&head);
 906        btrfs_item_key_to_cpu(leaf, &key, i);
 907        nitems = 0;
 908        /*
  909         * count the number of dir index items that we can delete in one batch
 910         */
 911        while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
 912                list_add_tail(&next->tree_list, &head);
 913                nitems++;
 914
 915                curr = next;
 916                next = __btrfs_next_delayed_item(curr);
 917                if (!next)
 918                        break;
 919
 920                if (!btrfs_is_continuous_delayed_item(curr, next))
 921                        break;
 922
 923                i++;
 924                if (i > last_item)
 925                        break;
 926                btrfs_item_key_to_cpu(leaf, &key, i);
 927        }
 928
 929        if (!nitems)
 930                return 0;
 931
 932        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
 933        if (ret)
 934                goto out;
 935
 936        list_for_each_entry_safe(curr, next, &head, tree_list) {
 937                btrfs_delayed_item_release_metadata(root, curr);
 938                list_del(&curr->tree_list);
 939                btrfs_release_delayed_item(curr);
 940        }
 941
 942out:
 943        return ret;
 944}
 945
 946static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
 947                                      struct btrfs_path *path,
 948                                      struct btrfs_root *root,
 949                                      struct btrfs_delayed_node *node)
 950{
 951        struct btrfs_delayed_item *curr, *prev;
 952        int ret = 0;
 953
 954do_again:
 955        mutex_lock(&node->mutex);
 956        curr = __btrfs_first_delayed_deletion_item(node);
 957        if (!curr)
 958                goto delete_fail;
 959
 960        ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
 961        if (ret < 0)
 962                goto delete_fail;
 963        else if (ret > 0) {
 964                /*
  965                 * can't find the item that this delayed item points to, so the
  966                 * delayed item is stale; just drop it.
 967                 */
 968                prev = curr;
 969                curr = __btrfs_next_delayed_item(prev);
 970                btrfs_release_delayed_item(prev);
 971                ret = 0;
 972                btrfs_release_path(path);
 973                if (curr) {
 974                        mutex_unlock(&node->mutex);
 975                        goto do_again;
 976                } else
 977                        goto delete_fail;
 978        }
 979
 980        btrfs_batch_delete_items(trans, root, path, curr);
 981        btrfs_release_path(path);
 982        mutex_unlock(&node->mutex);
 983        goto do_again;
 984
 985delete_fail:
 986        btrfs_release_path(path);
 987        mutex_unlock(&node->mutex);
 988        return ret;
 989}
 990
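/*
 * Clear the "inode item is dirty" state of a delayed node and account
 * the finished work.
 */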
 991static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
 992{
 993        struct btrfs_delayed_root *delayed_root;
 994
 995        if (delayed_node &&
 996            test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 997                BUG_ON(!delayed_node->root);
 998                clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 999                delayed_node->count--;
1000
1001                delayed_root = delayed_node->root->fs_info->delayed_root;
1002                finish_one_item(delayed_root);
1003        }
1004}
1005
1006static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
1007{
1008        struct btrfs_delayed_root *delayed_root;
1009
1010        ASSERT(delayed_node->root);
1011        clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1012        delayed_node->count--;
1013
1014        delayed_root = delayed_node->root->fs_info->delayed_root;
1015        finish_one_item(delayed_root);
1016}
1017
1018static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1019                                        struct btrfs_root *root,
1020                                        struct btrfs_path *path,
1021                                        struct btrfs_delayed_node *node)
1022{
1023        struct btrfs_key key;
1024        struct btrfs_inode_item *inode_item;
1025        struct extent_buffer *leaf;
1026        int mod;
1027        int ret;
1028
1029        key.objectid = node->inode_id;
1030        key.type = BTRFS_INODE_ITEM_KEY;
1031        key.offset = 0;
1032
1033        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1034                mod = -1;
1035        else
1036                mod = 1;
1037
1038        ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1039        if (ret > 0) {
1040                btrfs_release_path(path);
1041                return -ENOENT;
1042        } else if (ret < 0) {
1043                return ret;
1044        }
1045
1046        leaf = path->nodes[0];
1047        inode_item = btrfs_item_ptr(leaf, path->slots[0],
1048                                    struct btrfs_inode_item);
1049        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1050                            sizeof(struct btrfs_inode_item));
1051        btrfs_mark_buffer_dirty(leaf);
1052
1053        if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1054                goto no_iref;
1055
1056        path->slots[0]++;
1057        if (path->slots[0] >= btrfs_header_nritems(leaf))
1058                goto search;
1059again:
1060        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1061        if (key.objectid != node->inode_id)
1062                goto out;
1063
1064        if (key.type != BTRFS_INODE_REF_KEY &&
1065            key.type != BTRFS_INODE_EXTREF_KEY)
1066                goto out;
1067
1068        /*
 1069         * Delayed iref deletion is only used for an inode with a single link,
 1070         * so there is only one iref. The case of several irefs packed into the
 1071         * same item cannot occur.
1072         */
1073        btrfs_del_item(trans, root, path);
1074out:
1075        btrfs_release_delayed_iref(node);
1076no_iref:
1077        btrfs_release_path(path);
1078err_out:
1079        btrfs_delayed_inode_release_metadata(root, node);
1080        btrfs_release_delayed_inode(node);
1081
1082        return ret;
1083
1084search:
1085        btrfs_release_path(path);
1086
1087        key.type = BTRFS_INODE_EXTREF_KEY;
1088        key.offset = -1;
1089        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1090        if (ret < 0)
1091                goto err_out;
1092        ASSERT(ret);
1093
1094        ret = 0;
1095        leaf = path->nodes[0];
1096        path->slots[0]--;
1097        goto again;
1098}
1099
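/*
 * Copy the delayed inode item into the inode item in the fs tree, but
 * only if the node is marked inode-dirty.  The work is done with the
 * node's mutex held.
 */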
1100static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1101                                             struct btrfs_root *root,
1102                                             struct btrfs_path *path,
1103                                             struct btrfs_delayed_node *node)
1104{
1105        int ret;
1106
1107        mutex_lock(&node->mutex);
1108        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1109                mutex_unlock(&node->mutex);
1110                return 0;
1111        }
1112
1113        ret = __btrfs_update_delayed_inode(trans, root, path, node);
1114        mutex_unlock(&node->mutex);
1115        return ret;
1116}
1117
1118static inline int
1119__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1120                                   struct btrfs_path *path,
1121                                   struct btrfs_delayed_node *node)
1122{
1123        int ret;
1124
1125        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1126        if (ret)
1127                return ret;
1128
1129        ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1130        if (ret)
1131                return ret;
1132
1133        ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1134        return ret;
1135}
1136
1137/*
1138 * Called when committing the transaction.
1139 * Returns 0 on success.
1140 * Returns < 0 on error and returns with an aborted transaction with any
1141 * outstanding delayed items cleaned up.
1142 */
1143static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1144                                     struct btrfs_root *root, int nr)
1145{
1146        struct btrfs_delayed_root *delayed_root;
1147        struct btrfs_delayed_node *curr_node, *prev_node;
1148        struct btrfs_path *path;
1149        struct btrfs_block_rsv *block_rsv;
1150        int ret = 0;
1151        bool count = (nr > 0);
1152
1153        if (trans->aborted)
1154                return -EIO;
1155
1156        path = btrfs_alloc_path();
1157        if (!path)
1158                return -ENOMEM;
1159        path->leave_spinning = 1;
1160
1161        block_rsv = trans->block_rsv;
1162        trans->block_rsv = &root->fs_info->delayed_block_rsv;
1163
1164        delayed_root = btrfs_get_delayed_root(root);
1165
1166        curr_node = btrfs_first_delayed_node(delayed_root);
1167        while (curr_node && (!count || (count && nr--))) {
1168                ret = __btrfs_commit_inode_delayed_items(trans, path,
1169                                                         curr_node);
1170                if (ret) {
1171                        btrfs_release_delayed_node(curr_node);
1172                        curr_node = NULL;
1173                        btrfs_abort_transaction(trans, ret);
1174                        break;
1175                }
1176
1177                prev_node = curr_node;
1178                curr_node = btrfs_next_delayed_node(curr_node);
1179                btrfs_release_delayed_node(prev_node);
1180        }
1181
1182        if (curr_node)
1183                btrfs_release_delayed_node(curr_node);
1184        btrfs_free_path(path);
1185        trans->block_rsv = block_rsv;
1186
1187        return ret;
1188}
1189
1190int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1191                            struct btrfs_root *root)
1192{
1193        return __btrfs_run_delayed_items(trans, root, -1);
1194}
1195
1196int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
1197                               struct btrfs_root *root, int nr)
1198{
1199        return __btrfs_run_delayed_items(trans, root, nr);
1200}
1201
1202int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1203                                     struct inode *inode)
1204{
1205        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1206        struct btrfs_path *path;
1207        struct btrfs_block_rsv *block_rsv;
1208        int ret;
1209
1210        if (!delayed_node)
1211                return 0;
1212
1213        mutex_lock(&delayed_node->mutex);
1214        if (!delayed_node->count) {
1215                mutex_unlock(&delayed_node->mutex);
1216                btrfs_release_delayed_node(delayed_node);
1217                return 0;
1218        }
1219        mutex_unlock(&delayed_node->mutex);
1220
1221        path = btrfs_alloc_path();
1222        if (!path) {
1223                btrfs_release_delayed_node(delayed_node);
1224                return -ENOMEM;
1225        }
1226        path->leave_spinning = 1;
1227
1228        block_rsv = trans->block_rsv;
1229        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1230
1231        ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1232
1233        btrfs_release_delayed_node(delayed_node);
1234        btrfs_free_path(path);
1235        trans->block_rsv = block_rsv;
1236
1237        return ret;
1238}
1239
1240int btrfs_commit_inode_delayed_inode(struct inode *inode)
1241{
1242        struct btrfs_trans_handle *trans;
1243        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1244        struct btrfs_path *path;
1245        struct btrfs_block_rsv *block_rsv;
1246        int ret;
1247
1248        if (!delayed_node)
1249                return 0;
1250
1251        mutex_lock(&delayed_node->mutex);
1252        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1253                mutex_unlock(&delayed_node->mutex);
1254                btrfs_release_delayed_node(delayed_node);
1255                return 0;
1256        }
1257        mutex_unlock(&delayed_node->mutex);
1258
1259        trans = btrfs_join_transaction(delayed_node->root);
1260        if (IS_ERR(trans)) {
1261                ret = PTR_ERR(trans);
1262                goto out;
1263        }
1264
1265        path = btrfs_alloc_path();
1266        if (!path) {
1267                ret = -ENOMEM;
1268                goto trans_out;
1269        }
1270        path->leave_spinning = 1;
1271
1272        block_rsv = trans->block_rsv;
1273        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1274
1275        mutex_lock(&delayed_node->mutex);
1276        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1277                ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1278                                                   path, delayed_node);
1279        else
1280                ret = 0;
1281        mutex_unlock(&delayed_node->mutex);
1282
1283        btrfs_free_path(path);
1284        trans->block_rsv = block_rsv;
1285trans_out:
1286        btrfs_end_transaction(trans, delayed_node->root);
1287        btrfs_btree_balance_dirty(delayed_node->root);
1288out:
1289        btrfs_release_delayed_node(delayed_node);
1290
1291        return ret;
1292}
1293
1294void btrfs_remove_delayed_node(struct inode *inode)
1295{
1296        struct btrfs_delayed_node *delayed_node;
1297
1298        delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
1299        if (!delayed_node)
1300                return;
1301
1302        BTRFS_I(inode)->delayed_node = NULL;
1303        btrfs_release_delayed_node(delayed_node);
1304}
1305
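/*
 * Work item used to flush delayed nodes from the prepared list in the
 * background.  @nr limits how many nodes one worker processes; 0 means
 * keep going until the backlog is low enough.
 */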
1306struct btrfs_async_delayed_work {
1307        struct btrfs_delayed_root *delayed_root;
1308        int nr;
1309        struct btrfs_work work;
1310};
1311
1312static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1313{
1314        struct btrfs_async_delayed_work *async_work;
1315        struct btrfs_delayed_root *delayed_root;
1316        struct btrfs_trans_handle *trans;
1317        struct btrfs_path *path;
1318        struct btrfs_delayed_node *delayed_node = NULL;
1319        struct btrfs_root *root;
1320        struct btrfs_block_rsv *block_rsv;
1321        int total_done = 0;
1322
1323        async_work = container_of(work, struct btrfs_async_delayed_work, work);
1324        delayed_root = async_work->delayed_root;
1325
1326        path = btrfs_alloc_path();
1327        if (!path)
1328                goto out;
1329
1330again:
1331        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
1332                goto free_path;
1333
1334        delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1335        if (!delayed_node)
1336                goto free_path;
1337
1338        path->leave_spinning = 1;
1339        root = delayed_node->root;
1340
1341        trans = btrfs_join_transaction(root);
1342        if (IS_ERR(trans))
1343                goto release_path;
1344
1345        block_rsv = trans->block_rsv;
1346        trans->block_rsv = &root->fs_info->delayed_block_rsv;
1347
1348        __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1349
1350        trans->block_rsv = block_rsv;
1351        btrfs_end_transaction(trans, root);
1352        btrfs_btree_balance_dirty_nodelay(root);
1353
1354release_path:
1355        btrfs_release_path(path);
1356        total_done++;
1357
1358        btrfs_release_prepared_delayed_node(delayed_node);
1359        if (async_work->nr == 0 || total_done < async_work->nr)
1360                goto again;
1361
1362free_path:
1363        btrfs_free_path(path);
1364out:
1365        wake_up(&delayed_root->wait);
1366        kfree(async_work);
1367}
1368
1369
1370static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1371                                     struct btrfs_fs_info *fs_info, int nr)
1372{
1373        struct btrfs_async_delayed_work *async_work;
1374
1375        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1376                return 0;
1377
1378        async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1379        if (!async_work)
1380                return -ENOMEM;
1381
1382        async_work->delayed_root = delayed_root;
1383        btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
1384                        btrfs_async_run_delayed_root, NULL, NULL);
1385        async_work->nr = nr;
1386
1387        btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1388        return 0;
1389}
1390
1391void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
1392{
1393        struct btrfs_delayed_root *delayed_root;
1394        delayed_root = btrfs_get_delayed_root(root);
1395        WARN_ON(btrfs_first_delayed_node(delayed_root));
1396}
1397
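/*
 * Tell a waiter in btrfs_balance_delayed_items() whether it can stop
 * waiting: either a batch of items has completed since it started
 * waiting or the backlog has dropped below the background threshold.
 */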
1398static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1399{
1400        int val = atomic_read(&delayed_root->items_seq);
1401
1402        if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1403                return 1;
1404
1405        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1406                return 1;
1407
1408        return 0;
1409}
1410
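/*
 * Throttle callers when too many delayed items have piled up: kick the
 * async worker and, once the writeback threshold is exceeded, wait
 * until enough items have been flushed.
 */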
1411void btrfs_balance_delayed_items(struct btrfs_root *root)
1412{
1413        struct btrfs_delayed_root *delayed_root;
1414        struct btrfs_fs_info *fs_info = root->fs_info;
1415
1416        delayed_root = btrfs_get_delayed_root(root);
1417
1418        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1419                return;
1420
1421        if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1422                int seq;
1423                int ret;
1424
1425                seq = atomic_read(&delayed_root->items_seq);
1426
1427                ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1428                if (ret)
1429                        return;
1430
1431                wait_event_interruptible(delayed_root->wait,
1432                                         could_end_wait(delayed_root, seq));
1433                return;
1434        }
1435
1436        btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1437}
1438
1439/* Will return 0 or -ENOMEM */
1440int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1441                                   struct btrfs_root *root, const char *name,
1442                                   int name_len, struct inode *dir,
1443                                   struct btrfs_disk_key *disk_key, u8 type,
1444                                   u64 index)
1445{
1446        struct btrfs_delayed_node *delayed_node;
1447        struct btrfs_delayed_item *delayed_item;
1448        struct btrfs_dir_item *dir_item;
1449        int ret;
1450
1451        delayed_node = btrfs_get_or_create_delayed_node(dir);
1452        if (IS_ERR(delayed_node))
1453                return PTR_ERR(delayed_node);
1454
1455        delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1456        if (!delayed_item) {
1457                ret = -ENOMEM;
1458                goto release_node;
1459        }
1460
1461        delayed_item->key.objectid = btrfs_ino(dir);
1462        delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1463        delayed_item->key.offset = index;
1464
1465        dir_item = (struct btrfs_dir_item *)delayed_item->data;
1466        dir_item->location = *disk_key;
1467        btrfs_set_stack_dir_transid(dir_item, trans->transid);
1468        btrfs_set_stack_dir_data_len(dir_item, 0);
1469        btrfs_set_stack_dir_name_len(dir_item, name_len);
1470        btrfs_set_stack_dir_type(dir_item, type);
1471        memcpy((char *)(dir_item + 1), name, name_len);
1472
1473        ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
1474        /*
 1475         * we reserved enough space when we started the transaction, so
 1476         * reserving metadata here cannot fail
1477         */
1478        BUG_ON(ret);
1479
1480
1481        mutex_lock(&delayed_node->mutex);
1482        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1483        if (unlikely(ret)) {
 1484                btrfs_err(root->fs_info, "failed to add delayed dir index item (name: %.*s) "
 1485                                "into the insertion tree of the delayed node "
 1486                                "(root id: %llu, inode id: %llu, errno: %d)",
1487                                name_len, name, delayed_node->root->objectid,
1488                                delayed_node->inode_id, ret);
1489                BUG();
1490        }
1491        mutex_unlock(&delayed_node->mutex);
1492
1493release_node:
1494        btrfs_release_delayed_node(delayed_node);
1495        return ret;
1496}
1497
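/*
 * If a matching dir index insertion is still pending in the delayed
 * node, drop it so that the insertion and the deletion cancel out
 * without ever touching the fs tree.  Returns 0 if cancelled, 1 if no
 * pending insertion was found.
 */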
1498static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
1499                                               struct btrfs_delayed_node *node,
1500                                               struct btrfs_key *key)
1501{
1502        struct btrfs_delayed_item *item;
1503
1504        mutex_lock(&node->mutex);
1505        item = __btrfs_lookup_delayed_insertion_item(node, key);
1506        if (!item) {
1507                mutex_unlock(&node->mutex);
1508                return 1;
1509        }
1510
1511        btrfs_delayed_item_release_metadata(root, item);
1512        btrfs_release_delayed_item(item);
1513        mutex_unlock(&node->mutex);
1514        return 0;
1515}
1516
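/*
 * Delete the dir index item @index of @dir through the delayed tree.  If a
 * delayed insertion item for the same index is still pending, the two just
 * cancel each other; otherwise a delayed deletion item is added to the node.
 */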
1517int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1518                                   struct btrfs_root *root, struct inode *dir,
1519                                   u64 index)
1520{
1521        struct btrfs_delayed_node *node;
1522        struct btrfs_delayed_item *item;
1523        struct btrfs_key item_key;
1524        int ret;
1525
1526        node = btrfs_get_or_create_delayed_node(dir);
1527        if (IS_ERR(node))
1528                return PTR_ERR(node);
1529
1530        item_key.objectid = btrfs_ino(dir);
1531        item_key.type = BTRFS_DIR_INDEX_KEY;
1532        item_key.offset = index;
1533
1534        ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
1535        if (!ret)
1536                goto end;
1537
1538        item = btrfs_alloc_delayed_item(0);
1539        if (!item) {
1540                ret = -ENOMEM;
1541                goto end;
1542        }
1543
1544        item->key = item_key;
1545
1546        ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
1547        /*
1548         * We have reserved enough space when we started a new transaction,
1549         * so reserving metadata here cannot fail.
1550         */
1551        BUG_ON(ret);
1552
1553        mutex_lock(&node->mutex);
1554        ret = __btrfs_add_delayed_deletion_item(node, item);
1555        if (unlikely(ret)) {
1556                btrfs_err(root->fs_info,
1557                          "failed to add delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1558                          index, node->root->objectid,
1559                          node->inode_id,
1560                          ret);
1561                BUG();
1562        }
1563        mutex_unlock(&node->mutex);
1564end:
1565        btrfs_release_delayed_node(node);
1566        return ret;
1567}
1568
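/*
 * Copy the directory index counter cached in the delayed node into the
 * btrfs inode.  Returns -ENOENT if the inode has no delayed node, -EINVAL
 * if the delayed node does not carry an index counter yet.
 */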
1569int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1570{
1571        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1572
1573        if (!delayed_node)
1574                return -ENOENT;
1575
1576        /*
1577         * We hold the i_mutex of this directory, so no new directory index
1578         * can be added to the delayed node and index_cnt cannot change
1579         * under us.  There is no need to lock the delayed node.
1580         */
1581        if (!delayed_node->index_cnt) {
1582                btrfs_release_delayed_node(delayed_node);
1583                return -EINVAL;
1584        }
1585
1586        BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
1587        btrfs_release_delayed_node(delayed_node);
1588        return 0;
1589}
1590
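/*
 * Collect the pending insertion and deletion items of @inode's delayed node
 * on @ins_list and @del_list so readdir can merge them with the dir items
 * already committed on disk.  Each item gets an extra reference, dropped
 * later by btrfs_readdir_put_delayed_items().  Returns false if the inode
 * has no delayed node, true otherwise (with i_rwsem upgraded to a write
 * lock).
 *
 * A rough sketch of how the btrfs readdir path is expected to pair these
 * calls:
 *
 *	if (btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list)) {
 *		... emit the on-disk dir items, skipping any index for
 *		... which btrfs_should_delete_dir_index() returns 1 ...
 *		btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
 *		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
 *	}
 */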
1591bool btrfs_readdir_get_delayed_items(struct inode *inode,
1592                                     struct list_head *ins_list,
1593                                     struct list_head *del_list)
1594{
1595        struct btrfs_delayed_node *delayed_node;
1596        struct btrfs_delayed_item *item;
1597
1598        delayed_node = btrfs_get_delayed_node(inode);
1599        if (!delayed_node)
1600                return false;
1601
1602        /*
1603         * We can only do one readdir with delayed items at a time because of
1604         * item->readdir_list.
1605         */
1606        inode_unlock_shared(inode);
1607        inode_lock(inode);
1608
1609        mutex_lock(&delayed_node->mutex);
1610        item = __btrfs_first_delayed_insertion_item(delayed_node);
1611        while (item) {
1612                atomic_inc(&item->refs);
1613                list_add_tail(&item->readdir_list, ins_list);
1614                item = __btrfs_next_delayed_item(item);
1615        }
1616
1617        item = __btrfs_first_delayed_deletion_item(delayed_node);
1618        while (item) {
1619                atomic_inc(&item->refs);
1620                list_add_tail(&item->readdir_list, del_list);
1621                item = __btrfs_next_delayed_item(item);
1622        }
1623        mutex_unlock(&delayed_node->mutex);
1624        /*
1625         * This delayed node is still cached in the btrfs inode, so refs
1626         * must be > 1 now, and we need not check whether it is about to
1627         * be freed.
1628         *
1629         * Besides that, this function is only used for readdir, and no
1630         * delayed items are inserted or deleted during that time, so we
1631         * need not requeue or dequeue this delayed node either.
1632         */
1633        atomic_dec(&delayed_node->refs);
1634
1635        return true;
1636}
1637
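/*
 * Drop the references taken by btrfs_readdir_get_delayed_items() on the
 * items of @ins_list and @del_list, free the ones whose refcount drops to
 * zero, and downgrade i_rwsem back to the read lock the VFS expects.
 */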
1638void btrfs_readdir_put_delayed_items(struct inode *inode,
1639                                     struct list_head *ins_list,
1640                                     struct list_head *del_list)
1641{
1642        struct btrfs_delayed_item *curr, *next;
1643
1644        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1645                list_del(&curr->readdir_list);
1646                if (atomic_dec_and_test(&curr->refs))
1647                        kfree(curr);
1648        }
1649
1650        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1651                list_del(&curr->readdir_list);
1652                if (atomic_dec_and_test(&curr->refs))
1653                        kfree(curr);
1654        }
1655
1656        /*
1657         * The VFS is going to do up_read(), so we need to downgrade back to a
1658         * read lock.
1659         */
1660        downgrade_write(&inode->i_rwsem);
1661}
1662
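/*
 * Check whether dir index @index has a pending delayed deletion on
 * @del_list.  Entries with a smaller offset are consumed along the way, so
 * the caller must walk the indexes in ascending order.  Returns 1 when the
 * index is scheduled for deletion and must be skipped by readdir, 0
 * otherwise.
 */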
1663int btrfs_should_delete_dir_index(struct list_head *del_list,
1664                                  u64 index)
1665{
1666        struct btrfs_delayed_item *curr, *next;
1667        int ret;
1668
1669        if (list_empty(del_list))
1670                return 0;
1671
1672        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1673                if (curr->key.offset > index)
1674                        break;
1675
1676                list_del(&curr->readdir_list);
1677                ret = (curr->key.offset == index);
1678
1679                if (atomic_dec_and_test(&curr->refs))
1680                        kfree(curr);
1681
1682                if (ret)
1683                        return 1;
1686        }
1687        return 0;
1688}
1689
1690/*
1691 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1692 * and emit it through @ctx; returns 1 when dir_emit() asks us to stop.
1693 */
1694int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1695                                    struct list_head *ins_list, bool *emitted)
1696{
1697        struct btrfs_dir_item *di;
1698        struct btrfs_delayed_item *curr, *next;
1699        struct btrfs_key location;
1700        char *name;
1701        int name_len;
1702        int over = 0;
1703        unsigned char d_type;
1704
1705        if (list_empty(ins_list))
1706                return 0;
1707
1708        /*
1709         * The data of a delayed item never changes once it is created, so
1710         * we need not lock the items.  And since we hold the i_mutex of the
1711         * directory, nobody can delete any directory index now.
1712         */
1713        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1714                list_del(&curr->readdir_list);
1715
1716                if (curr->key.offset < ctx->pos) {
1717                        if (atomic_dec_and_test(&curr->refs))
1718                                kfree(curr);
1719                        continue;
1720                }
1721
1722                ctx->pos = curr->key.offset;
1723
1724                di = (struct btrfs_dir_item *)curr->data;
1725                name = (char *)(di + 1);
1726                name_len = btrfs_stack_dir_name_len(di);
1727
1728                d_type = btrfs_filetype_table[di->type];
1729                btrfs_disk_key_to_cpu(&location, &di->location);
1730
1731                over = !dir_emit(ctx, name, name_len,
1732                               location.objectid, d_type);
1733
1734                if (atomic_dec_and_test(&curr->refs))
1735                        kfree(curr);
1736
1737                if (over)
1738                        return 1;
1739                *emitted = true;
1740        }
1741        return 0;
1742}
1743
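/*
 * Copy the current in-memory inode state into the stack inode item kept in
 * the delayed node, so it can later be flushed into the inode item in the
 * fs tree.
 */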
1744static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1745                                  struct btrfs_inode_item *inode_item,
1746                                  struct inode *inode)
1747{
1748        btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1749        btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1750        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1751        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1752        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1753        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1754        btrfs_set_stack_inode_generation(inode_item,
1755                                         BTRFS_I(inode)->generation);
1756        btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
1757        btrfs_set_stack_inode_transid(inode_item, trans->transid);
1758        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1759        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1760        btrfs_set_stack_inode_block_group(inode_item, 0);
1761
1762        btrfs_set_stack_timespec_sec(&inode_item->atime,
1763                                     inode->i_atime.tv_sec);
1764        btrfs_set_stack_timespec_nsec(&inode_item->atime,
1765                                      inode->i_atime.tv_nsec);
1766
1767        btrfs_set_stack_timespec_sec(&inode_item->mtime,
1768                                     inode->i_mtime.tv_sec);
1769        btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1770                                      inode->i_mtime.tv_nsec);
1771
1772        btrfs_set_stack_timespec_sec(&inode_item->ctime,
1773                                     inode->i_ctime.tv_sec);
1774        btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1775                                      inode->i_ctime.tv_nsec);
1776
1777        btrfs_set_stack_timespec_sec(&inode_item->otime,
1778                                     BTRFS_I(inode)->i_otime.tv_sec);
1779        btrfs_set_stack_timespec_nsec(&inode_item->otime,
1780                                     BTRFS_I(inode)->i_otime.tv_nsec);
1781}
1782
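/*
 * Fill the VFS inode from the inode item cached in its delayed node when
 * that copy is newer than the on-disk one (BTRFS_DELAYED_NODE_INODE_DIRTY
 * is set).  Returns -ENOENT if there is no usable delayed copy and the
 * caller must read the inode item from the fs tree instead.
 */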
1783int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1784{
1785        struct btrfs_delayed_node *delayed_node;
1786        struct btrfs_inode_item *inode_item;
1787
1788        delayed_node = btrfs_get_delayed_node(inode);
1789        if (!delayed_node)
1790                return -ENOENT;
1791
1792        mutex_lock(&delayed_node->mutex);
1793        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1794                mutex_unlock(&delayed_node->mutex);
1795                btrfs_release_delayed_node(delayed_node);
1796                return -ENOENT;
1797        }
1798
1799        inode_item = &delayed_node->inode_item;
1800
1801        i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1802        i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1803        btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
1804        inode->i_mode = btrfs_stack_inode_mode(inode_item);
1805        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1806        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1807        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1808        BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1809
1810        inode->i_version = btrfs_stack_inode_sequence(inode_item);
1811        inode->i_rdev = 0;
1812        *rdev = btrfs_stack_inode_rdev(inode_item);
1813        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1814
1815        inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1816        inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1817
1818        inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1819        inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1820
1821        inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1822        inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1823
1824        BTRFS_I(inode)->i_otime.tv_sec =
1825                btrfs_stack_timespec_sec(&inode_item->otime);
1826        BTRFS_I(inode)->i_otime.tv_nsec =
1827                btrfs_stack_timespec_nsec(&inode_item->otime);
1828
1829        inode->i_generation = BTRFS_I(inode)->generation;
1830        BTRFS_I(inode)->index_cnt = (u64)-1;
1831
1832        mutex_unlock(&delayed_node->mutex);
1833        btrfs_release_delayed_node(delayed_node);
1834        return 0;
1835}
1836
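/*
 * Record an inode update in the delayed node instead of updating the fs
 * tree directly.  The first update reserves metadata space and marks the
 * node's inode item dirty; later updates only refresh the cached copy.
 */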
1837int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1838                               struct btrfs_root *root, struct inode *inode)
1839{
1840        struct btrfs_delayed_node *delayed_node;
1841        int ret = 0;
1842
1843        delayed_node = btrfs_get_or_create_delayed_node(inode);
1844        if (IS_ERR(delayed_node))
1845                return PTR_ERR(delayed_node);
1846
1847        mutex_lock(&delayed_node->mutex);
1848        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1849                fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1850                goto release_node;
1851        }
1852
1853        ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
1854                                                   delayed_node);
1855        if (ret)
1856                goto release_node;
1857
1858        fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1859        set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1860        delayed_node->count++;
1861        atomic_inc(&root->fs_info->delayed_root->items);
1862release_node:
1863        mutex_unlock(&delayed_node->mutex);
1864        btrfs_release_delayed_node(delayed_node);
1865        return ret;
1866}
1867
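/*
 * Queue the deletion of the single inode ref of @inode (i_nlink == 1) in
 * its delayed node, so it can be removed together with the delayed inode
 * item.
 */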
1868int btrfs_delayed_delete_inode_ref(struct inode *inode)
1869{
1870        struct btrfs_delayed_node *delayed_node;
1871
1872        /*
1873         * We don't do delayed inode updates during log recovery because it
1874         * leads to ENOSPC problems.  This means we also can't do delayed
1875         * inode refs.
1876         */
1877        if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
1878                return -EAGAIN;
1879
1880        delayed_node = btrfs_get_or_create_delayed_node(inode);
1881        if (IS_ERR(delayed_node))
1882                return PTR_ERR(delayed_node);
1883
1884        /*
1885         * We don't reserve space for the inode ref deletion because:
1886         * - We ONLY do async inode ref deletion for an inode that has only
1887         *   one link (i_nlink == 1), which means there is only one inode ref.
1888         *   In most cases the inode ref and the inode item are in the same
1889         *   leaf, and we will deal with them at the same time.  Since we are
1890         *   sure we will reserve the space for the inode item, it is
1891         *   unnecessary to reserve space for the inode ref deletion as well.
1892         * - If the inode ref and the inode item are not in the same leaf,
1893         *   we still needn't worry about ENOSPC, because we reserve much
1894         *   more space for the inode update than it actually needs.
1895         * - At worst, we can steal some space from the global reservation,
1896         *   but that is very rare.
1897         */
1898        mutex_lock(&delayed_node->mutex);
1899        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1900                goto release_node;
1901
1902        set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1903        delayed_node->count++;
1904        atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
1905release_node:
1906        mutex_unlock(&delayed_node->mutex);
1907        btrfs_release_delayed_node(delayed_node);
1908        return 0;
1909}
1910
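/*
 * Throw away everything a delayed node still holds: pending insertion and
 * deletion items, a queued inode ref deletion and a dirty inode item, and
 * give back the metadata space reserved for them.
 */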
1911static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1912{
1913        struct btrfs_root *root = delayed_node->root;
1914        struct btrfs_delayed_item *curr_item, *prev_item;
1915
1916        mutex_lock(&delayed_node->mutex);
1917        curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1918        while (curr_item) {
1919                btrfs_delayed_item_release_metadata(root, curr_item);
1920                prev_item = curr_item;
1921                curr_item = __btrfs_next_delayed_item(prev_item);
1922                btrfs_release_delayed_item(prev_item);
1923        }
1924
1925        curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1926        while (curr_item) {
1927                btrfs_delayed_item_release_metadata(root, curr_item);
1928                prev_item = curr_item;
1929                curr_item = __btrfs_next_delayed_item(prev_item);
1930                btrfs_release_delayed_item(prev_item);
1931        }
1932
1933        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1934                btrfs_release_delayed_iref(delayed_node);
1935
1936        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1937                btrfs_delayed_inode_release_metadata(root, delayed_node);
1938                btrfs_release_delayed_inode(delayed_node);
1939        }
1940        mutex_unlock(&delayed_node->mutex);
1941}
1942
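/*
 * Discard all delayed items of a single inode, typically when the inode is
 * being deleted and its pending updates no longer need to hit the disk.
 */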
1943void btrfs_kill_delayed_inode_items(struct inode *inode)
1944{
1945        struct btrfs_delayed_node *delayed_node;
1946
1947        delayed_node = btrfs_get_delayed_node(inode);
1948        if (!delayed_node)
1949                return;
1950
1951        __btrfs_kill_delayed_node(delayed_node);
1952        btrfs_release_delayed_node(delayed_node);
1953}
1954
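/*
 * Walk the radix tree of @root in batches of up to 8 delayed nodes and kill
 * every one of them, e.g. when a dead subvolume root is being cleaned up.
 */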
1955void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1956{
1957        u64 inode_id = 0;
1958        struct btrfs_delayed_node *delayed_nodes[8];
1959        int i, n;
1960
1961        while (1) {
1962                spin_lock(&root->inode_lock);
1963                n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1964                                           (void **)delayed_nodes, inode_id,
1965                                           ARRAY_SIZE(delayed_nodes));
1966                if (!n) {
1967                        spin_unlock(&root->inode_lock);
1968                        break;
1969                }
1970
1971                inode_id = delayed_nodes[n - 1]->inode_id + 1;
1972
1973                for (i = 0; i < n; i++)
1974                        atomic_inc(&delayed_nodes[i]->refs);
1975                spin_unlock(&root->inode_lock);
1976
1977                for (i = 0; i < n; i++) {
1978                        __btrfs_kill_delayed_node(delayed_nodes[i]);
1979                        btrfs_release_delayed_node(delayed_nodes[i]);
1980                }
1981        }
1982}
1983
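/*
 * Kill every delayed node still queued on the delayed root; meant for the
 * error/cleanup paths where pending delayed work must not survive an
 * aborted transaction.
 */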
1984void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
1985{
1986        struct btrfs_delayed_root *delayed_root;
1987        struct btrfs_delayed_node *curr_node, *prev_node;
1988
1989        delayed_root = btrfs_get_delayed_root(root);
1990
1991        curr_node = btrfs_first_delayed_node(delayed_root);
1992        while (curr_node) {
1993                __btrfs_kill_delayed_node(curr_node);
1994
1995                prev_node = curr_node;
1996                curr_node = btrfs_next_delayed_node(curr_node);
1997                btrfs_release_delayed_node(prev_node);
1998        }
1999}
2000