linux/fs/btrfs/delayed-inode.c
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

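/*
 * Tuning knobs for the delayed-item machinery (see the balance and
 * async-flush helpers near the end of this file): once more than
 * BTRFS_DELAYED_BACKGROUND items are pending, background flushing is
 * kicked off; at BTRFS_DELAYED_WRITEBACK the caller itself blocks
 * until enough items have been written back; BTRFS_DELAYED_BATCH is
 * the granularity used when waking waiters and when asking a worker
 * to process a bounded batch.
 */
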
static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

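/*
 * Two delayed items are "continuous" when they are DIR_INDEX items of
 * the same directory with adjacent index offsets.  For example
 * (illustrative numbers), (ino 257, DIR_INDEX, 5) and
 * (ino 257, DIR_INDEX, 6) are continuous, so they land next to each
 * other in the tree and can be inserted or deleted in one batch.
 */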
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

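/*
 * Look up the delayed node cached in the btrfs inode or stored in the
 * root's radix tree.  Every successful lookup takes one reference for
 * the caller; when the node is first attached to the inode, a second
 * reference is taken on behalf of the inode's cached pointer.  The
 * caller must drop its reference with btrfs_release_delayed_node().
 */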
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		refcount_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
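/*
 * A minimal usage sketch (illustrative only, error handling elided):
 *
 *	node = btrfs_get_or_create_delayed_node(dir);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *	... queue insertion/deletion items on node ...
 *	btrfs_release_delayed_node(node);
 */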
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call this with delayed_node->mutex held.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call this with delayed_node->mutex held. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		bool free = false;
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (refcount_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

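/*
 * A delayed item and its payload are one allocation: the item header
 * is followed immediately by @data_len bytes of item data (e.g. a
 * struct btrfs_dir_item plus the name), so a single kmalloc()/kfree()
 * pair covers both.
 */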
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:  the rb-tree that holds the delayed items
 * @key:   the key to look up
 * @prev:  used to store the prev item if the right item isn't found
 * @next:  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item
 * and the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
			delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

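/*
 * Move the metadata reservation for one tree item from the
 * transaction's block reserve into the global delayed_block_rsv, so
 * the space stays accounted for until the delayed item is finally
 * written out or dropped; btrfs_delayed_item_release_metadata() below
 * is its counterpart.
 */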
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * If our block_rsv is the delalloc block reserve then check and see if
	 * we have our extra reservation for updating the inode.  If not fall
	 * through and try to reserve space quickly.
	 *
	 * We used to try and steal from the delalloc block rsv or the global
	 * reserve, but we'd steal a full reservation, which isn't kind.  We are
	 * here through delalloc which means we've likely just cowed down close
	 * to the leaf that contains the inode, so we would steal less just
	 * doing the fallback inode update, so if we do end up having to steal
	 * from the global block rsv we hopefully only steal one or two blocks
	 * worth which is less likely to hurt us.
	 */
	if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&inode->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &inode->runtime_flags))
			release = true;
		else
			src_rsv = NULL;
		spin_unlock(&inode->lock);
	}

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which, for speed, doesn't reserve space.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space here.
	 *
	 * If src_rsv is the delalloc block reserve we'll just let it steal,
	 * since we are already accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);

	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have things
	 * migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up.  So to take care of this, release the space for the meta
	 * reservation here.  I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
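/*
 * Illustrative sizing math: each batched item consumes
 * data_len + sizeof(struct btrfs_item) bytes of leaf space.  With, for
 * example, 1000 bytes free in the leaf and dir index items carrying
 * 40 bytes of data each, at most 1000 / (40 + sizeof(struct btrfs_item))
 * items fit in a single batch.
 */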
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, which might cause the task to
	 * sleep, so set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles simple insertions that don't need to extend an
 * existing item for new data, such as directory name index insertion
 * and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}

/*
 * We insert an item first, and then, if there are continuous items, we
 * try to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

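/*
 * Delete a run of continuous dir index items with one btrfs_del_items()
 * call.  @path must already point at the leaf slot of the first item;
 * the counting loop below walks forward through the leaf as long as the
 * cached delayed items keep matching the on-disk keys.
 */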
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which this delayed item points to,
		 * so it is stale; just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

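/*
 * Write the cached inode item back into the fs/subvolume tree.  When
 * BTRFS_DELAYED_NODE_DEL_IREF is set, the inode's single
 * INODE_REF/EXTREF item is deleted in the same pass, either right
 * after the inode item or via the extra search at the "search" label.
 */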
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link,
	 * so there is only one iref.  The case where several irefs are
	 * in the same item doesn't exist.
	 */
	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

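/*
 * Flush everything this delayed node holds, in dependency order:
 * pending insertions first, then pending deletions, then the cached
 * inode item itself.
 */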
static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

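/*
 * Work item handed to the delayed_workers workqueue.  @nr bounds how
 * many delayed nodes one worker run may process; nr == 0 means "keep
 * going until the backlog is low or BTRFS_DELAYED_WRITEBACK nodes have
 * been handled".
 */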
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty_nodelay(root->fs_info);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
	    total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

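/*
 * A waiter may stop sleeping once another BTRFS_DELAYED_BATCH items
 * have been finished since it went to sleep (items_seq has moved past
 * seq + BTRFS_DELAYED_BATCH, or has wrapped), or once the backlog has
 * dropped below the background threshold.
 */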
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
	/*
	 * We reserved enough space when we started a new transaction, so
	 * reserving metadata for this item cannot fail.
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
1490
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
                                               struct btrfs_delayed_node *node,
                                               struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        mutex_lock(&node->mutex);
        item = __btrfs_lookup_delayed_insertion_item(node, key);
        if (!item) {
                mutex_unlock(&node->mutex);
                return 1;
        }

        btrfs_delayed_item_release_metadata(fs_info, item);
        btrfs_release_delayed_item(item);
        mutex_unlock(&node->mutex);
        return 0;
}

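/*
 * Delete the dir index item with the given index from the directory.  If a
 * matching insertion item is still pending in the delayed node, the two
 * operations simply cancel out; otherwise a deletion item is queued in the
 * deletion tree of the delayed node.
 */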
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_inode *dir, u64 index)
{
        struct btrfs_delayed_node *node;
        struct btrfs_delayed_item *item;
        struct btrfs_key item_key;
        int ret;

        node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(node))
                return PTR_ERR(node);

        item_key.objectid = btrfs_ino(dir);
        item_key.type = BTRFS_DIR_INDEX_KEY;
        item_key.offset = index;

        ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
        if (!ret)
                goto end;

        item = btrfs_alloc_delayed_item(0);
        if (!item) {
                ret = -ENOMEM;
                goto end;
        }

        item->key = item_key;

        ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
        /*
         * We reserved enough space when we started the transaction, so
         * reserving metadata for this item must not fail.
         */
        BUG_ON(ret);

        mutex_lock(&node->mutex);
        ret = __btrfs_add_delayed_deletion_item(node, item);
        if (unlikely(ret)) {
                btrfs_err(fs_info,
                          "error adding delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
                          index, node->root->objectid, node->inode_id, ret);
                BUG();
        }
        mutex_unlock(&node->mutex);
end:
        btrfs_release_delayed_node(node);
        return ret;
}

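/*
 * Copy the directory index counter cached in the delayed node back into the
 * btrfs inode.  Returns -ENOENT if the inode has no delayed node and -EINVAL
 * if the delayed node has no index counter yet.
 */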
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

        if (!delayed_node)
                return -ENOENT;

        /*
         * Since we hold the i_mutex of this directory, it is impossible
         * that a new directory index is added into the delayed node and
         * index_cnt is updated now, so we needn't lock the delayed node.
         */
        if (!delayed_node->index_cnt) {
                btrfs_release_delayed_node(delayed_node);
                return -EINVAL;
        }

        inode->index_cnt = delayed_node->index_cnt;
        btrfs_release_delayed_node(delayed_node);
        return 0;
}

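/*
 * Collect the pending insertion and deletion items of the directory's
 * delayed node on @ins_list and @del_list so that readdir can merge them
 * with the directory entries found in the b-tree.  Returns false if the
 * inode has no delayed node.
 */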
bool btrfs_readdir_get_delayed_items(struct inode *inode,
                                     struct list_head *ins_list,
                                     struct list_head *del_list)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *item;

        delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
        if (!delayed_node)
                return false;

        /*
         * We can only do one readdir with delayed items at a time because of
         * item->readdir_list.
         */
        inode_unlock_shared(inode);
        inode_lock(inode);

        mutex_lock(&delayed_node->mutex);
        item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (item) {
                refcount_inc(&item->refs);
                list_add_tail(&item->readdir_list, ins_list);
                item = __btrfs_next_delayed_item(item);
        }

        item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (item) {
                refcount_inc(&item->refs);
                list_add_tail(&item->readdir_list, del_list);
                item = __btrfs_next_delayed_item(item);
        }
        mutex_unlock(&delayed_node->mutex);
        /*
         * This delayed node is still cached in the btrfs inode, so refs
         * must be > 1 now, and we needn't check whether it is going to be
         * freed or not.
         *
         * Besides that, this function is used for readdir, and we do not
         * insert/delete delayed items during that period, so we needn't
         * requeue or dequeue this delayed node.
         */
        refcount_dec(&delayed_node->refs);

        return true;
}

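/*
 * Undo btrfs_readdir_get_delayed_items(): drop the references taken on the
 * collected items and downgrade the inode lock back to shared for the VFS.
 */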
void btrfs_readdir_put_delayed_items(struct inode *inode,
                                     struct list_head *ins_list,
                                     struct list_head *del_list)
{
        struct btrfs_delayed_item *curr, *next;

        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (refcount_dec_and_test(&curr->refs))
                        kfree(curr);
        }

        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (refcount_dec_and_test(&curr->refs))
                        kfree(curr);
        }

        /*
         * The VFS is going to do up_read(), so we need to downgrade back to a
         * read lock.
         */
        downgrade_write(&inode->i_rwsem);
}

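/*
 * Check whether the given directory index is covered by a pending deletion
 * item, in which case readdir must skip it.  @del_list is sorted by index,
 * so every entry below the current index can be dropped as we go.
 */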
int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index)
{
        struct btrfs_delayed_item *curr, *next;
        int ret;

        if (list_empty(del_list))
                return 0;

        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                if (curr->key.offset > index)
                        break;

                list_del(&curr->readdir_list);
                ret = (curr->key.offset == index);

                if (refcount_dec_and_test(&curr->refs))
                        kfree(curr);

                if (ret)
                        return 1;
        }
        return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
                                    struct list_head *ins_list)
{
        struct btrfs_dir_item *di;
        struct btrfs_delayed_item *curr, *next;
        struct btrfs_key location;
        char *name;
        int name_len;
        int over = 0;
        unsigned char d_type;

        if (list_empty(ins_list))
                return 0;

        /*
         * The data of the delayed items is never changed, so we needn't
         * lock them.  And we hold the i_mutex of the directory, so nobody
         * can delete any directory indexes now.
         */
        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);

                if (curr->key.offset < ctx->pos) {
                        if (refcount_dec_and_test(&curr->refs))
                                kfree(curr);
                        continue;
                }

                ctx->pos = curr->key.offset;

                di = (struct btrfs_dir_item *)curr->data;
                name = (char *)(di + 1);
                name_len = btrfs_stack_dir_name_len(di);

                d_type = btrfs_filetype_table[di->type];
                btrfs_disk_key_to_cpu(&location, &di->location);

                over = !dir_emit(ctx, name, name_len,
                                 location.objectid, d_type);

                if (refcount_dec_and_test(&curr->refs))
                        kfree(curr);

                if (over)
                        return 1;
        }
        return 0;
}

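/*
 * Copy the current VFS/btrfs inode state into the stack inode item that is
 * cached in the delayed node until it is flushed to the b-tree.
 */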
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
                                  struct btrfs_inode_item *inode_item,
                                  struct inode *inode)
{
        btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
        btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
        btrfs_set_stack_inode_generation(inode_item,
                                         BTRFS_I(inode)->generation);
        btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
        btrfs_set_stack_inode_transid(inode_item, trans->transid);
        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
        btrfs_set_stack_inode_block_group(inode_item, 0);

        btrfs_set_stack_timespec_sec(&inode_item->atime,
                                     inode->i_atime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->atime,
                                      inode->i_atime.tv_nsec);

        btrfs_set_stack_timespec_sec(&inode_item->mtime,
                                     inode->i_mtime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->mtime,
                                      inode->i_mtime.tv_nsec);

        btrfs_set_stack_timespec_sec(&inode_item->ctime,
                                     inode->i_ctime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->ctime,
                                      inode->i_ctime.tv_nsec);

        btrfs_set_stack_timespec_sec(&inode_item->otime,
                                     BTRFS_I(inode)->i_otime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->otime,
                                      BTRFS_I(inode)->i_otime.tv_nsec);
}

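/*
 * Fill the VFS inode from the inode item cached in the delayed node, which
 * is newer than the copy stored in the b-tree.  Returns -ENOENT if there is
 * no delayed node or its inode item is not dirty.
 */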
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_inode_item *inode_item;

        delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
        if (!delayed_node)
                return -ENOENT;

        mutex_lock(&delayed_node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return -ENOENT;
        }

        inode_item = &delayed_node->inode_item;

        i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
        i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
        btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
        inode->i_mode = btrfs_stack_inode_mode(inode_item);
        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
        BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

        inode->i_version = btrfs_stack_inode_sequence(inode_item);
        inode->i_rdev = 0;
        *rdev = btrfs_stack_inode_rdev(inode_item);
        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

        inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
        inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

        inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
        inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

        inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
        inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

        BTRFS_I(inode)->i_otime.tv_sec =
                btrfs_stack_timespec_sec(&inode_item->otime);
        BTRFS_I(inode)->i_otime.tv_nsec =
                btrfs_stack_timespec_nsec(&inode_item->otime);

        inode->i_generation = BTRFS_I(inode)->generation;
        BTRFS_I(inode)->index_cnt = (u64)-1;

        mutex_unlock(&delayed_node->mutex);
        btrfs_release_delayed_node(delayed_node);
        return 0;
}

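/*
 * Record an inode update in the delayed node instead of updating the inode
 * item in the b-tree directly.  Metadata space is reserved only the first
 * time the cached inode item is made dirty; later updates just refresh it.
 */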
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node;
        int ret = 0;

        delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        mutex_lock(&delayed_node->mutex);
        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
                goto release_node;
        }

        ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
                                                   delayed_node);
        if (ret)
                goto release_node;

        fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
        set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
        delayed_node->count++;
        atomic_inc(&root->fs_info->delayed_root->items);
release_node:
        mutex_unlock(&delayed_node->mutex);
        btrfs_release_delayed_node(delayed_node);
        return ret;
}

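/*
 * Queue the deletion of the single inode ref of an inode with i_nlink == 1
 * in its delayed node.  Returns -EAGAIN during log recovery so that the
 * caller falls back to deleting the ref synchronously.
 */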
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
        struct btrfs_delayed_node *delayed_node;

        /*
         * We don't do delayed inode updates during log recovery because it
         * leads to enospc problems.  This means we also can't do delayed
         * inode refs.
         */
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                return -EAGAIN;

        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        /*
         * We don't reserve space for the inode ref deletion because:
         * - We ONLY do async inode ref deletion for an inode that has only
         *   one link (i_nlink == 1), which means there is only one inode
         *   ref.  In most cases, the inode ref and the inode item are in
         *   the same leaf, and we will deal with them at the same time.
         *   Since we are sure we will reserve space for the inode item, it
         *   is unnecessary to reserve space for the inode ref deletion.
         * - If the inode ref and the inode item are not in the same leaf,
         *   we needn't worry about an enospc problem either, because we
         *   reserve much more space for the inode update than it needs.
         * - In the worst case, we can steal some space from the global
         *   reservation, but that is very rare.
         */
        mutex_lock(&delayed_node->mutex);
        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
                goto release_node;

        set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
        delayed_node->count++;
        atomic_inc(&fs_info->delayed_root->items);
release_node:
        mutex_unlock(&delayed_node->mutex);
        btrfs_release_delayed_node(delayed_node);
        return 0;
}

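/*
 * Throw away everything a delayed node holds: all pending insertion and
 * deletion items, the delayed iref and the dirty inode item, releasing the
 * metadata reservations along the way.
 */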
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_root *root = delayed_node->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_delayed_item *curr_item, *prev_item;

        mutex_lock(&delayed_node->mutex);
        curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (curr_item) {
                btrfs_delayed_item_release_metadata(fs_info, curr_item);
                prev_item = curr_item;
                curr_item = __btrfs_next_delayed_item(prev_item);
                btrfs_release_delayed_item(prev_item);
        }

        curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (curr_item) {
                btrfs_delayed_item_release_metadata(fs_info, curr_item);
                prev_item = curr_item;
                curr_item = __btrfs_next_delayed_item(prev_item);
                btrfs_release_delayed_item(prev_item);
        }

        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
                btrfs_release_delayed_iref(delayed_node);

        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
                btrfs_release_delayed_inode(delayed_node);
        }
        mutex_unlock(&delayed_node->mutex);
}

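/*
 * Throw away all pending delayed items of an inode, for the case where the
 * pending changes will never be committed (e.g. the inode is being evicted).
 */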
void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
                return;

        __btrfs_kill_delayed_node(delayed_node);
        btrfs_release_delayed_node(delayed_node);
}

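/*
 * Kill every delayed node of the given root, walking the radix tree in
 * batches of up to eight nodes and taking a reference on each node before
 * dropping the lock so that none of them can disappear under us.
 */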
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
        u64 inode_id = 0;
        struct btrfs_delayed_node *delayed_nodes[8];
        int i, n;

        while (1) {
                spin_lock(&root->inode_lock);
                n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
                                           (void **)delayed_nodes, inode_id,
                                           ARRAY_SIZE(delayed_nodes));
                if (!n) {
                        spin_unlock(&root->inode_lock);
                        break;
                }

                inode_id = delayed_nodes[n - 1]->inode_id + 1;

                for (i = 0; i < n; i++)
                        refcount_inc(&delayed_nodes[i]->refs);
                spin_unlock(&root->inode_lock);

                for (i = 0; i < n; i++) {
                        __btrfs_kill_delayed_node(delayed_nodes[i]);
                        btrfs_release_delayed_node(delayed_nodes[i]);
                }
        }
}

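/*
 * Kill all delayed nodes still queued on the delayed root; used when a
 * transaction is torn down in the error-handling path.
 */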
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
        struct btrfs_delayed_node *curr_node, *prev_node;

        curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
        while (curr_node) {
                __btrfs_kill_delayed_node(curr_node);

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                btrfs_release_delayed_node(prev_node);
        }
}