linux/fs/btrfs/delayed-inode.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"

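/*
 * Thresholds for the delayed item backlog: above BTRFS_DELAYED_BACKGROUND
 * items we kick off background flushing, above BTRFS_DELAYED_WRITEBACK the
 * producer is made to wait, and BTRFS_DELAYED_BATCH is the flush/wakeup
 * granularity (see btrfs_balance_delayed_items() and finish_one_item()).
 */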
#define BTRFS_DELAYED_WRITEBACK         512
#define BTRFS_DELAYED_BACKGROUND        128
#define BTRFS_DELAYED_BATCH             16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
        delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
                                        sizeof(struct btrfs_delayed_node),
                                        0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!delayed_node_cache)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
        kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                struct btrfs_root *root, u64 inode_id)
{
        delayed_node->root = root;
        delayed_node->inode_id = inode_id;
        refcount_set(&delayed_node->refs, 0);
        delayed_node->ins_root = RB_ROOT_CACHED;
        delayed_node->del_root = RB_ROOT_CACHED;
        mutex_init(&delayed_node->mutex);
        INIT_LIST_HEAD(&delayed_node->n_list);
        INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
                                        struct btrfs_delayed_item *item1,
                                        struct btrfs_delayed_item *item2)
{
        if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
            item1->key.objectid == item2->key.objectid &&
            item1->key.type == item2->key.type &&
            item1->key.offset + 1 == item2->key.offset)
                return 1;
        return 0;
}

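/*
 * Look up the delayed node of an inode and take a reference on it.
 *
 * The fast path reads the pointer cached in the btrfs_inode without locking;
 * the slow path looks the node up in the root's radix tree under inode_lock
 * and, on success, caches it in the inode for the next lookup.  Returns NULL
 * if the inode has no delayed node or the node is being torn down.
 */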
static struct btrfs_delayed_node *btrfs_get_delayed_node(
                struct btrfs_inode *btrfs_inode)
{
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(btrfs_inode);
        struct btrfs_delayed_node *node;

        node = READ_ONCE(btrfs_inode->delayed_node);
        if (node) {
                refcount_inc(&node->refs);
                return node;
        }

        spin_lock(&root->inode_lock);
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

        if (node) {
                if (btrfs_inode->delayed_node) {
                        refcount_inc(&node->refs);      /* can be accessed */
                        BUG_ON(btrfs_inode->delayed_node != node);
                        spin_unlock(&root->inode_lock);
                        return node;
                }

                /*
                 * It's possible that we're racing into the middle of removing
                 * this node from the radix tree.  In this case, the refcount
                 * was zero and it should never go back to one.  Just return
                 * NULL like it was never in the radix at all; our release
                 * function is in the process of removing it.
                 *
                 * Some implementations of refcount_inc refuse to bump the
                 * refcount once it has hit zero.  If we don't do this dance
                 * here, refcount_inc() may decide to just WARN_ONCE() instead
                 * of actually bumping the refcount.
                 *
                 * If this node is properly in the radix, we want to bump the
                 * refcount twice, once for the inode and once for this get
                 * operation.
                 */
                if (refcount_inc_not_zero(&node->refs)) {
                        refcount_inc(&node->refs);
                        btrfs_inode->delayed_node = node;
                } else {
                        node = NULL;
                }

                spin_unlock(&root->inode_lock);
                return node;
        }
        spin_unlock(&root->inode_lock);

        return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
                struct btrfs_inode *btrfs_inode)
{
        struct btrfs_delayed_node *node;
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(btrfs_inode);
        int ret;

again:
        node = btrfs_get_delayed_node(btrfs_inode);
        if (node)
                return node;

        node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
        if (!node)
                return ERR_PTR(-ENOMEM);
        btrfs_init_delayed_node(node, root, ino);

        /* cached in the btrfs inode and can be accessed */
        refcount_set(&node->refs, 2);

        ret = radix_tree_preload(GFP_NOFS);
        if (ret) {
                kmem_cache_free(delayed_node_cache, node);
                return ERR_PTR(ret);
        }

        spin_lock(&root->inode_lock);
        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
        if (ret == -EEXIST) {
                spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, node);
                radix_tree_preload_end();
                goto again;
        }
        btrfs_inode->delayed_node = node;
        spin_unlock(&root->inode_lock);
        radix_tree_preload_end();

        return node;
}

/*
 * Call this while holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
                                     struct btrfs_delayed_node *node,
                                     int mod)
{
        spin_lock(&root->lock);
        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                if (!list_empty(&node->p_list))
                        list_move_tail(&node->p_list, &root->prepare_list);
                else if (mod)
                        list_add_tail(&node->p_list, &root->prepare_list);
        } else {
                list_add_tail(&node->n_list, &root->node_list);
                list_add_tail(&node->p_list, &root->prepare_list);
                refcount_inc(&node->refs);      /* inserted into list */
                root->nodes++;
                set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
        }
        spin_unlock(&root->lock);
}

/* Call this while holding delayed_node->mutex. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
                                       struct btrfs_delayed_node *node)
{
        spin_lock(&root->lock);
        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                root->nodes--;
                refcount_dec(&node->refs);      /* not in the list */
                list_del_init(&node->n_list);
                if (!list_empty(&node->p_list))
                        list_del_init(&node->p_list);
                clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
        }
        spin_unlock(&root->lock);
}

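/*
 * Grab a reference on the first delayed node in the FIFO of nodes that have
 * pending items, or return NULL if the list is empty.
 */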
static struct btrfs_delayed_node *btrfs_first_delayed_node(
                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->node_list))
                goto out;

        p = delayed_root->node_list.next;
        node = list_entry(p, struct btrfs_delayed_node, n_list);
        refcount_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

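/*
 * Return the delayed node after @node in the node list, with a reference
 * held.  If @node is no longer on the list, start over from the head;
 * return NULL once the end of the list is reached.
 */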
static struct btrfs_delayed_node *btrfs_next_delayed_node(
                                                struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_root *delayed_root;
        struct list_head *p;
        struct btrfs_delayed_node *next = NULL;

        delayed_root = node->root->fs_info->delayed_root;
        spin_lock(&delayed_root->lock);
        if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                /* not in the list */
                if (list_empty(&delayed_root->node_list))
                        goto out;
                p = delayed_root->node_list.next;
        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
                goto out;
        else
                p = node->n_list.next;

        next = list_entry(p, struct btrfs_delayed_node, n_list);
        refcount_inc(&next->refs);
out:
        spin_unlock(&delayed_root->lock);

        return next;
}

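/*
 * Drop a reference on @delayed_node.  Before dropping it, requeue the node
 * if it still has pending items (also placing it on the prepare list when
 * @mod is set), otherwise dequeue it.  The final reference removes the node
 * from the radix tree and frees it.
 */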
static void __btrfs_release_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                int mod)
{
        struct btrfs_delayed_root *delayed_root;

        if (!delayed_node)
                return;

        delayed_root = delayed_node->root->fs_info->delayed_root;

        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)
                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
        else
                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
        mutex_unlock(&delayed_node->mutex);

        if (refcount_dec_and_test(&delayed_node->refs)) {
                struct btrfs_root *root = delayed_node->root;

                spin_lock(&root->inode_lock);
                /*
                 * Once our refcount goes to zero, nobody is allowed to bump it
                 * back up.  We can delete it now.
                 */
                ASSERT(refcount_read(&delayed_node->refs) == 0);
                radix_tree_delete(&root->delayed_nodes_tree,
                                  delayed_node->inode_id);
                spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, delayed_node);
        }
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
                                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->prepare_list))
                goto out;

        p = delayed_root->prepare_list.next;
        list_del_init(p);
        node = list_entry(p, struct btrfs_delayed_node, p_list);
        refcount_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

static inline void btrfs_release_prepared_delayed_node(
                                        struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 1);
}

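/*
 * Allocate a delayed item with @data_len bytes of payload placed directly
 * after the struct; the caller fills in the key and the data.
 */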
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
        struct btrfs_delayed_item *item;
        item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
        if (item) {
                item->data_len = data_len;
                item->ins_or_del = 0;
                item->bytes_reserved = 0;
                item->delayed_node = NULL;
                refcount_set(&item->refs, 1);
        }
        return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @root: the rbtree root of the delayed node to search
 * @key:  the key to look up
 * @prev: used to store the previous item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we return NULL and fill in the
 * previous and next items instead.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
                                struct rb_root *root,
                                struct btrfs_key *key,
                                struct btrfs_delayed_item **prev,
                                struct btrfs_delayed_item **next)
{
        struct rb_node *node, *prev_node = NULL;
        struct btrfs_delayed_item *delayed_item = NULL;
        int ret = 0;

        node = root->rb_node;

        while (node) {
                delayed_item = rb_entry(node, struct btrfs_delayed_item,
                                        rb_node);
                prev_node = node;
                ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
                if (ret < 0)
                        node = node->rb_right;
                else if (ret > 0)
                        node = node->rb_left;
                else
                        return delayed_item;
        }

        if (prev) {
                if (!prev_node)
                        *prev = NULL;
                else if (ret < 0)
                        *prev = delayed_item;
                else if ((node = rb_prev(prev_node)) != NULL) {
                        *prev = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *prev = NULL;
        }

        if (next) {
                if (!prev_node)
                        *next = NULL;
                else if (ret > 0)
                        *next = delayed_item;
                else if ((node = rb_next(prev_node)) != NULL) {
                        *next = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *next = NULL;
        }
        return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
                                           NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
                                    struct btrfs_delayed_item *ins,
                                    int action)
{
        struct rb_node **p, *node;
        struct rb_node *parent_node = NULL;
        struct rb_root_cached *root;
        struct btrfs_delayed_item *item;
        int cmp;
        bool leftmost = true;

        if (action == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_node->ins_root;
        else if (action == BTRFS_DELAYED_DELETION_ITEM)
                root = &delayed_node->del_root;
        else
                BUG();
        p = &root->rb_root.rb_node;
        node = &ins->rb_node;

        while (*p) {
                parent_node = *p;
                item = rb_entry(parent_node, struct btrfs_delayed_item,
                                rb_node);

                cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
                if (cmp < 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else if (cmp > 0) {
                        p = &(*p)->rb_left;
                } else {
                        return -EEXIST;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        ins->delayed_node = delayed_node;
        ins->ins_or_del = action;

        if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
            action == BTRFS_DELAYED_INSERTION_ITEM &&
            ins->key.offset >= delayed_node->index_cnt)
                delayed_node->index_cnt = ins->key.offset + 1;

        delayed_node->count++;
        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
        return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
                                              struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
                                             struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_DELETION_ITEM);
}

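/*
 * Account one completed delayed item and wake up any waiter in
 * btrfs_balance_delayed_items() once the backlog drops below the background
 * threshold or a whole batch has been processed.
 */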
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
        int seq = atomic_inc_return(&delayed_root->items_seq);

        /* atomic_dec_return implies a barrier */
        if (atomic_dec_return(&delayed_root->items) <
            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0)
                cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
        struct rb_root_cached *root;
        struct btrfs_delayed_root *delayed_root;

        /* Not associated with any delayed_node */
        if (!delayed_item->delayed_node)
                return;
        delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

        BUG_ON(!delayed_root);
        BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
               delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

        if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_item->delayed_node->ins_root;
        else
                root = &delayed_item->delayed_node->del_root;

        rb_erase_cached(&delayed_item->rb_node, root);
        delayed_item->delayed_node->count--;

        finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
        if (item) {
                __btrfs_remove_delayed_item(item);
                if (refcount_dec_and_test(&item->refs))
                        kfree(item);
        }
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first_cached(&delayed_node->ins_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first_cached(&delayed_node->del_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
                                                struct btrfs_delayed_item *item)
{
        struct rb_node *p;
        struct btrfs_delayed_item *next = NULL;

        p = rb_next(&item->rb_node);
        if (p)
                next = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return next;
}

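/*
 * Reserve metadata space for one delayed item by migrating it from the
 * transaction's block reserve, which already holds the space, into the
 * global delayed_block_rsv, so no new qgroup reservation is needed.
 */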
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
                                               struct btrfs_root *root,
                                               struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 num_bytes;
        int ret;

        if (!trans->bytes_reserved)
                return 0;

        src_rsv = trans->block_rsv;
        dst_rsv = &fs_info->delayed_block_rsv;

        num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

        /*
         * Here we migrate space rsv from the transaction rsv, since we have
         * already reserved space when starting a transaction.  So there is no
         * need to reserve qgroup space here.
         */
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
        if (!ret) {
                trace_btrfs_space_reservation(fs_info, "delayed_item",
                                              item->key.objectid,
                                              num_bytes, 1);
                item->bytes_reserved = num_bytes;
        }

        return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
                                                struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *rsv;
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!item->bytes_reserved)
                return;

        rsv = &fs_info->delayed_block_rsv;
        /*
         * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
         * to release/reserve qgroup space.
         */
        trace_btrfs_space_reservation(fs_info, "delayed_item",
                                      item->key.objectid, item->bytes_reserved,
                                      0);
        btrfs_block_rsv_release(fs_info, rsv,
                                item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
                                        struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_inode *inode,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;

        src_rsv = trans->block_rsv;
        dst_rsv = &fs_info->delayed_block_rsv;

        num_bytes = btrfs_calc_metadata_size(fs_info, 1);

        /*
         * btrfs_dirty_inode() updates the inode under btrfs_join_transaction(),
         * which does not reserve space, for speed.  That is a problem because
         * we still need to reserve space for this update, so try to reserve
         * it here.
         *
         * If src_rsv is the delalloc block reserve, we just let it steal the
         * space, since we always reserve enough there to update the inode
         * item.
         */
        if (!src_rsv || (!trans->bytes_reserved &&
                         src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
                ret = btrfs_qgroup_reserve_meta_prealloc(root,
                                fs_info->nodesize, true);
                if (ret < 0)
                        return ret;
                ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
                                          BTRFS_RESERVE_NO_FLUSH);
                /*
                 * Since we're under a transaction reserve_metadata_bytes could
                 * try to commit the transaction which will make it return
                 * EAGAIN to make us stop the transaction we have, so return
                 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
                 */
                if (ret == -EAGAIN) {
                        ret = -ENOSPC;
                        btrfs_qgroup_free_meta_prealloc(root, num_bytes);
                }
                if (!ret) {
                        node->bytes_reserved = num_bytes;
                        trace_btrfs_space_reservation(fs_info,
                                                      "delayed_inode",
                                                      btrfs_ino(inode),
                                                      num_bytes, 1);
                } else {
                        btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
                }
                return ret;
        }

        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
        if (!ret) {
                trace_btrfs_space_reservation(fs_info, "delayed_inode",
                                              btrfs_ino(inode), num_bytes, 1);
                node->bytes_reserved = num_bytes;
        }

        return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
                                                struct btrfs_delayed_node *node,
                                                bool qgroup_free)
{
        struct btrfs_block_rsv *rsv;

        if (!node->bytes_reserved)
                return;

        rsv = &fs_info->delayed_block_rsv;
        trace_btrfs_space_reservation(fs_info, "delayed_inode",
                                      node->inode_id, node->bytes_reserved, 0);
        btrfs_block_rsv_release(fs_info, rsv,
                                node->bytes_reserved);
        if (qgroup_free)
                btrfs_qgroup_free_meta_prealloc(node->root,
                                node->bytes_reserved);
        else
                btrfs_qgroup_convert_reserved_meta(node->root,
                                node->bytes_reserved);
        node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        struct btrfs_delayed_item *curr, *next;
        int free_space;
        int total_data_size = 0, total_size = 0;
        struct extent_buffer *leaf;
        char *data_ptr;
        struct btrfs_key *keys;
        u32 *data_size;
        struct list_head head;
        int slot;
        int nitems;
        int i;
        int ret = 0;

        BUG_ON(!path->nodes[0]);

        leaf = path->nodes[0];
        free_space = btrfs_leaf_free_space(leaf);
        INIT_LIST_HEAD(&head);

        next = item;
        nitems = 0;

        /*
         * Count how many continuous items we can insert in a batch.
         */
        while (total_size + next->data_len + sizeof(struct btrfs_item) <=
               free_space) {
                total_data_size += next->data_len;
                total_size += next->data_len + sizeof(struct btrfs_item);
                list_add_tail(&next->tree_list, &head);
                nitems++;

                curr = next;
                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                if (!btrfs_is_continuous_delayed_item(curr, next))
                        break;
        }

        if (!nitems) {
                ret = 0;
                goto out;
        }

        /*
         * We need to allocate memory, which might cause the task to sleep,
         * so set all locked nodes in the path to blocking locks first.
         */
        btrfs_set_path_blocking(path);

        keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
        if (!keys) {
                ret = -ENOMEM;
                goto out;
        }

        data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
        if (!data_size) {
                ret = -ENOMEM;
                goto error;
        }

        /* get keys of all the delayed items */
        i = 0;
        list_for_each_entry(next, &head, tree_list) {
                keys[i] = next->key;
                data_size[i] = next->data_len;
                i++;
        }

        /* insert the keys of the items */
        setup_items_for_insert(root, path, keys, data_size,
                               total_data_size, total_size, nitems);

        /* insert the dir index items */
        slot = path->slots[0];
        list_for_each_entry_safe(curr, next, &head, tree_list) {
                data_ptr = btrfs_item_ptr(leaf, slot, char);
                write_extent_buffer(leaf, &curr->data,
                                    (unsigned long)data_ptr,
                                    curr->data_len);
                slot++;

                btrfs_delayed_item_release_metadata(root, curr);

                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

error:
        kfree(data_size);
        kfree(keys);
out:
        return ret;
}

/*
 * This helper handles simple insertions that don't need to extend an item
 * for new data, such as directory name index and inode item insertions.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path,
                                     struct btrfs_delayed_item *delayed_item)
{
        struct extent_buffer *leaf;
        char *ptr;
        int ret;

        ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
                                      delayed_item->data_len);
        if (ret < 0 && ret != -EEXIST)
                return ret;

        leaf = path->nodes[0];

        ptr = btrfs_item_ptr(leaf, path->slots[0], char);

        write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
                            delayed_item->data_len);
        btrfs_mark_buffer_dirty(leaf);

        btrfs_delayed_item_release_metadata(root, delayed_item);
        return 0;
}

/*
 * Insert one item first; if continuous items follow it, try to insert them
 * into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_insertion_item(node);
        if (!curr)
                goto insert_end;

        ret = btrfs_insert_delayed_item(trans, root, path, curr);
        if (ret < 0) {
                btrfs_release_path(path);
                goto insert_end;
        }

        prev = curr;
        curr = __btrfs_next_delayed_item(prev);
        if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
                /* insert the continuous items into the same leaf */
                path->slots[0]++;
                btrfs_batch_insert_items(root, path, curr);
        }
        btrfs_release_delayed_item(prev);
        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

insert_end:
        mutex_unlock(&node->mutex);
        return ret;
}

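/*
 * Starting from the leaf slot that @path points at, delete the contiguous
 * run of dir index items that matches the delayed deletion items queued
 * after @item, releasing their metadata reservations.
 */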
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        struct btrfs_delayed_item *curr, *next;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct list_head head;
        int nitems, i, last_item;
        int ret = 0;

        BUG_ON(!path->nodes[0]);

        leaf = path->nodes[0];

        i = path->slots[0];
        last_item = btrfs_header_nritems(leaf) - 1;
        if (i > last_item)
                return -ENOENT; /* FIXME: Is errno suitable? */

        next = item;
        INIT_LIST_HEAD(&head);
        btrfs_item_key_to_cpu(leaf, &key, i);
        nitems = 0;
        /*
         * Count how many dir index items we can delete in a batch.
         */
        while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
                list_add_tail(&next->tree_list, &head);
                nitems++;

                curr = next;
                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                if (!btrfs_is_continuous_delayed_item(curr, next))
                        break;

                i++;
                if (i > last_item)
                        break;
                btrfs_item_key_to_cpu(leaf, &key, i);
        }

        if (!nitems)
                return 0;

        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
        if (ret)
                goto out;

        list_for_each_entry_safe(curr, next, &head, tree_list) {
                btrfs_delayed_item_release_metadata(root, curr);
                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

out:
        return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_deletion_item(node);
        if (!curr)
                goto delete_fail;

        ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
        if (ret < 0)
                goto delete_fail;
        else if (ret > 0) {
                /*
                 * Can't find the item this delayed item points to, so the
                 * delayed item is stale; just drop it.
                 */
                prev = curr;
                curr = __btrfs_next_delayed_item(prev);
                btrfs_release_delayed_item(prev);
                ret = 0;
                btrfs_release_path(path);
                if (curr) {
                        mutex_unlock(&node->mutex);
                        goto do_again;
                } else
                        goto delete_fail;
        }

        btrfs_batch_delete_items(trans, root, path, curr);
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

delete_fail:
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_delayed_root *delayed_root;

        if (delayed_node &&
            test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                BUG_ON(!delayed_node->root);
                clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
                delayed_node->count--;

                delayed_root = delayed_node->root->fs_info->delayed_root;
                finish_one_item(delayed_root);
        }
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_delayed_root *delayed_root;

        ASSERT(delayed_node->root);
        clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
        delayed_node->count--;

        delayed_root = delayed_node->root->fs_info->delayed_root;
        finish_one_item(delayed_root);
}

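/*
 * Copy the delayed inode item into its leaf and, when
 * BTRFS_DELAYED_NODE_DEL_IREF is set, delete the single remaining inode ref
 * item as well.  The caller must hold delayed_node->mutex.
 */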
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key key;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        int mod;
        int ret;

        key.objectid = node->inode_id;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;

        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
                mod = -1;
        else
                mod = 1;

        ret = btrfs_lookup_inode(trans, root, path, &key, mod);
        if (ret > 0) {
                btrfs_release_path(path);
                return -ENOENT;
        } else if (ret < 0) {
                return ret;
        }

        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
                            sizeof(struct btrfs_inode_item));
        btrfs_mark_buffer_dirty(leaf);

        if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
                goto no_iref;

        path->slots[0]++;
        if (path->slots[0] >= btrfs_header_nritems(leaf))
                goto search;
again:
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (key.objectid != node->inode_id)
                goto out;

        if (key.type != BTRFS_INODE_REF_KEY &&
            key.type != BTRFS_INODE_EXTREF_KEY)
                goto out;

        /*
         * Delayed iref deletion is for an inode with only one link, so
         * there is only one iref.  The case of several irefs sharing the
         * same item does not exist.
         */
        btrfs_del_item(trans, root, path);
out:
        btrfs_release_delayed_iref(node);
no_iref:
        btrfs_release_path(path);
err_out:
        btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
        btrfs_release_delayed_inode(node);

        return ret;

search:
        btrfs_release_path(path);

        key.type = BTRFS_INODE_EXTREF_KEY;
        key.offset = -1;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto err_out;
        ASSERT(ret);

        ret = 0;
        leaf = path->nodes[0];
        path->slots[0]--;
        goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
                                             struct btrfs_path *path,
                                             struct btrfs_delayed_node *node)
{
        int ret;

        mutex_lock(&node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
                mutex_unlock(&node->mutex);
                return 0;
        }

        ret = __btrfs_update_delayed_inode(trans, root, path, node);
        mutex_unlock(&node->mutex);
        return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                   struct btrfs_path *path,
                                   struct btrfs_delayed_node *node)
{
        int ret;

        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
        if (ret)
                return ret;

        ret = btrfs_delete_delayed_items(trans, path, node->root, node);
        if (ret)
                return ret;

        ret = btrfs_update_delayed_inode(trans, node->root, path, node);
        return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error; the transaction is aborted and any outstanding
 * delayed items are cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_delayed_node *curr_node, *prev_node;
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret = 0;
        bool count = (nr > 0);

        if (trans->aborted)
                return -EIO;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->leave_spinning = 1;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &fs_info->delayed_block_rsv;

        delayed_root = fs_info->delayed_root;

        curr_node = btrfs_first_delayed_node(delayed_root);
        while (curr_node && (!count || (count && nr--))) {
                ret = __btrfs_commit_inode_delayed_items(trans, path,
                                                         curr_node);
                if (ret) {
                        btrfs_release_delayed_node(curr_node);
                        curr_node = NULL;
                        btrfs_abort_transaction(trans, ret);
                        break;
                }

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                btrfs_release_delayed_node(prev_node);
        }

        if (curr_node)
                btrfs_release_delayed_node(curr_node);
        btrfs_free_path(path);
        trans->block_rsv = block_rsv;

        return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
        return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
        return __btrfs_run_delayed_items(trans, nr);
}

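/*
 * Flush all delayed items of one inode within the current transaction,
 * temporarily switching trans->block_rsv to the delayed block reserve.
 */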
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                     struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!delayed_node->count) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        path = btrfs_alloc_path();
        if (!path) {
                btrfs_release_delayed_node(delayed_node);
                return -ENOMEM;
        }
        path->leave_spinning = 1;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

        ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

        btrfs_release_delayed_node(delayed_node);
        btrfs_free_path(path);
        trans->block_rsv = block_rsv;

        return ret;
}

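/*
 * Write only the delayed inode item back to the inode item in the fs tree,
 * leaving any queued dir index insertions/deletions in place; joins a
 * transaction of its own for the update.
 */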
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_trans_handle *trans;
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        trans = btrfs_join_transaction(delayed_node->root);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto trans_out;
        }
        path->leave_spinning = 1;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &fs_info->delayed_block_rsv;

        mutex_lock(&delayed_node->mutex);
        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
                ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
                                                   path, delayed_node);
        else
                ret = 0;
        mutex_unlock(&delayed_node->mutex);

        btrfs_free_path(path);
        trans->block_rsv = block_rsv;
trans_out:
        btrfs_end_transaction(trans);
        btrfs_btree_balance_dirty(fs_info);
out:
        btrfs_release_delayed_node(delayed_node);

        return ret;
}

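/*
 * Detach the cached delayed node from the inode and drop the inode's
 * reference on it; used on inode teardown.
 */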
void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = READ_ONCE(inode->delayed_node);
        if (!delayed_node)
                return;

        inode->delayed_node = NULL;
        btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
        struct btrfs_delayed_root *delayed_root;
        int nr;
        struct btrfs_work work;
};

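/*
 * Background work: flush prepared delayed nodes until the backlog falls
 * below half of BTRFS_DELAYED_BACKGROUND, or until the number of nodes the
 * queuer asked for has been processed.
 */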
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
        struct btrfs_async_delayed_work *async_work;
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
        struct btrfs_delayed_node *delayed_node = NULL;
        struct btrfs_root *root;
        struct btrfs_block_rsv *block_rsv;
        int total_done = 0;

        async_work = container_of(work, struct btrfs_async_delayed_work, work);
        delayed_root = async_work->delayed_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        do {
                if (atomic_read(&delayed_root->items) <
                    BTRFS_DELAYED_BACKGROUND / 2)
                        break;

                delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
                if (!delayed_node)
                        break;

                path->leave_spinning = 1;
                root = delayed_node->root;

                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        btrfs_release_path(path);
                        btrfs_release_prepared_delayed_node(delayed_node);
                        total_done++;
                        continue;
                }

                block_rsv = trans->block_rsv;
                trans->block_rsv = &root->fs_info->delayed_block_rsv;

                __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

                trans->block_rsv = block_rsv;
                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty_nodelay(root->fs_info);

                btrfs_release_path(path);
                btrfs_release_prepared_delayed_node(delayed_node);
                total_done++;

        } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
                 || total_done < async_work->nr);

        btrfs_free_path(path);
out:
        wake_up(&delayed_root->wait);
        kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
                                     struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_async_delayed_work *async_work;

        async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
        if (!async_work)
                return -ENOMEM;

        async_work->delayed_root = delayed_root;
        btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
                        NULL);
        async_work->nr = nr;

        btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
        return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
        WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
        int val = atomic_read(&delayed_root->items_seq);

        if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
                return 1;

        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
                return 1;

        return 0;
}

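/*
 * Throttle delayed item producers: once the backlog reaches
 * BTRFS_DELAYED_BACKGROUND, kick background flushing; at or above
 * BTRFS_DELAYED_WRITEBACK, also make the caller wait until a batch
 * completes.
 */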
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
        struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

        if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
                btrfs_workqueue_normal_congested(fs_info->delayed_workers))
                return;

        if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
                int seq;
                int ret;

                seq = atomic_read(&delayed_root->items_seq);

                ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
                if (ret)
                        return;

                wait_event_interruptible(delayed_root->wait,
                                         could_end_wait(delayed_root, seq));
                return;
        }

        btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   const char *name, int name_len,
                                   struct btrfs_inode *dir,
                                   struct btrfs_disk_key *disk_key, u8 type,
                                   u64 index)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *delayed_item;
        struct btrfs_dir_item *dir_item;
        int ret;

        delayed_node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
        if (!delayed_item) {
                ret = -ENOMEM;
                goto release_node;
        }

        delayed_item->key.objectid = btrfs_ino(dir);
        delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
        delayed_item->key.offset = index;

        dir_item = (struct btrfs_dir_item *)delayed_item->data;
        dir_item->location = *disk_key;
        btrfs_set_stack_dir_transid(dir_item, trans->transid);
        btrfs_set_stack_dir_data_len(dir_item, 0);
        btrfs_set_stack_dir_name_len(dir_item, name_len);
        btrfs_set_stack_dir_type(dir_item, type);
        memcpy((char *)(dir_item + 1), name, name_len);

        ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
        /*
         * We reserved enough space when we started the transaction, so a
         * metadata reservation failure here should be impossible.
         */
        BUG_ON(ret);

        mutex_lock(&delayed_node->mutex);
        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
        if (unlikely(ret)) {
                btrfs_err(trans->fs_info,
                          "error adding delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
                          name_len, name, delayed_node->root->root_key.objectid,
                          delayed_node->inode_id, ret);
                BUG();
        }
        mutex_unlock(&delayed_node->mutex);

release_node:
        btrfs_release_delayed_node(delayed_node);
        return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
                                               struct btrfs_delayed_node *node,
                                               struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        mutex_lock(&node->mutex);
        item = __btrfs_lookup_delayed_insertion_item(node, key);
        if (!item) {
                mutex_unlock(&node->mutex);
                return 1;
        }

        btrfs_delayed_item_release_metadata(node->root, item);
        btrfs_release_delayed_item(item);
        mutex_unlock(&node->mutex);
        return 0;
}

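/*
 * Queue the deletion of a dir index item.  If a matching insertion item is
 * still pending, cancelling that item is enough; otherwise a delayed
 * deletion item is queued on the node.
 */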
1499int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1500                                   struct btrfs_inode *dir, u64 index)
1501{
1502        struct btrfs_delayed_node *node;
1503        struct btrfs_delayed_item *item;
1504        struct btrfs_key item_key;
1505        int ret;
1506
1507        node = btrfs_get_or_create_delayed_node(dir);
1508        if (IS_ERR(node))
1509                return PTR_ERR(node);
1510
1511        item_key.objectid = btrfs_ino(dir);
1512        item_key.type = BTRFS_DIR_INDEX_KEY;
1513        item_key.offset = index;
1514
1515        ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1516                                                  &item_key);
1517        if (!ret)
1518                goto end;
1519
1520        item = btrfs_alloc_delayed_item(0);
1521        if (!item) {
1522                ret = -ENOMEM;
1523                goto end;
1524        }
1525
1526        item->key = item_key;
1527
1528        ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1529        /*
1530         * We reserved enough space when we started a new transaction, so
1531         * a metadata reservation failure here should be impossible.
1532         */
1533        if (ret < 0) {
1534                btrfs_err(trans->fs_info,
1535"metadata reservation failed for delayed dir item deltiona, should have been reserved");
1536                btrfs_release_delayed_item(item);
1537                goto end;
1538        }
1539
1540        mutex_lock(&node->mutex);
1541        ret = __btrfs_add_delayed_deletion_item(node, item);
1542        if (unlikely(ret)) {
1543                btrfs_err(trans->fs_info,
1544                          "failed to add delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1545                          index, node->root->root_key.objectid,
1546                          node->inode_id, ret);
1547                btrfs_delayed_item_release_metadata(dir->root, item);
1548                btrfs_release_delayed_item(item);
1549        }
1550        mutex_unlock(&node->mutex);
1551end:
1552        btrfs_release_delayed_node(node);
1553        return ret;
1554}
1555
1556int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1557{
1558        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1559
1560        if (!delayed_node)
1561                return -ENOENT;
1562
1563        /*
1564         * Since we hold the i_mutex of this directory, no new directory
1565         * index can be added to the delayed node and index_cnt cannot be
1566         * updated now, so we don't need to lock the delayed node.
1567         */
1568        if (!delayed_node->index_cnt) {
1569                btrfs_release_delayed_node(delayed_node);
1570                return -EINVAL;
1571        }
1572
1573        inode->index_cnt = delayed_node->index_cnt;
1574        btrfs_release_delayed_node(delayed_node);
1575        return 0;
1576}
1577
1578bool btrfs_readdir_get_delayed_items(struct inode *inode,
1579                                     struct list_head *ins_list,
1580                                     struct list_head *del_list)
1581{
1582        struct btrfs_delayed_node *delayed_node;
1583        struct btrfs_delayed_item *item;
1584
1585        delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1586        if (!delayed_node)
1587                return false;
1588
1589        /*
1590         * We can only do one readdir with delayed items at a time because of
1591         * item->readdir_list, so trade the shared lock for the exclusive one.
1592         */
1593        inode_unlock_shared(inode);
1594        inode_lock(inode);
1595
1596        mutex_lock(&delayed_node->mutex);
1597        item = __btrfs_first_delayed_insertion_item(delayed_node);
1598        while (item) {
1599                refcount_inc(&item->refs);
1600                list_add_tail(&item->readdir_list, ins_list);
1601                item = __btrfs_next_delayed_item(item);
1602        }
1603
1604        item = __btrfs_first_delayed_deletion_item(delayed_node);
1605        while (item) {
1606                refcount_inc(&item->refs);
1607                list_add_tail(&item->readdir_list, del_list);
1608                item = __btrfs_next_delayed_item(item);
1609        }
1610        mutex_unlock(&delayed_node->mutex);
1611        /*
1612         * This delayed node is still cached in the btrfs inode, so refs
1613         * must be > 1 now, and we don't need to check whether it is about
1614         * to be freed.
1615         *
1616         * Besides that, this function is used to read a directory, and no
1617         * delayed items are inserted or deleted during that period, so we
1618         * don't need to requeue or dequeue this delayed node either.
1619         */
1620        refcount_dec(&delayed_node->refs);
1621
1622        return true;
1623}
1624
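    /*
     * Counterpart of btrfs_readdir_get_delayed_items(): drop the readdir
     * references taken on each delayed item, freeing items whose last
     * reference this was, and downgrade the exclusive inode lock back to
     * the shared lock the VFS is going to release.
     */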
1625void btrfs_readdir_put_delayed_items(struct inode *inode,
1626                                     struct list_head *ins_list,
1627                                     struct list_head *del_list)
1628{
1629        struct btrfs_delayed_item *curr, *next;
1630
1631        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1632                list_del(&curr->readdir_list);
1633                if (refcount_dec_and_test(&curr->refs))
1634                        kfree(curr);
1635        }
1636
1637        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1638                list_del(&curr->readdir_list);
1639                if (refcount_dec_and_test(&curr->refs))
1640                        kfree(curr);
1641        }
1642
1643        /*
1644         * The VFS is going to do up_read(), so we need to downgrade back to a
1645         * read lock.
1646         */
1647        downgrade_write(&inode->i_rwsem);
1648}
1649
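    /*
     * @del_list was built by walking the deletion rbtree in order, so its
     * entries are sorted by index and we can stop scanning as soon as we
     * pass @index.
     */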
1650int btrfs_should_delete_dir_index(struct list_head *del_list,
1651                                  u64 index)
1652{
1653        struct btrfs_delayed_item *curr;
1654        int ret = 0;
1655
1656        list_for_each_entry(curr, del_list, readdir_list) {
1657                if (curr->key.offset > index)
1658                        break;
1659                if (curr->key.offset == index) {
1660                        ret = 1;
1661                        break;
1662                }
1663        }
1664        return ret;
1665}
1666
1667/*
1668 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree;
1669 * returns 1 if dir_emit() filled the buffer before all items were emitted.
1670 */
1671int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1672                                    struct list_head *ins_list)
1673{
1674        struct btrfs_dir_item *di;
1675        struct btrfs_delayed_item *curr, *next;
1676        struct btrfs_key location;
1677        char *name;
1678        int name_len;
1679        int over = 0;
1680        unsigned char d_type;
1681
1682        if (list_empty(ins_list))
1683                return 0;
1684
1685        /*
1686         * The data of the delayed items can no longer change, so we
1687         * don't need to lock them.  And since we hold the i_mutex of
1688         * the directory, nobody can delete any directory index now.
1689         */
1690        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1691                list_del(&curr->readdir_list);
1692
1693                if (curr->key.offset < ctx->pos) {
1694                        if (refcount_dec_and_test(&curr->refs))
1695                                kfree(curr);
1696                        continue;
1697                }
1698
1699                ctx->pos = curr->key.offset;
1700
1701                di = (struct btrfs_dir_item *)curr->data;
1702                name = (char *)(di + 1);
1703                name_len = btrfs_stack_dir_name_len(di);
1704
1705                d_type = fs_ftype_to_dtype(di->type);
1706                btrfs_disk_key_to_cpu(&location, &di->location);
1707
1708                over = !dir_emit(ctx, name, name_len,
1709                               location.objectid, d_type);
1710
1711                if (refcount_dec_and_test(&curr->refs))
1712                        kfree(curr);
1713
1714                if (over)
1715                        return 1;
1716                ctx->pos++;
1717        }
1718        return 0;
1719}
1720
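    /*
     * Copy the current in-memory inode state into an on-stack (not extent
     * buffer backed) btrfs_inode_item using the stack setter variants.
     * This is what the delayed node caches until the inode update is
     * flushed to the btree.
     */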
1721static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1722                                  struct btrfs_inode_item *inode_item,
1723                                  struct inode *inode)
1724{
1725        btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1726        btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1727        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1728        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1729        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1730        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1731        btrfs_set_stack_inode_generation(inode_item,
1732                                         BTRFS_I(inode)->generation);
1733        btrfs_set_stack_inode_sequence(inode_item,
1734                                       inode_peek_iversion(inode));
1735        btrfs_set_stack_inode_transid(inode_item, trans->transid);
1736        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1737        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1738        btrfs_set_stack_inode_block_group(inode_item, 0);
1739
1740        btrfs_set_stack_timespec_sec(&inode_item->atime,
1741                                     inode->i_atime.tv_sec);
1742        btrfs_set_stack_timespec_nsec(&inode_item->atime,
1743                                      inode->i_atime.tv_nsec);
1744
1745        btrfs_set_stack_timespec_sec(&inode_item->mtime,
1746                                     inode->i_mtime.tv_sec);
1747        btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1748                                      inode->i_mtime.tv_nsec);
1749
1750        btrfs_set_stack_timespec_sec(&inode_item->ctime,
1751                                     inode->i_ctime.tv_sec);
1752        btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1753                                      inode->i_ctime.tv_nsec);
1754
1755        btrfs_set_stack_timespec_sec(&inode_item->otime,
1756                                     BTRFS_I(inode)->i_otime.tv_sec);
1757        btrfs_set_stack_timespec_nsec(&inode_item->otime,
1758                                     BTRFS_I(inode)->i_otime.tv_nsec);
1759}
1760
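    /*
     * The reverse of fill_stack_inode_item(): if the inode has a delayed
     * node with a pending inode update, populate the VFS inode from the
     * cached stack item instead of the on-disk inode item.  Returns
     * -ENOENT when there is nothing delayed to fill from.
     */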
1761int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1762{
1763        struct btrfs_delayed_node *delayed_node;
1764        struct btrfs_inode_item *inode_item;
1765
1766        delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1767        if (!delayed_node)
1768                return -ENOENT;
1769
1770        mutex_lock(&delayed_node->mutex);
1771        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1772                mutex_unlock(&delayed_node->mutex);
1773                btrfs_release_delayed_node(delayed_node);
1774                return -ENOENT;
1775        }
1776
1777        inode_item = &delayed_node->inode_item;
1778
1779        i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1780        i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1781        btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1782        inode->i_mode = btrfs_stack_inode_mode(inode_item);
1783        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1784        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1785        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1786        BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1787
1788        inode_set_iversion_queried(inode,
1789                                   btrfs_stack_inode_sequence(inode_item));
1790        inode->i_rdev = 0;
1791        *rdev = btrfs_stack_inode_rdev(inode_item);
1792        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1793
1794        inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1795        inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1796
1797        inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1798        inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1799
1800        inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1801        inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1802
1803        BTRFS_I(inode)->i_otime.tv_sec =
1804                btrfs_stack_timespec_sec(&inode_item->otime);
1805        BTRFS_I(inode)->i_otime.tv_nsec =
1806                btrfs_stack_timespec_nsec(&inode_item->otime);
1807
1808        inode->i_generation = BTRFS_I(inode)->generation;
1809        BTRFS_I(inode)->index_cnt = (u64)-1;
1810
1811        mutex_unlock(&delayed_node->mutex);
1812        btrfs_release_delayed_node(delayed_node);
1813        return 0;
1814}
1815
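    /*
     * Cache an inode update in the delayed node.  If the node is already
     * marked INODE_DIRTY we only refresh the stack inode item; otherwise
     * we first reserve metadata for the future btree update, then mark
     * the node dirty and account the new delayed item.
     */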
1816int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1817                               struct btrfs_root *root, struct inode *inode)
1818{
1819        struct btrfs_delayed_node *delayed_node;
1820        int ret = 0;
1821
1822        delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
1823        if (IS_ERR(delayed_node))
1824                return PTR_ERR(delayed_node);
1825
1826        mutex_lock(&delayed_node->mutex);
1827        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1828                fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1829                goto release_node;
1830        }
1831
1832        ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
1833                                                   delayed_node);
1834        if (ret)
1835                goto release_node;
1836
1837        fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1838        set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1839        delayed_node->count++;
1840        atomic_inc(&root->fs_info->delayed_root->items);
1841release_node:
1842        mutex_unlock(&delayed_node->mutex);
1843        btrfs_release_delayed_node(delayed_node);
1844        return ret;
1845}
1846
1847int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1848{
1849        struct btrfs_fs_info *fs_info = inode->root->fs_info;
1850        struct btrfs_delayed_node *delayed_node;
1851
1852        /*
1853         * We don't do delayed inode updates during log recovery because it
1854         * leads to ENOSPC problems.  This means we also can't do delayed
1855         * inode ref deletions.
1856         */
1857        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1858                return -EAGAIN;
1859
1860        delayed_node = btrfs_get_or_create_delayed_node(inode);
1861        if (IS_ERR(delayed_node))
1862                return PTR_ERR(delayed_node);
1863
1864        /*
1865         * We don't reserve space for the inode ref deletion because:
1866         * - We ONLY do async inode ref deletion for an inode that has just
1867         *   one link (i_nlink == 1), which means there is only one inode ref.
1868         *   In most cases the inode ref and the inode item are in the same
1869         *   leaf, and we will deal with them at the same time.  Since we
1870         *   are sure we will reserve space for the inode item, there is no
1871         *   need to reserve space for the inode ref deletion as well.
1872         * - If the inode ref and the inode item are not in the same leaf,
1873         *   we still don't need to worry about ENOSPC, because we reserve
1874         *   much more space for the inode update than it actually needs.
1875         * - In the worst case, we can steal some space from the global
1876         *   reservation, but that is very rare.
1877         */
1878        mutex_lock(&delayed_node->mutex);
1879        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1880                goto release_node;
1881
1882        set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1883        delayed_node->count++;
1884        atomic_inc(&fs_info->delayed_root->items);
1885release_node:
1886        mutex_unlock(&delayed_node->mutex);
1887        btrfs_release_delayed_node(delayed_node);
1888        return 0;
1889}
1890
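    /*
     * Throw away everything a delayed node holds: all pending insertion
     * and deletion items together with their metadata reservations, a
     * pending inode ref deletion, and a pending inode update.  The caller
     * is expected to still hold its own reference on the node.
     */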
1891static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1892{
1893        struct btrfs_root *root = delayed_node->root;
1894        struct btrfs_fs_info *fs_info = root->fs_info;
1895        struct btrfs_delayed_item *curr_item, *prev_item;
1896
1897        mutex_lock(&delayed_node->mutex);
1898        curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1899        while (curr_item) {
1900                btrfs_delayed_item_release_metadata(root, curr_item);
1901                prev_item = curr_item;
1902                curr_item = __btrfs_next_delayed_item(prev_item);
1903                btrfs_release_delayed_item(prev_item);
1904        }
1905
1906        curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1907        while (curr_item) {
1908                btrfs_delayed_item_release_metadata(root, curr_item);
1909                prev_item = curr_item;
1910                curr_item = __btrfs_next_delayed_item(prev_item);
1911                btrfs_release_delayed_item(prev_item);
1912        }
1913
1914        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1915                btrfs_release_delayed_iref(delayed_node);
1916
1917        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1918                btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1919                btrfs_release_delayed_inode(delayed_node);
1920        }
1921        mutex_unlock(&delayed_node->mutex);
1922}
1923
1924void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1925{
1926        struct btrfs_delayed_node *delayed_node;
1927
1928        delayed_node = btrfs_get_delayed_node(inode);
1929        if (!delayed_node)
1930                return;
1931
1932        __btrfs_kill_delayed_node(delayed_node);
1933        btrfs_release_delayed_node(delayed_node);
1934}
1935
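    /*
     * Kill every delayed node of @root.  The radix tree is scanned in
     * batches of 8, resuming after the highest inode number seen, so that
     * inode_lock is never held while the nodes themselves are killed.
     */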
1936void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1937{
1938        u64 inode_id = 0;
1939        struct btrfs_delayed_node *delayed_nodes[8];
1940        int i, n;
1941
1942        while (1) {
1943                spin_lock(&root->inode_lock);
1944                n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1945                                           (void **)delayed_nodes, inode_id,
1946                                           ARRAY_SIZE(delayed_nodes));
1947                if (!n) {
1948                        spin_unlock(&root->inode_lock);
1949                        break;
1950                }
1951
1952                inode_id = delayed_nodes[n - 1]->inode_id + 1;
1953                for (i = 0; i < n; i++) {
1954                        /*
1955                         * Don't increase refs in case the node is dead and
1956                         * about to be removed from the tree in the loop below
1957                         */
1958                        if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
1959                                delayed_nodes[i] = NULL;
1960                }
1961                spin_unlock(&root->inode_lock);
1962
1963                for (i = 0; i < n; i++) {
1964                        if (!delayed_nodes[i])
1965                                continue;
1966                        __btrfs_kill_delayed_node(delayed_nodes[i]);
1967                        btrfs_release_delayed_node(delayed_nodes[i]);
1968                }
1969        }
1970}
1971
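    /*
     * Walk the global list of delayed nodes and kill them all, dropping
     * the list reference returned by btrfs_first/next_delayed_node().
     * Pending delayed work is discarded rather than run.
     */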
1972void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1973{
1974        struct btrfs_delayed_node *curr_node, *prev_node;
1975
1976        curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1977        while (curr_node) {
1978                __btrfs_kill_delayed_node(curr_node);
1979
1980                prev_node = curr_node;
1981                curr_node = btrfs_next_delayed_node(curr_node);
1982                btrfs_release_delayed_node(prev_node);
1983        }
1984}