linux/fs/btrfs/delayed-inode.c
   1/*
   2 * Copyright (C) 2011 Fujitsu.  All rights reserved.
   3 * Written by Miao Xie <miaox@cn.fujitsu.com>
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public
   7 * License v2 as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  12 * General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public
  15 * License along with this program; if not, write to the
  16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   17 * Boston, MA 02111-1307, USA.
  18 */
  19
  20#include <linux/slab.h>
  21#include "delayed-inode.h"
  22#include "disk-io.h"
  23#include "transaction.h"
  24#include "ctree.h"
  25
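     /*
      * Tunables for the delayed item machinery below: once the number of
      * pending delayed items crosses BTRFS_DELAYED_BACKGROUND, asynchronous
      * flushing work is queued; crossing BTRFS_DELAYED_WRITEBACK makes
      * btrfs_balance_delayed_items() wait on the flusher; BTRFS_DELAYED_BATCH
      * bounds one round of background work and paces the wakeups issued by
      * finish_one_item().
      */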
  26#define BTRFS_DELAYED_WRITEBACK         512
  27#define BTRFS_DELAYED_BACKGROUND        128
  28#define BTRFS_DELAYED_BATCH             16
  29
  30static struct kmem_cache *delayed_node_cache;
  31
  32int __init btrfs_delayed_inode_init(void)
  33{
  34        delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
  35                                        sizeof(struct btrfs_delayed_node),
  36                                        0,
  37                                        SLAB_MEM_SPREAD,
  38                                        NULL);
  39        if (!delayed_node_cache)
  40                return -ENOMEM;
  41        return 0;
  42}
  43
  44void btrfs_delayed_inode_exit(void)
  45{
  46        kmem_cache_destroy(delayed_node_cache);
  47}
  48
  49static inline void btrfs_init_delayed_node(
  50                                struct btrfs_delayed_node *delayed_node,
  51                                struct btrfs_root *root, u64 inode_id)
  52{
  53        delayed_node->root = root;
  54        delayed_node->inode_id = inode_id;
  55        atomic_set(&delayed_node->refs, 0);
  56        delayed_node->ins_root = RB_ROOT;
  57        delayed_node->del_root = RB_ROOT;
  58        mutex_init(&delayed_node->mutex);
  59        INIT_LIST_HEAD(&delayed_node->n_list);
  60        INIT_LIST_HEAD(&delayed_node->p_list);
  61}
  62
  63static inline int btrfs_is_continuous_delayed_item(
  64                                        struct btrfs_delayed_item *item1,
  65                                        struct btrfs_delayed_item *item2)
  66{
  67        if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
  68            item1->key.objectid == item2->key.objectid &&
  69            item1->key.type == item2->key.type &&
  70            item1->key.offset + 1 == item2->key.offset)
  71                return 1;
  72        return 0;
  73}
  74
  75static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
  76                                                        struct btrfs_root *root)
  77{
  78        return root->fs_info->delayed_root;
  79}
  80
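     /*
      * Look up the delayed node cached on the btrfs inode, falling back to
      * the per-root radix tree.  A reference is taken for the caller; if the
      * node is found in the radix tree but not yet cached in the inode, a
      * second reference is taken for the inode's cache.  Returns NULL when
      * the inode has no delayed node.
      */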
  81static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
  82{
  83        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
  84        struct btrfs_root *root = btrfs_inode->root;
  85        u64 ino = btrfs_ino(inode);
  86        struct btrfs_delayed_node *node;
  87
  88        node = ACCESS_ONCE(btrfs_inode->delayed_node);
  89        if (node) {
  90                atomic_inc(&node->refs);
  91                return node;
  92        }
  93
  94        spin_lock(&root->inode_lock);
  95        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
  96        if (node) {
  97                if (btrfs_inode->delayed_node) {
  98                        atomic_inc(&node->refs);        /* can be accessed */
  99                        BUG_ON(btrfs_inode->delayed_node != node);
 100                        spin_unlock(&root->inode_lock);
 101                        return node;
 102                }
 103                btrfs_inode->delayed_node = node;
 104                /* can be accessed and cached in the inode */
 105                atomic_add(2, &node->refs);
 106                spin_unlock(&root->inode_lock);
 107                return node;
 108        }
 109        spin_unlock(&root->inode_lock);
 110
 111        return NULL;
 112}
 113
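     /*
      * Like btrfs_get_delayed_node(), but allocate a new node and insert it
      * into the per-root radix tree when none exists yet.  A racing insertion
      * is handled by retrying the lookup when radix_tree_insert() returns
      * -EEXIST.
      */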
 114/* Will return either the node or PTR_ERR(-ENOMEM) */
 115static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 116                                                        struct inode *inode)
 117{
 118        struct btrfs_delayed_node *node;
 119        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
 120        struct btrfs_root *root = btrfs_inode->root;
 121        u64 ino = btrfs_ino(inode);
 122        int ret;
 123
 124again:
 125        node = btrfs_get_delayed_node(inode);
 126        if (node)
 127                return node;
 128
 129        node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
 130        if (!node)
 131                return ERR_PTR(-ENOMEM);
 132        btrfs_init_delayed_node(node, root, ino);
 133
 134        /* cached in the btrfs inode and can be accessed */
 135        atomic_add(2, &node->refs);
 136
 137        ret = radix_tree_preload(GFP_NOFS);
 138        if (ret) {
 139                kmem_cache_free(delayed_node_cache, node);
 140                return ERR_PTR(ret);
 141        }
 142
 143        spin_lock(&root->inode_lock);
 144        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 145        if (ret == -EEXIST) {
 146                spin_unlock(&root->inode_lock);
 147                kmem_cache_free(delayed_node_cache, node);
 148                radix_tree_preload_end();
 149                goto again;
 150        }
 151        btrfs_inode->delayed_node = node;
 152        spin_unlock(&root->inode_lock);
 153        radix_tree_preload_end();
 154
 155        return node;
 156}
 157
 158/*
 159 * Call it when holding delayed_node->mutex
 160 *
 161 * If mod = 1, add this node into the prepared list.
 162 */
 163static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 164                                     struct btrfs_delayed_node *node,
 165                                     int mod)
 166{
 167        spin_lock(&root->lock);
 168        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 169                if (!list_empty(&node->p_list))
 170                        list_move_tail(&node->p_list, &root->prepare_list);
 171                else if (mod)
 172                        list_add_tail(&node->p_list, &root->prepare_list);
 173        } else {
 174                list_add_tail(&node->n_list, &root->node_list);
 175                list_add_tail(&node->p_list, &root->prepare_list);
 176                atomic_inc(&node->refs);        /* inserted into list */
 177                root->nodes++;
 178                set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 179        }
 180        spin_unlock(&root->lock);
 181}
 182
 183/* Call it when holding delayed_node->mutex */
 184static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 185                                       struct btrfs_delayed_node *node)
 186{
 187        spin_lock(&root->lock);
 188        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 189                root->nodes--;
 190                atomic_dec(&node->refs);        /* not in the list */
 191                list_del_init(&node->n_list);
 192                if (!list_empty(&node->p_list))
 193                        list_del_init(&node->p_list);
 194                clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 195        }
 196        spin_unlock(&root->lock);
 197}
 198
 199static struct btrfs_delayed_node *btrfs_first_delayed_node(
 200                        struct btrfs_delayed_root *delayed_root)
 201{
 202        struct list_head *p;
 203        struct btrfs_delayed_node *node = NULL;
 204
 205        spin_lock(&delayed_root->lock);
 206        if (list_empty(&delayed_root->node_list))
 207                goto out;
 208
 209        p = delayed_root->node_list.next;
 210        node = list_entry(p, struct btrfs_delayed_node, n_list);
 211        atomic_inc(&node->refs);
 212out:
 213        spin_unlock(&delayed_root->lock);
 214
 215        return node;
 216}
 217
 218static struct btrfs_delayed_node *btrfs_next_delayed_node(
 219                                                struct btrfs_delayed_node *node)
 220{
 221        struct btrfs_delayed_root *delayed_root;
 222        struct list_head *p;
 223        struct btrfs_delayed_node *next = NULL;
 224
 225        delayed_root = node->root->fs_info->delayed_root;
 226        spin_lock(&delayed_root->lock);
 227        if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 228                /* not in the list */
 229                if (list_empty(&delayed_root->node_list))
 230                        goto out;
 231                p = delayed_root->node_list.next;
 232        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
 233                goto out;
 234        else
 235                p = node->n_list.next;
 236
 237        next = list_entry(p, struct btrfs_delayed_node, n_list);
 238        atomic_inc(&next->refs);
 239out:
 240        spin_unlock(&delayed_root->lock);
 241
 242        return next;
 243}
 244
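     /*
      * Drop one reference on the delayed node.  If it still tracks items it
      * is (re)queued on the global node list, otherwise it is dequeued.  When
      * the last reference goes away the node is removed from the per-root
      * radix tree and freed; the refcount is re-checked under
      * root->inode_lock to close the race with a concurrent lookup.
      */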
 245static void __btrfs_release_delayed_node(
 246                                struct btrfs_delayed_node *delayed_node,
 247                                int mod)
 248{
 249        struct btrfs_delayed_root *delayed_root;
 250
 251        if (!delayed_node)
 252                return;
 253
 254        delayed_root = delayed_node->root->fs_info->delayed_root;
 255
 256        mutex_lock(&delayed_node->mutex);
 257        if (delayed_node->count)
 258                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
 259        else
 260                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 261        mutex_unlock(&delayed_node->mutex);
 262
 263        if (atomic_dec_and_test(&delayed_node->refs)) {
 264                bool free = false;
 265                struct btrfs_root *root = delayed_node->root;
 266                spin_lock(&root->inode_lock);
 267                if (atomic_read(&delayed_node->refs) == 0) {
 268                        radix_tree_delete(&root->delayed_nodes_tree,
 269                                          delayed_node->inode_id);
 270                        free = true;
 271                }
 272                spin_unlock(&root->inode_lock);
 273                if (free)
 274                        kmem_cache_free(delayed_node_cache, delayed_node);
 275        }
 276}
 277
 278static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
 279{
 280        __btrfs_release_delayed_node(node, 0);
 281}
 282
 283static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 284                                        struct btrfs_delayed_root *delayed_root)
 285{
 286        struct list_head *p;
 287        struct btrfs_delayed_node *node = NULL;
 288
 289        spin_lock(&delayed_root->lock);
 290        if (list_empty(&delayed_root->prepare_list))
 291                goto out;
 292
 293        p = delayed_root->prepare_list.next;
 294        list_del_init(p);
 295        node = list_entry(p, struct btrfs_delayed_node, p_list);
 296        atomic_inc(&node->refs);
 297out:
 298        spin_unlock(&delayed_root->lock);
 299
 300        return node;
 301}
 302
 303static inline void btrfs_release_prepared_delayed_node(
 304                                        struct btrfs_delayed_node *node)
 305{
 306        __btrfs_release_delayed_node(node, 1);
 307}
 308
 309static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 310{
 311        struct btrfs_delayed_item *item;
 312        item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
 313        if (item) {
 314                item->data_len = data_len;
 315                item->ins_or_del = 0;
 316                item->bytes_reserved = 0;
 317                item->delayed_node = NULL;
 318                atomic_set(&item->refs, 1);
 319        }
 320        return item;
 321}
 322
 323/*
 324 * __btrfs_lookup_delayed_item - look up the delayed item by key
  325 * @root:          the rb-root of the delayed node (ins_root or del_root)
 326 * @key:          the key to look up
 327 * @prev:         used to store the prev item if the right item isn't found
 328 * @next:         used to store the next item if the right item isn't found
 329 *
  330 * Note: if the item isn't found, NULL is returned and the previous and
  331 * next items are stored in @prev and @next.
 332 */
 333static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
 334                                struct rb_root *root,
 335                                struct btrfs_key *key,
 336                                struct btrfs_delayed_item **prev,
 337                                struct btrfs_delayed_item **next)
 338{
 339        struct rb_node *node, *prev_node = NULL;
 340        struct btrfs_delayed_item *delayed_item = NULL;
 341        int ret = 0;
 342
 343        node = root->rb_node;
 344
 345        while (node) {
 346                delayed_item = rb_entry(node, struct btrfs_delayed_item,
 347                                        rb_node);
 348                prev_node = node;
 349                ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
 350                if (ret < 0)
 351                        node = node->rb_right;
 352                else if (ret > 0)
 353                        node = node->rb_left;
 354                else
 355                        return delayed_item;
 356        }
 357
 358        if (prev) {
 359                if (!prev_node)
 360                        *prev = NULL;
 361                else if (ret < 0)
 362                        *prev = delayed_item;
 363                else if ((node = rb_prev(prev_node)) != NULL) {
 364                        *prev = rb_entry(node, struct btrfs_delayed_item,
 365                                         rb_node);
 366                } else
 367                        *prev = NULL;
 368        }
 369
 370        if (next) {
 371                if (!prev_node)
 372                        *next = NULL;
 373                else if (ret > 0)
 374                        *next = delayed_item;
 375                else if ((node = rb_next(prev_node)) != NULL) {
 376                        *next = rb_entry(node, struct btrfs_delayed_item,
 377                                         rb_node);
 378                } else
 379                        *next = NULL;
 380        }
 381        return NULL;
 382}
 383
 384static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
 385                                        struct btrfs_delayed_node *delayed_node,
 386                                        struct btrfs_key *key)
 387{
 388        return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
 389                                           NULL, NULL);
 390}
 391
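     /*
      * Link @ins into the delayed node's insertion or deletion rb-tree
      * (selected by @action), keyed by its btrfs key.  Returns -EEXIST if an
      * item with the same key is already queued.  For queued dir index
      * insertions, index_cnt is bumped past the new offset, and the global
      * pending item counter is incremented for every successful add.
      */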
 392static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
 393                                    struct btrfs_delayed_item *ins,
 394                                    int action)
 395{
 396        struct rb_node **p, *node;
 397        struct rb_node *parent_node = NULL;
 398        struct rb_root *root;
 399        struct btrfs_delayed_item *item;
 400        int cmp;
 401
 402        if (action == BTRFS_DELAYED_INSERTION_ITEM)
 403                root = &delayed_node->ins_root;
 404        else if (action == BTRFS_DELAYED_DELETION_ITEM)
 405                root = &delayed_node->del_root;
 406        else
 407                BUG();
 408        p = &root->rb_node;
 409        node = &ins->rb_node;
 410
 411        while (*p) {
 412                parent_node = *p;
 413                item = rb_entry(parent_node, struct btrfs_delayed_item,
 414                                 rb_node);
 415
 416                cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
 417                if (cmp < 0)
 418                        p = &(*p)->rb_right;
 419                else if (cmp > 0)
 420                        p = &(*p)->rb_left;
 421                else
 422                        return -EEXIST;
 423        }
 424
 425        rb_link_node(node, parent_node, p);
 426        rb_insert_color(node, root);
 427        ins->delayed_node = delayed_node;
 428        ins->ins_or_del = action;
 429
 430        if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
 431            action == BTRFS_DELAYED_INSERTION_ITEM &&
 432            ins->key.offset >= delayed_node->index_cnt)
 433                        delayed_node->index_cnt = ins->key.offset + 1;
 434
 435        delayed_node->count++;
 436        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
 437        return 0;
 438}
 439
 440static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
 441                                              struct btrfs_delayed_item *item)
 442{
 443        return __btrfs_add_delayed_item(node, item,
 444                                        BTRFS_DELAYED_INSERTION_ITEM);
 445}
 446
 447static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 448                                             struct btrfs_delayed_item *item)
 449{
 450        return __btrfs_add_delayed_item(node, item,
 451                                        BTRFS_DELAYED_DELETION_ITEM);
 452}
 453
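     /*
      * Account one completed delayed item: bump the completion sequence,
      * decrement the pending item count, and wake waiters in
      * btrfs_balance_delayed_items() when the backlog drops below
      * BTRFS_DELAYED_BACKGROUND or every BTRFS_DELAYED_BATCH completions.
      */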
 454static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 455{
 456        int seq = atomic_inc_return(&delayed_root->items_seq);
 457
 458        /*
 459         * atomic_dec_return implies a barrier for waitqueue_active
 460         */
 461        if ((atomic_dec_return(&delayed_root->items) <
 462            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
 463            waitqueue_active(&delayed_root->wait))
 464                wake_up(&delayed_root->wait);
 465}
 466
 467static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 468{
 469        struct rb_root *root;
 470        struct btrfs_delayed_root *delayed_root;
 471
 472        delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
 473
 474        BUG_ON(!delayed_root);
 475        BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
 476               delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
 477
 478        if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
 479                root = &delayed_item->delayed_node->ins_root;
 480        else
 481                root = &delayed_item->delayed_node->del_root;
 482
 483        rb_erase(&delayed_item->rb_node, root);
 484        delayed_item->delayed_node->count--;
 485
 486        finish_one_item(delayed_root);
 487}
 488
 489static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 490{
 491        if (item) {
 492                __btrfs_remove_delayed_item(item);
 493                if (atomic_dec_and_test(&item->refs))
 494                        kfree(item);
 495        }
 496}
 497
 498static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 499                                        struct btrfs_delayed_node *delayed_node)
 500{
 501        struct rb_node *p;
 502        struct btrfs_delayed_item *item = NULL;
 503
 504        p = rb_first(&delayed_node->ins_root);
 505        if (p)
 506                item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 507
 508        return item;
 509}
 510
 511static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 512                                        struct btrfs_delayed_node *delayed_node)
 513{
 514        struct rb_node *p;
 515        struct btrfs_delayed_item *item = NULL;
 516
 517        p = rb_first(&delayed_node->del_root);
 518        if (p)
 519                item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 520
 521        return item;
 522}
 523
 524static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 525                                                struct btrfs_delayed_item *item)
 526{
 527        struct rb_node *p;
 528        struct btrfs_delayed_item *next = NULL;
 529
 530        p = rb_next(&item->rb_node);
 531        if (p)
 532                next = rb_entry(p, struct btrfs_delayed_item, rb_node);
 533
 534        return next;
 535}
 536
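     /*
      * Reserve metadata space for one delayed item by migrating one unit of
      * metadata reservation (btrfs_calc_trans_metadata_size(root, 1)) from
      * the transaction's block_rsv into the global delayed_block_rsv.  A
      * transaction without bytes_reserved skips the reservation entirely.
      */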
 537static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 538                                               struct btrfs_root *root,
 539                                               struct btrfs_delayed_item *item)
 540{
 541        struct btrfs_block_rsv *src_rsv;
 542        struct btrfs_block_rsv *dst_rsv;
 543        u64 num_bytes;
 544        int ret;
 545
 546        if (!trans->bytes_reserved)
 547                return 0;
 548
 549        src_rsv = trans->block_rsv;
 550        dst_rsv = &root->fs_info->delayed_block_rsv;
 551
 552        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
 553        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 554        if (!ret) {
 555                trace_btrfs_space_reservation(root->fs_info, "delayed_item",
 556                                              item->key.objectid,
 557                                              num_bytes, 1);
 558                item->bytes_reserved = num_bytes;
 559        }
 560
 561        return ret;
 562}
 563
 564static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 565                                                struct btrfs_delayed_item *item)
 566{
 567        struct btrfs_block_rsv *rsv;
 568
 569        if (!item->bytes_reserved)
 570                return;
 571
 572        rsv = &root->fs_info->delayed_block_rsv;
 573        trace_btrfs_space_reservation(root->fs_info, "delayed_item",
 574                                      item->key.objectid, item->bytes_reserved,
 575                                      0);
 576        btrfs_block_rsv_release(root, rsv,
 577                                item->bytes_reserved);
 578}
 579
 580static int btrfs_delayed_inode_reserve_metadata(
 581                                        struct btrfs_trans_handle *trans,
 582                                        struct btrfs_root *root,
 583                                        struct inode *inode,
 584                                        struct btrfs_delayed_node *node)
 585{
 586        struct btrfs_block_rsv *src_rsv;
 587        struct btrfs_block_rsv *dst_rsv;
 588        u64 num_bytes;
 589        int ret;
 590        bool release = false;
 591
 592        src_rsv = trans->block_rsv;
 593        dst_rsv = &root->fs_info->delayed_block_rsv;
 594
 595        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
 596
 597        /*
 598         * If our block_rsv is the delalloc block reserve then check and see if
 599         * we have our extra reservation for updating the inode.  If not fall
 600         * through and try to reserve space quickly.
 601         *
 602         * We used to try and steal from the delalloc block rsv or the global
 603         * reserve, but we'd steal a full reservation, which isn't kind.  We are
 604         * here through delalloc which means we've likely just cowed down close
 605         * to the leaf that contains the inode, so we would steal less just
 606         * doing the fallback inode update, so if we do end up having to steal
 607         * from the global block rsv we hopefully only steal one or two blocks
 608         * worth which is less likely to hurt us.
 609         */
 610        if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
 611                spin_lock(&BTRFS_I(inode)->lock);
 612                if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
 613                                       &BTRFS_I(inode)->runtime_flags))
 614                        release = true;
 615                else
 616                        src_rsv = NULL;
 617                spin_unlock(&BTRFS_I(inode)->lock);
 618        }
 619
 620        /*
 621         * btrfs_dirty_inode will update the inode under btrfs_join_transaction
 622         * which doesn't reserve space for speed.  This is a problem since we
 623         * still need to reserve space for this update, so try to reserve the
 624         * space.
 625         *
 626         * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
 627         * we're accounted for.
 628         */
 629        if (!src_rsv || (!trans->bytes_reserved &&
 630                         src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
 631                ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
 632                                          BTRFS_RESERVE_NO_FLUSH);
 633                /*
 634                 * Since we're under a transaction reserve_metadata_bytes could
 635                 * try to commit the transaction which will make it return
 636                 * EAGAIN to make us stop the transaction we have, so return
 637                 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
 638                 */
 639                if (ret == -EAGAIN)
 640                        ret = -ENOSPC;
 641                if (!ret) {
 642                        node->bytes_reserved = num_bytes;
 643                        trace_btrfs_space_reservation(root->fs_info,
 644                                                      "delayed_inode",
 645                                                      btrfs_ino(inode),
 646                                                      num_bytes, 1);
 647                }
 648                return ret;
 649        }
 650
 651        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 652
 653        /*
 654         * Migrate only takes a reservation, it doesn't touch the size of the
  655         * block_rsv.  This keeps things simple for callers that don't normally
  656         * have things migrated from their block rsv.  If they go to release their
 657         * reservation, that will decrease the size as well, so if migrate
 658         * reduced size we'd end up with a negative size.  But for the
 659         * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
 660         * but we could in fact do this reserve/migrate dance several times
 661         * between the time we did the original reservation and we'd clean it
 662         * up.  So to take care of this, release the space for the meta
 663         * reservation here.  I think it may be time for a documentation page on
  664         * how block rsvs work.
 665         */
 666        if (!ret) {
 667                trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
 668                                              btrfs_ino(inode), num_bytes, 1);
 669                node->bytes_reserved = num_bytes;
 670        }
 671
 672        if (release) {
 673                trace_btrfs_space_reservation(root->fs_info, "delalloc",
 674                                              btrfs_ino(inode), num_bytes, 0);
 675                btrfs_block_rsv_release(root, src_rsv, num_bytes);
 676        }
 677
 678        return ret;
 679}
 680
 681static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
 682                                                struct btrfs_delayed_node *node)
 683{
 684        struct btrfs_block_rsv *rsv;
 685
 686        if (!node->bytes_reserved)
 687                return;
 688
 689        rsv = &root->fs_info->delayed_block_rsv;
 690        trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
 691                                      node->inode_id, node->bytes_reserved, 0);
 692        btrfs_block_rsv_release(root, rsv,
 693                                node->bytes_reserved);
 694        node->bytes_reserved = 0;
 695}
 696
 697/*
 698 * This helper will insert some continuous items into the same leaf according
 699 * to the free space of the leaf.
 700 */
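     /*
      * For example (hypothetical numbers): if delayed insertions are queued
      * for dir index offsets 100, 101 and 102 of the same directory and the
      * current leaf has enough free space, all three items are inserted with
      * a single setup_items_for_insert() call instead of three separate
      * b-tree searches.
      */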
 701static int btrfs_batch_insert_items(struct btrfs_root *root,
 702                                    struct btrfs_path *path,
 703                                    struct btrfs_delayed_item *item)
 704{
 705        struct btrfs_delayed_item *curr, *next;
 706        int free_space;
 707        int total_data_size = 0, total_size = 0;
 708        struct extent_buffer *leaf;
 709        char *data_ptr;
 710        struct btrfs_key *keys;
 711        u32 *data_size;
 712        struct list_head head;
 713        int slot;
 714        int nitems;
 715        int i;
 716        int ret = 0;
 717
 718        BUG_ON(!path->nodes[0]);
 719
 720        leaf = path->nodes[0];
 721        free_space = btrfs_leaf_free_space(root, leaf);
 722        INIT_LIST_HEAD(&head);
 723
 724        next = item;
 725        nitems = 0;
 726
 727        /*
  728         * count the number of continuous items that we can insert in a batch
 729         */
 730        while (total_size + next->data_len + sizeof(struct btrfs_item) <=
 731               free_space) {
 732                total_data_size += next->data_len;
 733                total_size += next->data_len + sizeof(struct btrfs_item);
 734                list_add_tail(&next->tree_list, &head);
 735                nitems++;
 736
 737                curr = next;
 738                next = __btrfs_next_delayed_item(curr);
 739                if (!next)
 740                        break;
 741
 742                if (!btrfs_is_continuous_delayed_item(curr, next))
 743                        break;
 744        }
 745
 746        if (!nitems) {
 747                ret = 0;
 748                goto out;
 749        }
 750
 751        /*
  752         * we need to allocate some memory, but that might cause the task
 753         * to sleep, so we set all locked nodes in the path to blocking locks
 754         * first.
 755         */
 756        btrfs_set_path_blocking(path);
 757
 758        keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 759        if (!keys) {
 760                ret = -ENOMEM;
 761                goto out;
 762        }
 763
 764        data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
 765        if (!data_size) {
 766                ret = -ENOMEM;
 767                goto error;
 768        }
 769
 770        /* get keys of all the delayed items */
 771        i = 0;
 772        list_for_each_entry(next, &head, tree_list) {
 773                keys[i] = next->key;
 774                data_size[i] = next->data_len;
 775                i++;
 776        }
 777
  778        /* reset all the locked nodes in the path to spinning locks. */
 779        btrfs_clear_path_blocking(path, NULL, 0);
 780
 781        /* insert the keys of the items */
 782        setup_items_for_insert(root, path, keys, data_size,
 783                               total_data_size, total_size, nitems);
 784
 785        /* insert the dir index items */
 786        slot = path->slots[0];
 787        list_for_each_entry_safe(curr, next, &head, tree_list) {
 788                data_ptr = btrfs_item_ptr(leaf, slot, char);
 789                write_extent_buffer(leaf, &curr->data,
 790                                    (unsigned long)data_ptr,
 791                                    curr->data_len);
 792                slot++;
 793
 794                btrfs_delayed_item_release_metadata(root, curr);
 795
 796                list_del(&curr->tree_list);
 797                btrfs_release_delayed_item(curr);
 798        }
 799
 800error:
 801        kfree(data_size);
 802        kfree(keys);
 803out:
 804        return ret;
 805}
 806
 807/*
  808 * This helper handles simple insertions that don't need to extend an item
  809 * with new data, such as directory name index insertion and inode insertion.
 810 */
 811static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 812                                     struct btrfs_root *root,
 813                                     struct btrfs_path *path,
 814                                     struct btrfs_delayed_item *delayed_item)
 815{
 816        struct extent_buffer *leaf;
 817        char *ptr;
 818        int ret;
 819
 820        ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
 821                                      delayed_item->data_len);
 822        if (ret < 0 && ret != -EEXIST)
 823                return ret;
 824
 825        leaf = path->nodes[0];
 826
 827        ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 828
 829        write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
 830                            delayed_item->data_len);
 831        btrfs_mark_buffer_dirty(leaf);
 832
 833        btrfs_delayed_item_release_metadata(root, delayed_item);
 834        return 0;
 835}
 836
 837/*
 838 * we insert an item first, then if there are some continuous items, we try
 839 * to insert those items into the same leaf.
 840 */
 841static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
 842                                      struct btrfs_path *path,
 843                                      struct btrfs_root *root,
 844                                      struct btrfs_delayed_node *node)
 845{
 846        struct btrfs_delayed_item *curr, *prev;
 847        int ret = 0;
 848
 849do_again:
 850        mutex_lock(&node->mutex);
 851        curr = __btrfs_first_delayed_insertion_item(node);
 852        if (!curr)
 853                goto insert_end;
 854
 855        ret = btrfs_insert_delayed_item(trans, root, path, curr);
 856        if (ret < 0) {
 857                btrfs_release_path(path);
 858                goto insert_end;
 859        }
 860
 861        prev = curr;
 862        curr = __btrfs_next_delayed_item(prev);
 863        if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
 864                /* insert the continuous items into the same leaf */
 865                path->slots[0]++;
 866                btrfs_batch_insert_items(root, path, curr);
 867        }
 868        btrfs_release_delayed_item(prev);
 869        btrfs_mark_buffer_dirty(path->nodes[0]);
 870
 871        btrfs_release_path(path);
 872        mutex_unlock(&node->mutex);
 873        goto do_again;
 874
 875insert_end:
 876        mutex_unlock(&node->mutex);
 877        return ret;
 878}
 879
 880static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 881                                    struct btrfs_root *root,
 882                                    struct btrfs_path *path,
 883                                    struct btrfs_delayed_item *item)
 884{
 885        struct btrfs_delayed_item *curr, *next;
 886        struct extent_buffer *leaf;
 887        struct btrfs_key key;
 888        struct list_head head;
 889        int nitems, i, last_item;
 890        int ret = 0;
 891
 892        BUG_ON(!path->nodes[0]);
 893
 894        leaf = path->nodes[0];
 895
 896        i = path->slots[0];
 897        last_item = btrfs_header_nritems(leaf) - 1;
 898        if (i > last_item)
 899                return -ENOENT; /* FIXME: Is errno suitable? */
 900
 901        next = item;
 902        INIT_LIST_HEAD(&head);
 903        btrfs_item_key_to_cpu(leaf, &key, i);
 904        nitems = 0;
 905        /*
  906         * count the number of dir index items that we can delete in a batch
 907         */
 908        while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
 909                list_add_tail(&next->tree_list, &head);
 910                nitems++;
 911
 912                curr = next;
 913                next = __btrfs_next_delayed_item(curr);
 914                if (!next)
 915                        break;
 916
 917                if (!btrfs_is_continuous_delayed_item(curr, next))
 918                        break;
 919
 920                i++;
 921                if (i > last_item)
 922                        break;
 923                btrfs_item_key_to_cpu(leaf, &key, i);
 924        }
 925
 926        if (!nitems)
 927                return 0;
 928
 929        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
 930        if (ret)
 931                goto out;
 932
 933        list_for_each_entry_safe(curr, next, &head, tree_list) {
 934                btrfs_delayed_item_release_metadata(root, curr);
 935                list_del(&curr->tree_list);
 936                btrfs_release_delayed_item(curr);
 937        }
 938
 939out:
 940        return ret;
 941}
 942
 943static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
 944                                      struct btrfs_path *path,
 945                                      struct btrfs_root *root,
 946                                      struct btrfs_delayed_node *node)
 947{
 948        struct btrfs_delayed_item *curr, *prev;
 949        int ret = 0;
 950
 951do_again:
 952        mutex_lock(&node->mutex);
 953        curr = __btrfs_first_delayed_deletion_item(node);
 954        if (!curr)
 955                goto delete_fail;
 956
 957        ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
 958        if (ret < 0)
 959                goto delete_fail;
 960        else if (ret > 0) {
 961                /*
 962                 * can't find the item which the node points to, so this node
 963                 * is invalid, just drop it.
 964                 */
 965                prev = curr;
 966                curr = __btrfs_next_delayed_item(prev);
 967                btrfs_release_delayed_item(prev);
 968                ret = 0;
 969                btrfs_release_path(path);
 970                if (curr) {
 971                        mutex_unlock(&node->mutex);
 972                        goto do_again;
 973                } else
 974                        goto delete_fail;
 975        }
 976
 977        btrfs_batch_delete_items(trans, root, path, curr);
 978        btrfs_release_path(path);
 979        mutex_unlock(&node->mutex);
 980        goto do_again;
 981
 982delete_fail:
 983        btrfs_release_path(path);
 984        mutex_unlock(&node->mutex);
 985        return ret;
 986}
 987
 988static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
 989{
 990        struct btrfs_delayed_root *delayed_root;
 991
 992        if (delayed_node &&
 993            test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 994                BUG_ON(!delayed_node->root);
 995                clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 996                delayed_node->count--;
 997
 998                delayed_root = delayed_node->root->fs_info->delayed_root;
 999                finish_one_item(delayed_root);
1000        }
1001}
1002
1003static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
1004{
1005        struct btrfs_delayed_root *delayed_root;
1006
1007        ASSERT(delayed_node->root);
1008        clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1009        delayed_node->count--;
1010
1011        delayed_root = delayed_node->root->fs_info->delayed_root;
1012        finish_one_item(delayed_root);
1013}
1014
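     /*
      * Copy the delayed node's in-memory btrfs_inode_item over the on-disk
      * inode item and, if BTRFS_DELAYED_NODE_DEL_IREF is set, delete the
      * single INODE_REF/INODE_EXTREF item that follows it.  Callers hold
      * node->mutex and have switched trans->block_rsv to the global
      * delayed_block_rsv.
      */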
1015static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1016                                        struct btrfs_root *root,
1017                                        struct btrfs_path *path,
1018                                        struct btrfs_delayed_node *node)
1019{
1020        struct btrfs_key key;
1021        struct btrfs_inode_item *inode_item;
1022        struct extent_buffer *leaf;
1023        int mod;
1024        int ret;
1025
1026        key.objectid = node->inode_id;
1027        key.type = BTRFS_INODE_ITEM_KEY;
1028        key.offset = 0;
1029
1030        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1031                mod = -1;
1032        else
1033                mod = 1;
1034
1035        ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1036        if (ret > 0) {
1037                btrfs_release_path(path);
1038                return -ENOENT;
1039        } else if (ret < 0) {
1040                return ret;
1041        }
1042
1043        leaf = path->nodes[0];
1044        inode_item = btrfs_item_ptr(leaf, path->slots[0],
1045                                    struct btrfs_inode_item);
1046        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1047                            sizeof(struct btrfs_inode_item));
1048        btrfs_mark_buffer_dirty(leaf);
1049
1050        if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1051                goto no_iref;
1052
1053        path->slots[0]++;
1054        if (path->slots[0] >= btrfs_header_nritems(leaf))
1055                goto search;
1056again:
1057        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1058        if (key.objectid != node->inode_id)
1059                goto out;
1060
1061        if (key.type != BTRFS_INODE_REF_KEY &&
1062            key.type != BTRFS_INODE_EXTREF_KEY)
1063                goto out;
1064
1065        /*
 1066         * Delayed iref deletion is only used for an inode that has a single
 1067         * link, so there is only one iref; the case of several irefs in the
 1068         * same item does not occur.
1069         */
1070        btrfs_del_item(trans, root, path);
1071out:
1072        btrfs_release_delayed_iref(node);
1073no_iref:
1074        btrfs_release_path(path);
1075err_out:
1076        btrfs_delayed_inode_release_metadata(root, node);
1077        btrfs_release_delayed_inode(node);
1078
1079        return ret;
1080
1081search:
1082        btrfs_release_path(path);
1083
1084        key.type = BTRFS_INODE_EXTREF_KEY;
1085        key.offset = -1;
1086        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1087        if (ret < 0)
1088                goto err_out;
1089        ASSERT(ret);
1090
1091        ret = 0;
1092        leaf = path->nodes[0];
1093        path->slots[0]--;
1094        goto again;
1095}
1096
1097static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1098                                             struct btrfs_root *root,
1099                                             struct btrfs_path *path,
1100                                             struct btrfs_delayed_node *node)
1101{
1102        int ret;
1103
1104        mutex_lock(&node->mutex);
1105        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1106                mutex_unlock(&node->mutex);
1107                return 0;
1108        }
1109
1110        ret = __btrfs_update_delayed_inode(trans, root, path, node);
1111        mutex_unlock(&node->mutex);
1112        return ret;
1113}
1114
1115static inline int
1116__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1117                                   struct btrfs_path *path,
1118                                   struct btrfs_delayed_node *node)
1119{
1120        int ret;
1121
1122        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1123        if (ret)
1124                return ret;
1125
1126        ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1127        if (ret)
1128                return ret;
1129
1130        ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1131        return ret;
1132}
1133
1134/*
1135 * Called when committing the transaction.
1136 * Returns 0 on success.
1137 * Returns < 0 on error and returns with an aborted transaction with any
1138 * outstanding delayed items cleaned up.
1139 */
1140static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1141                                     struct btrfs_root *root, int nr)
1142{
1143        struct btrfs_delayed_root *delayed_root;
1144        struct btrfs_delayed_node *curr_node, *prev_node;
1145        struct btrfs_path *path;
1146        struct btrfs_block_rsv *block_rsv;
1147        int ret = 0;
1148        bool count = (nr > 0);
1149
1150        if (trans->aborted)
1151                return -EIO;
1152
1153        path = btrfs_alloc_path();
1154        if (!path)
1155                return -ENOMEM;
1156        path->leave_spinning = 1;
1157
1158        block_rsv = trans->block_rsv;
1159        trans->block_rsv = &root->fs_info->delayed_block_rsv;
1160
1161        delayed_root = btrfs_get_delayed_root(root);
1162
1163        curr_node = btrfs_first_delayed_node(delayed_root);
1164        while (curr_node && (!count || (count && nr--))) {
1165                ret = __btrfs_commit_inode_delayed_items(trans, path,
1166                                                         curr_node);
1167                if (ret) {
1168                        btrfs_release_delayed_node(curr_node);
1169                        curr_node = NULL;
1170                        btrfs_abort_transaction(trans, ret);
1171                        break;
1172                }
1173
1174                prev_node = curr_node;
1175                curr_node = btrfs_next_delayed_node(curr_node);
1176                btrfs_release_delayed_node(prev_node);
1177        }
1178
1179        if (curr_node)
1180                btrfs_release_delayed_node(curr_node);
1181        btrfs_free_path(path);
1182        trans->block_rsv = block_rsv;
1183
1184        return ret;
1185}
1186
1187int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1188                            struct btrfs_root *root)
1189{
1190        return __btrfs_run_delayed_items(trans, root, -1);
1191}
1192
1193int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
1194                               struct btrfs_root *root, int nr)
1195{
1196        return __btrfs_run_delayed_items(trans, root, nr);
1197}
1198
1199int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1200                                     struct inode *inode)
1201{
1202        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1203        struct btrfs_path *path;
1204        struct btrfs_block_rsv *block_rsv;
1205        int ret;
1206
1207        if (!delayed_node)
1208                return 0;
1209
1210        mutex_lock(&delayed_node->mutex);
1211        if (!delayed_node->count) {
1212                mutex_unlock(&delayed_node->mutex);
1213                btrfs_release_delayed_node(delayed_node);
1214                return 0;
1215        }
1216        mutex_unlock(&delayed_node->mutex);
1217
1218        path = btrfs_alloc_path();
1219        if (!path) {
1220                btrfs_release_delayed_node(delayed_node);
1221                return -ENOMEM;
1222        }
1223        path->leave_spinning = 1;
1224
1225        block_rsv = trans->block_rsv;
1226        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1227
1228        ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1229
1230        btrfs_release_delayed_node(delayed_node);
1231        btrfs_free_path(path);
1232        trans->block_rsv = block_rsv;
1233
1234        return ret;
1235}
1236
1237int btrfs_commit_inode_delayed_inode(struct inode *inode)
1238{
1239        struct btrfs_trans_handle *trans;
1240        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1241        struct btrfs_path *path;
1242        struct btrfs_block_rsv *block_rsv;
1243        int ret;
1244
1245        if (!delayed_node)
1246                return 0;
1247
1248        mutex_lock(&delayed_node->mutex);
1249        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1250                mutex_unlock(&delayed_node->mutex);
1251                btrfs_release_delayed_node(delayed_node);
1252                return 0;
1253        }
1254        mutex_unlock(&delayed_node->mutex);
1255
1256        trans = btrfs_join_transaction(delayed_node->root);
1257        if (IS_ERR(trans)) {
1258                ret = PTR_ERR(trans);
1259                goto out;
1260        }
1261
1262        path = btrfs_alloc_path();
1263        if (!path) {
1264                ret = -ENOMEM;
1265                goto trans_out;
1266        }
1267        path->leave_spinning = 1;
1268
1269        block_rsv = trans->block_rsv;
1270        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1271
1272        mutex_lock(&delayed_node->mutex);
1273        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1274                ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1275                                                   path, delayed_node);
1276        else
1277                ret = 0;
1278        mutex_unlock(&delayed_node->mutex);
1279
1280        btrfs_free_path(path);
1281        trans->block_rsv = block_rsv;
1282trans_out:
1283        btrfs_end_transaction(trans, delayed_node->root);
1284        btrfs_btree_balance_dirty(delayed_node->root);
1285out:
1286        btrfs_release_delayed_node(delayed_node);
1287
1288        return ret;
1289}
1290
1291void btrfs_remove_delayed_node(struct inode *inode)
1292{
1293        struct btrfs_delayed_node *delayed_node;
1294
1295        delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
1296        if (!delayed_node)
1297                return;
1298
1299        BTRFS_I(inode)->delayed_node = NULL;
1300        btrfs_release_delayed_node(delayed_node);
1301}
1302
1303struct btrfs_async_delayed_work {
1304        struct btrfs_delayed_root *delayed_root;
1305        int nr;
1306        struct btrfs_work work;
1307};
1308
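     /*
      * Worker callback for the delayed_workers workqueue: repeatedly pull a
      * node off the prepare list, join a transaction and flush that node's
      * delayed items, until either async_work->nr nodes have been handled
      * (nr == 0 means no limit) or the backlog drops below half of
      * BTRFS_DELAYED_BACKGROUND.
      */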
1309static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1310{
1311        struct btrfs_async_delayed_work *async_work;
1312        struct btrfs_delayed_root *delayed_root;
1313        struct btrfs_trans_handle *trans;
1314        struct btrfs_path *path;
1315        struct btrfs_delayed_node *delayed_node = NULL;
1316        struct btrfs_root *root;
1317        struct btrfs_block_rsv *block_rsv;
1318        int total_done = 0;
1319
1320        async_work = container_of(work, struct btrfs_async_delayed_work, work);
1321        delayed_root = async_work->delayed_root;
1322
1323        path = btrfs_alloc_path();
1324        if (!path)
1325                goto out;
1326
1327again:
1328        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
1329                goto free_path;
1330
1331        delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1332        if (!delayed_node)
1333                goto free_path;
1334
1335        path->leave_spinning = 1;
1336        root = delayed_node->root;
1337
1338        trans = btrfs_join_transaction(root);
1339        if (IS_ERR(trans))
1340                goto release_path;
1341
1342        block_rsv = trans->block_rsv;
1343        trans->block_rsv = &root->fs_info->delayed_block_rsv;
1344
1345        __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1346
1347        trans->block_rsv = block_rsv;
1348        btrfs_end_transaction(trans, root);
1349        btrfs_btree_balance_dirty_nodelay(root);
1350
1351release_path:
1352        btrfs_release_path(path);
1353        total_done++;
1354
1355        btrfs_release_prepared_delayed_node(delayed_node);
1356        if (async_work->nr == 0 || total_done < async_work->nr)
1357                goto again;
1358
1359free_path:
1360        btrfs_free_path(path);
1361out:
1362        wake_up(&delayed_root->wait);
1363        kfree(async_work);
1364}
1365
1366
1367static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1368                                     struct btrfs_fs_info *fs_info, int nr)
1369{
1370        struct btrfs_async_delayed_work *async_work;
1371
1372        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1373                return 0;
1374
1375        async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1376        if (!async_work)
1377                return -ENOMEM;
1378
1379        async_work->delayed_root = delayed_root;
1380        btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
1381                        btrfs_async_run_delayed_root, NULL, NULL);
1382        async_work->nr = nr;
1383
1384        btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1385        return 0;
1386}
1387
1388void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
1389{
1390        struct btrfs_delayed_root *delayed_root;
1391        delayed_root = btrfs_get_delayed_root(root);
1392        WARN_ON(btrfs_first_delayed_node(delayed_root));
1393}
1394
1395static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1396{
1397        int val = atomic_read(&delayed_root->items_seq);
1398
1399        if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1400                return 1;
1401
1402        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1403                return 1;
1404
1405        return 0;
1406}
1407
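     /*
      * Throttle delayed item producers: do nothing while the backlog is below
      * BTRFS_DELAYED_BACKGROUND; once it reaches BTRFS_DELAYED_WRITEBACK,
      * queue an unbounded async flush and wait until could_end_wait() is
      * satisfied; otherwise just kick off a BTRFS_DELAYED_BATCH-sized async
      * flush.
      */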
1408void btrfs_balance_delayed_items(struct btrfs_root *root)
1409{
1410        struct btrfs_delayed_root *delayed_root;
1411        struct btrfs_fs_info *fs_info = root->fs_info;
1412
1413        delayed_root = btrfs_get_delayed_root(root);
1414
1415        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1416                return;
1417
1418        if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1419                int seq;
1420                int ret;
1421
1422                seq = atomic_read(&delayed_root->items_seq);
1423
1424                ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1425                if (ret)
1426                        return;
1427
1428                wait_event_interruptible(delayed_root->wait,
1429                                         could_end_wait(delayed_root, seq));
1430                return;
1431        }
1432
1433        btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1434}
1435
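     /*
      * Queue a dir index insertion instead of touching the b-tree right away:
      * a struct btrfs_dir_item plus the name is packed into the delayed
      * item's data buffer, metadata space is migrated into the
      * delayed_block_rsv, and the item is added to the directory's delayed
      * node under its DIR_INDEX key.
      */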
1436/* Will return 0 or -ENOMEM */
1437int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1438                                   struct btrfs_root *root, const char *name,
1439                                   int name_len, struct inode *dir,
1440                                   struct btrfs_disk_key *disk_key, u8 type,
1441                                   u64 index)
1442{
1443        struct btrfs_delayed_node *delayed_node;
1444        struct btrfs_delayed_item *delayed_item;
1445        struct btrfs_dir_item *dir_item;
1446        int ret;
1447
1448        delayed_node = btrfs_get_or_create_delayed_node(dir);
1449        if (IS_ERR(delayed_node))
1450                return PTR_ERR(delayed_node);
1451
1452        delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1453        if (!delayed_item) {
1454                ret = -ENOMEM;
1455                goto release_node;
1456        }
1457
1458        delayed_item->key.objectid = btrfs_ino(dir);
1459        delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1460        delayed_item->key.offset = index;
1461
1462        dir_item = (struct btrfs_dir_item *)delayed_item->data;
1463        dir_item->location = *disk_key;
1464        btrfs_set_stack_dir_transid(dir_item, trans->transid);
1465        btrfs_set_stack_dir_data_len(dir_item, 0);
1466        btrfs_set_stack_dir_name_len(dir_item, name_len);
1467        btrfs_set_stack_dir_type(dir_item, type);
1468        memcpy((char *)(dir_item + 1), name, name_len);
1469
1470        ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
1471        /*
1472         * we have reserved enough space when we start a new transaction,
 1473         * so a metadata reservation failure here is impossible
1474         */
1475        BUG_ON(ret);
1476
1477
1478        mutex_lock(&delayed_node->mutex);
1479        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1480        if (unlikely(ret)) {
1481                btrfs_err(root->fs_info,
1482                          "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1483                          name_len, name, delayed_node->root->objectid,
1484                          delayed_node->inode_id, ret);
1485                BUG();
1486        }
1487        mutex_unlock(&delayed_node->mutex);
1488
1489release_node:
1490        btrfs_release_delayed_node(delayed_node);
1491        return ret;
1492}
1493
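     /*
      * If a matching insertion is still only queued in the delayed node, drop
      * it there (together with its reservation) so that no deletion item is
      * needed.  Returns 1 when nothing was queued, telling the caller to
      * queue a deletion item instead.
      */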
1494static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
1495                                               struct btrfs_delayed_node *node,
1496                                               struct btrfs_key *key)
1497{
1498        struct btrfs_delayed_item *item;
1499
1500        mutex_lock(&node->mutex);
1501        item = __btrfs_lookup_delayed_insertion_item(node, key);
1502        if (!item) {
1503                mutex_unlock(&node->mutex);
1504                return 1;
1505        }
1506
1507        btrfs_delayed_item_release_metadata(root, item);
1508        btrfs_release_delayed_item(item);
1509        mutex_unlock(&node->mutex);
1510        return 0;
1511}
1512
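    /*
     * btrfs_delete_delayed_dir_index - delete the dir index item @index of @dir
     *
     * First try to cancel a delayed insertion that has not been flushed yet;
     * if there is none, queue a delayed deletion item so the on-disk index
     * item is removed when the delayed node is run.
     */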
1513int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1514                                   struct btrfs_root *root, struct inode *dir,
1515                                   u64 index)
1516{
1517        struct btrfs_delayed_node *node;
1518        struct btrfs_delayed_item *item;
1519        struct btrfs_key item_key;
1520        int ret;
1521
1522        node = btrfs_get_or_create_delayed_node(dir);
1523        if (IS_ERR(node))
1524                return PTR_ERR(node);
1525
1526        item_key.objectid = btrfs_ino(dir);
1527        item_key.type = BTRFS_DIR_INDEX_KEY;
1528        item_key.offset = index;
1529
1530        ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
1531        if (!ret)
1532                goto end;
1533
1534        item = btrfs_alloc_delayed_item(0);
1535        if (!item) {
1536                ret = -ENOMEM;
1537                goto end;
1538        }
1539
1540        item->key = item_key;
1541
1542        ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
1543        /*
1544         * We reserved enough space when we started the transaction, so
1545         * reserving metadata here cannot fail.
1546         */
1547        BUG_ON(ret);
1548
1549        mutex_lock(&node->mutex);
1550        ret = __btrfs_add_delayed_deletion_item(node, item);
1551        if (unlikely(ret)) {
1552                btrfs_err(root->fs_info,
1553                          "failed to add delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1554                          index, node->root->objectid, node->inode_id, ret);
1555                BUG();
1556        }
1557        mutex_unlock(&node->mutex);
1558end:
1559        btrfs_release_delayed_node(node);
1560        return ret;
1561}
1562
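    /*
     * Copy the dir index counter cached in the delayed node into the btrfs
     * inode. Returns -ENOENT if the inode has no delayed node and -EINVAL if
     * the counter has not been set yet.
     */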
1563int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1564{
1565        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1566
1567        if (!delayed_node)
1568                return -ENOENT;
1569
1570        /*
1571         * Since we hold i_mutex of this directory, no new directory index
1572         * can be added to the delayed node and index_cnt cannot be updated
1573         * concurrently, so there is no need to lock the delayed node.
1574         */
1575        if (!delayed_node->index_cnt) {
1576                btrfs_release_delayed_node(delayed_node);
1577                return -EINVAL;
1578        }
1579
1580        BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
1581        btrfs_release_delayed_node(delayed_node);
1582        return 0;
1583}
1584
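    /*
     * Collect references to all pending insertion and deletion items of the
     * directory onto @ins_list and @del_list for readdir, taking the inode
     * lock exclusively while they are in use. Returns false if the inode has
     * no delayed node.
     */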
1585bool btrfs_readdir_get_delayed_items(struct inode *inode,
1586                                     struct list_head *ins_list,
1587                                     struct list_head *del_list)
1588{
1589        struct btrfs_delayed_node *delayed_node;
1590        struct btrfs_delayed_item *item;
1591
1592        delayed_node = btrfs_get_delayed_node(inode);
1593        if (!delayed_node)
1594                return false;
1595
1596        /*
1597         * We can only do one readdir with delayed items at a time because of
1598         * item->readdir_list.
1599         */
1600        inode_unlock_shared(inode);
1601        inode_lock(inode);
1602
1603        mutex_lock(&delayed_node->mutex);
1604        item = __btrfs_first_delayed_insertion_item(delayed_node);
1605        while (item) {
1606                atomic_inc(&item->refs);
1607                list_add_tail(&item->readdir_list, ins_list);
1608                item = __btrfs_next_delayed_item(item);
1609        }
1610
1611        item = __btrfs_first_delayed_deletion_item(delayed_node);
1612        while (item) {
1613                atomic_inc(&item->refs);
1614                list_add_tail(&item->readdir_list, del_list);
1615                item = __btrfs_next_delayed_item(item);
1616        }
1617        mutex_unlock(&delayed_node->mutex);
1618        /*
1619         * This delayed node is still cached in the btrfs inode, so refs
1620         * must be > 1 here, and we need not check whether it is about to
1621         * be freed.
1622         *
1623         * Besides that, this function is only used for readdir, and no
1624         * delayed items are inserted or deleted during that time, so we
1625         * need not requeue or dequeue this delayed node either.
1626         */
1627        atomic_dec(&delayed_node->refs);
1628
1629        return true;
1630}
1631
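    /*
     * Drop the item references taken by btrfs_readdir_get_delayed_items(),
     * freeing any item whose last reference is gone, and downgrade the inode
     * lock back to shared for the VFS.
     */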
1632void btrfs_readdir_put_delayed_items(struct inode *inode,
1633                                     struct list_head *ins_list,
1634                                     struct list_head *del_list)
1635{
1636        struct btrfs_delayed_item *curr, *next;
1637
1638        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1639                list_del(&curr->readdir_list);
1640                if (atomic_dec_and_test(&curr->refs))
1641                        kfree(curr);
1642        }
1643
1644        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1645                list_del(&curr->readdir_list);
1646                if (atomic_dec_and_test(&curr->refs))
1647                        kfree(curr);
1648        }
1649
1650        /*
1651         * The VFS is going to do up_read(), so we need to downgrade back to a
1652         * read lock.
1653         */
1654        downgrade_write(&inode->i_rwsem);
1655}
1656
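    /*
     * Returns 1 if the dir index @index has a pending delayed deletion on
     * @del_list (so readdir must skip it), 0 otherwise. Entries with a lower
     * index are dropped from the list as it is walked.
     */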
1657int btrfs_should_delete_dir_index(struct list_head *del_list,
1658                                  u64 index)
1659{
1660        struct btrfs_delayed_item *curr, *next;
1661        int ret;
1662
1663        if (list_empty(del_list))
1664                return 0;
1665
1666        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1667                if (curr->key.offset > index)
1668                        break;
1669
1670                list_del(&curr->readdir_list);
1671                ret = (curr->key.offset == index);
1672
1673                if (atomic_dec_and_test(&curr->refs))
1674                        kfree(curr);
1675
1676                if (ret)
1677                        return 1;
1678                else
1679                        continue;
1680        }
1681        return 0;
1682}
1683
1684/*
1685 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1686 * and emit the queued directory entries in @ins_list through @ctx.
1687 */
1688int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1689                                    struct list_head *ins_list, bool *emitted)
1690{
1691        struct btrfs_dir_item *di;
1692        struct btrfs_delayed_item *curr, *next;
1693        struct btrfs_key location;
1694        char *name;
1695        int name_len;
1696        int over = 0;
1697        unsigned char d_type;
1698
1699        if (list_empty(ins_list))
1700                return 0;
1701
1702        /*
1703         * The data of a delayed item never changes once it is queued, so
1704         * there is no need to lock it. And since we hold i_mutex of the
1705         * directory, nobody can delete any directory index meanwhile.
1706         */
1707        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1708                list_del(&curr->readdir_list);
1709
1710                if (curr->key.offset < ctx->pos) {
1711                        if (atomic_dec_and_test(&curr->refs))
1712                                kfree(curr);
1713                        continue;
1714                }
1715
1716                ctx->pos = curr->key.offset;
1717
1718                di = (struct btrfs_dir_item *)curr->data;
1719                name = (char *)(di + 1);
1720                name_len = btrfs_stack_dir_name_len(di);
1721
1722                d_type = btrfs_filetype_table[di->type];
1723                btrfs_disk_key_to_cpu(&location, &di->location);
1724
1725                over = !dir_emit(ctx, name, name_len,
1726                               location.objectid, d_type);
1727
1728                if (atomic_dec_and_test(&curr->refs))
1729                        kfree(curr);
1730
1731                if (over)
1732                        return 1;
1733                *emitted = true;
1734        }
1735        return 0;
1736}
1737
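    /*
     * Copy the fields of the in-memory inode into the stack inode item that
     * is cached in the delayed node and later written into the fs tree.
     */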
1738static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1739                                  struct btrfs_inode_item *inode_item,
1740                                  struct inode *inode)
1741{
1742        btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1743        btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1744        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1745        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1746        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1747        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1748        btrfs_set_stack_inode_generation(inode_item,
1749                                         BTRFS_I(inode)->generation);
1750        btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
1751        btrfs_set_stack_inode_transid(inode_item, trans->transid);
1752        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1753        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1754        btrfs_set_stack_inode_block_group(inode_item, 0);
1755
1756        btrfs_set_stack_timespec_sec(&inode_item->atime,
1757                                     inode->i_atime.tv_sec);
1758        btrfs_set_stack_timespec_nsec(&inode_item->atime,
1759                                      inode->i_atime.tv_nsec);
1760
1761        btrfs_set_stack_timespec_sec(&inode_item->mtime,
1762                                     inode->i_mtime.tv_sec);
1763        btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1764                                      inode->i_mtime.tv_nsec);
1765
1766        btrfs_set_stack_timespec_sec(&inode_item->ctime,
1767                                     inode->i_ctime.tv_sec);
1768        btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1769                                      inode->i_ctime.tv_nsec);
1770
1771        btrfs_set_stack_timespec_sec(&inode_item->otime,
1772                                     BTRFS_I(inode)->i_otime.tv_sec);
1773        btrfs_set_stack_timespec_nsec(&inode_item->otime,
1774                                     BTRFS_I(inode)->i_otime.tv_nsec);
1775}
1776
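    /*
     * Initialize the VFS inode from the inode item cached in its delayed
     * node. Returns -ENOENT if there is no delayed node or the cached inode
     * item is not dirty.
     */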
1777int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1778{
1779        struct btrfs_delayed_node *delayed_node;
1780        struct btrfs_inode_item *inode_item;
1781
1782        delayed_node = btrfs_get_delayed_node(inode);
1783        if (!delayed_node)
1784                return -ENOENT;
1785
1786        mutex_lock(&delayed_node->mutex);
1787        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1788                mutex_unlock(&delayed_node->mutex);
1789                btrfs_release_delayed_node(delayed_node);
1790                return -ENOENT;
1791        }
1792
1793        inode_item = &delayed_node->inode_item;
1794
1795        i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1796        i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1797        btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
1798        inode->i_mode = btrfs_stack_inode_mode(inode_item);
1799        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1800        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1801        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1802        BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1803
1804        inode->i_version = btrfs_stack_inode_sequence(inode_item);
1805        inode->i_rdev = 0;
1806        *rdev = btrfs_stack_inode_rdev(inode_item);
1807        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1808
1809        inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1810        inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1811
1812        inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1813        inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1814
1815        inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1816        inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1817
1818        BTRFS_I(inode)->i_otime.tv_sec =
1819                btrfs_stack_timespec_sec(&inode_item->otime);
1820        BTRFS_I(inode)->i_otime.tv_nsec =
1821                btrfs_stack_timespec_nsec(&inode_item->otime);
1822
1823        inode->i_generation = BTRFS_I(inode)->generation;
1824        BTRFS_I(inode)->index_cnt = (u64)-1;
1825
1826        mutex_unlock(&delayed_node->mutex);
1827        btrfs_release_delayed_node(delayed_node);
1828        return 0;
1829}
1830
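    /*
     * Record an inode update in the delayed node instead of updating the
     * inode item in the fs tree immediately. Metadata space is reserved the
     * first time the node's inode item is marked dirty.
     */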
1831int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1832                               struct btrfs_root *root, struct inode *inode)
1833{
1834        struct btrfs_delayed_node *delayed_node;
1835        int ret = 0;
1836
1837        delayed_node = btrfs_get_or_create_delayed_node(inode);
1838        if (IS_ERR(delayed_node))
1839                return PTR_ERR(delayed_node);
1840
1841        mutex_lock(&delayed_node->mutex);
1842        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1843                fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1844                goto release_node;
1845        }
1846
1847        ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
1848                                                   delayed_node);
1849        if (ret)
1850                goto release_node;
1851
1852        fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1853        set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1854        delayed_node->count++;
1855        atomic_inc(&root->fs_info->delayed_root->items);
1856release_node:
1857        mutex_unlock(&delayed_node->mutex);
1858        btrfs_release_delayed_node(delayed_node);
1859        return ret;
1860}
1861
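    /*
     * Queue the deletion of the inode ref of @inode in its delayed node.
     * Returns -EAGAIN during log recovery, when delayed inode operations
     * must not be used.
     */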
1862int btrfs_delayed_delete_inode_ref(struct inode *inode)
1863{
1864        struct btrfs_delayed_node *delayed_node;
1865
1866        /*
1867         * We don't do delayed inode updates during log recovery because it
1868         * leads to ENOSPC problems. This means we can't do delayed inode
1869         * ref deletions either.
1870         */
1871        if (test_bit(BTRFS_FS_LOG_RECOVERING,
1872                     &BTRFS_I(inode)->root->fs_info->flags))
1873                return -EAGAIN;
1874
1875        delayed_node = btrfs_get_or_create_delayed_node(inode);
1876        if (IS_ERR(delayed_node))
1877                return PTR_ERR(delayed_node);
1878
1879        /*
1880         * We don't reserve space for the inode ref deletion because:
1881         * - We only do async inode ref deletion for an inode that has a
1882         *   single link (i_nlink == 1), which means there is only one
1883         *   inode ref. In most cases the inode ref and the inode item are
1884         *   in the same leaf and we deal with them at the same time.
1885         *   Since we are sure to reserve space for the inode item, there
1886         *   is no need to reserve extra space for the inode ref deletion.
1887         * - If the inode ref and the inode item are not in the same leaf,
1888         *   we still needn't worry about ENOSPC, because we reserve much
1889         *   more space for the inode update than it actually needs.
1890         * - In the worst case we can steal some space from the global
1891         *   reservation, but that is very rare.
1892         */
1893        mutex_lock(&delayed_node->mutex);
1894        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1895                goto release_node;
1896
1897        set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1898        delayed_node->count++;
1899        atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
1900release_node:
1901        mutex_unlock(&delayed_node->mutex);
1902        btrfs_release_delayed_node(delayed_node);
1903        return 0;
1904}
1905
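    /*
     * Throw away everything queued on @delayed_node: release all pending
     * insertion and deletion items together with their reserved metadata, as
     * well as a pending inode ref deletion and a dirty inode item, without
     * writing anything to the fs tree.
     */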
1906static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1907{
1908        struct btrfs_root *root = delayed_node->root;
1909        struct btrfs_delayed_item *curr_item, *prev_item;
1910
1911        mutex_lock(&delayed_node->mutex);
1912        curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1913        while (curr_item) {
1914                btrfs_delayed_item_release_metadata(root, curr_item);
1915                prev_item = curr_item;
1916                curr_item = __btrfs_next_delayed_item(prev_item);
1917                btrfs_release_delayed_item(prev_item);
1918        }
1919
1920        curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1921        while (curr_item) {
1922                btrfs_delayed_item_release_metadata(root, curr_item);
1923                prev_item = curr_item;
1924                curr_item = __btrfs_next_delayed_item(prev_item);
1925                btrfs_release_delayed_item(prev_item);
1926        }
1927
1928        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1929                btrfs_release_delayed_iref(delayed_node);
1930
1931        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1932                btrfs_delayed_inode_release_metadata(root, delayed_node);
1933                btrfs_release_delayed_inode(delayed_node);
1934        }
1935        mutex_unlock(&delayed_node->mutex);
1936}
1937
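    /* Throw away all delayed items of @inode without flushing them. */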
1938void btrfs_kill_delayed_inode_items(struct inode *inode)
1939{
1940        struct btrfs_delayed_node *delayed_node;
1941
1942        delayed_node = btrfs_get_delayed_node(inode);
1943        if (!delayed_node)
1944                return;
1945
1946        __btrfs_kill_delayed_node(delayed_node);
1947        btrfs_release_delayed_node(delayed_node);
1948}
1949
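    /*
     * Walk root->delayed_nodes_tree in batches of up to 8 nodes and kill
     * every delayed node of @root, dropping all of its pending work.
     */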
1950void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1951{
1952        u64 inode_id = 0;
1953        struct btrfs_delayed_node *delayed_nodes[8];
1954        int i, n;
1955
1956        while (1) {
1957                spin_lock(&root->inode_lock);
1958                n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1959                                           (void **)delayed_nodes, inode_id,
1960                                           ARRAY_SIZE(delayed_nodes));
1961                if (!n) {
1962                        spin_unlock(&root->inode_lock);
1963                        break;
1964                }
1965
1966                inode_id = delayed_nodes[n - 1]->inode_id + 1;
1967
1968                for (i = 0; i < n; i++)
1969                        atomic_inc(&delayed_nodes[i]->refs);
1970                spin_unlock(&root->inode_lock);
1971
1972                for (i = 0; i < n; i++) {
1973                        __btrfs_kill_delayed_node(delayed_nodes[i]);
1974                        btrfs_release_delayed_node(delayed_nodes[i]);
1975                }
1976        }
1977}
1978
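    /*
     * Kill every delayed node that is still queued on the fs_info's delayed
     * root.
     */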
1979void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
1980{
1981        struct btrfs_delayed_root *delayed_root;
1982        struct btrfs_delayed_node *curr_node, *prev_node;
1983
1984        delayed_root = btrfs_get_delayed_root(root);
1985
1986        curr_node = btrfs_first_delayed_node(delayed_root);
1987        while (curr_node) {
1988                __btrfs_kill_delayed_node(curr_node);
1989
1990                prev_node = curr_node;
1991                curr_node = btrfs_next_delayed_node(curr_node);
1992                btrfs_release_delayed_node(prev_node);
1993        }
1994}
1995
1996