linux/fs/btrfs/delayed-inode.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011 Fujitsu.  All rights reserved.
   4 * Written by Miao Xie <miaox@cn.fujitsu.com>
   5 */
   6
   7#include <linux/slab.h>
   8#include <linux/iversion.h>
   9#include "delayed-inode.h"
  10#include "disk-io.h"
  11#include "transaction.h"
  12#include "ctree.h"
  13#include "qgroup.h"
  14
  15#define BTRFS_DELAYED_WRITEBACK         512
  16#define BTRFS_DELAYED_BACKGROUND        128
  17#define BTRFS_DELAYED_BATCH             16
  18
  19static struct kmem_cache *delayed_node_cache;
  20
  21int __init btrfs_delayed_inode_init(void)
  22{
  23        delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
  24                                        sizeof(struct btrfs_delayed_node),
  25                                        0,
  26                                        SLAB_MEM_SPREAD,
  27                                        NULL);
  28        if (!delayed_node_cache)
  29                return -ENOMEM;
  30        return 0;
  31}
  32
  33void __cold btrfs_delayed_inode_exit(void)
  34{
  35        kmem_cache_destroy(delayed_node_cache);
  36}
  37
  38static inline void btrfs_init_delayed_node(
  39                                struct btrfs_delayed_node *delayed_node,
  40                                struct btrfs_root *root, u64 inode_id)
  41{
  42        delayed_node->root = root;
  43        delayed_node->inode_id = inode_id;
  44        refcount_set(&delayed_node->refs, 0);
  45        delayed_node->ins_root = RB_ROOT;
  46        delayed_node->del_root = RB_ROOT;
  47        mutex_init(&delayed_node->mutex);
  48        INIT_LIST_HEAD(&delayed_node->n_list);
  49        INIT_LIST_HEAD(&delayed_node->p_list);
  50}
  51
  52static inline int btrfs_is_continuous_delayed_item(
  53                                        struct btrfs_delayed_item *item1,
  54                                        struct btrfs_delayed_item *item2)
  55{
  56        if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
  57            item1->key.objectid == item2->key.objectid &&
  58            item1->key.type == item2->key.type &&
  59            item1->key.offset + 1 == item2->key.offset)
  60                return 1;
  61        return 0;
  62}
  63
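     /*
      * Look up the delayed node of an inode, first via the pointer cached in
      * the btrfs inode and then via the root's radix tree, and take an extra
      * reference on it.  Returns NULL if the inode has no delayed node.
      */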
  64static struct btrfs_delayed_node *btrfs_get_delayed_node(
  65                struct btrfs_inode *btrfs_inode)
  66{
  67        struct btrfs_root *root = btrfs_inode->root;
  68        u64 ino = btrfs_ino(btrfs_inode);
  69        struct btrfs_delayed_node *node;
  70
  71        node = READ_ONCE(btrfs_inode->delayed_node);
  72        if (node) {
  73                refcount_inc(&node->refs);
  74                return node;
  75        }
  76
  77        spin_lock(&root->inode_lock);
  78        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
  79
  80        if (node) {
  81                if (btrfs_inode->delayed_node) {
  82                        refcount_inc(&node->refs);      /* can be accessed */
  83                        BUG_ON(btrfs_inode->delayed_node != node);
  84                        spin_unlock(&root->inode_lock);
  85                        return node;
  86                }
  87
  88                /*
  89                 * It's possible that we're racing into the middle of removing
  90                 * this node from the radix tree.  In this case, the refcount
  91                 * was zero and it should never go back to one.  Just return
  92                 * NULL like it was never in the radix at all; our release
  93                 * function is in the process of removing it.
  94                 *
  95                 * Some implementations of refcount_inc refuse to bump the
  96                 * refcount once it has hit zero.  If we don't do this dance
  97                 * here, refcount_inc() may decide to just WARN_ONCE() instead
  98                 * of actually bumping the refcount.
  99                 *
 100                 * If this node is properly in the radix, we want to bump the
 101                 * refcount twice, once for the inode and once for this get
 102                 * operation.
 103                 */
 104                if (refcount_inc_not_zero(&node->refs)) {
 105                        refcount_inc(&node->refs);
 106                        btrfs_inode->delayed_node = node;
 107                } else {
 108                        node = NULL;
 109                }
 110
 111                spin_unlock(&root->inode_lock);
 112                return node;
 113        }
 114        spin_unlock(&root->inode_lock);
 115
 116        return NULL;
 117}
 118
 119/* Will return either the node or PTR_ERR(-ENOMEM) */
 120static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 121                struct btrfs_inode *btrfs_inode)
 122{
 123        struct btrfs_delayed_node *node;
 124        struct btrfs_root *root = btrfs_inode->root;
 125        u64 ino = btrfs_ino(btrfs_inode);
 126        int ret;
 127
 128again:
 129        node = btrfs_get_delayed_node(btrfs_inode);
 130        if (node)
 131                return node;
 132
 133        node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
 134        if (!node)
 135                return ERR_PTR(-ENOMEM);
 136        btrfs_init_delayed_node(node, root, ino);
 137
 138        /* cached in the btrfs inode and can be accessed */
 139        refcount_set(&node->refs, 2);
 140
 141        ret = radix_tree_preload(GFP_NOFS);
 142        if (ret) {
 143                kmem_cache_free(delayed_node_cache, node);
 144                return ERR_PTR(ret);
 145        }
 146
 147        spin_lock(&root->inode_lock);
 148        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 149        if (ret == -EEXIST) {
 150                spin_unlock(&root->inode_lock);
 151                kmem_cache_free(delayed_node_cache, node);
 152                radix_tree_preload_end();
 153                goto again;
 154        }
 155        btrfs_inode->delayed_node = node;
 156        spin_unlock(&root->inode_lock);
 157        radix_tree_preload_end();
 158
 159        return node;
 160}
 161
 162/*
 163 * Call it when holding delayed_node->mutex
 164 *
 165 * If mod = 1, add this node into the prepared list.
 166 */
 167static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 168                                     struct btrfs_delayed_node *node,
 169                                     int mod)
 170{
 171        spin_lock(&root->lock);
 172        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 173                if (!list_empty(&node->p_list))
 174                        list_move_tail(&node->p_list, &root->prepare_list);
 175                else if (mod)
 176                        list_add_tail(&node->p_list, &root->prepare_list);
 177        } else {
 178                list_add_tail(&node->n_list, &root->node_list);
 179                list_add_tail(&node->p_list, &root->prepare_list);
 180                refcount_inc(&node->refs);      /* inserted into list */
 181                root->nodes++;
 182                set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 183        }
 184        spin_unlock(&root->lock);
 185}
 186
 187/* Call it when holding delayed_node->mutex */
 188static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 189                                       struct btrfs_delayed_node *node)
 190{
 191        spin_lock(&root->lock);
 192        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 193                root->nodes--;
 194                refcount_dec(&node->refs);      /* not in the list */
 195                list_del_init(&node->n_list);
 196                if (!list_empty(&node->p_list))
 197                        list_del_init(&node->p_list);
 198                clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 199        }
 200        spin_unlock(&root->lock);
 201}
 202
 203static struct btrfs_delayed_node *btrfs_first_delayed_node(
 204                        struct btrfs_delayed_root *delayed_root)
 205{
 206        struct list_head *p;
 207        struct btrfs_delayed_node *node = NULL;
 208
 209        spin_lock(&delayed_root->lock);
 210        if (list_empty(&delayed_root->node_list))
 211                goto out;
 212
 213        p = delayed_root->node_list.next;
 214        node = list_entry(p, struct btrfs_delayed_node, n_list);
 215        refcount_inc(&node->refs);
 216out:
 217        spin_unlock(&delayed_root->lock);
 218
 219        return node;
 220}
 221
 222static struct btrfs_delayed_node *btrfs_next_delayed_node(
 223                                                struct btrfs_delayed_node *node)
 224{
 225        struct btrfs_delayed_root *delayed_root;
 226        struct list_head *p;
 227        struct btrfs_delayed_node *next = NULL;
 228
 229        delayed_root = node->root->fs_info->delayed_root;
 230        spin_lock(&delayed_root->lock);
 231        if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 232                /* not in the list */
 233                if (list_empty(&delayed_root->node_list))
 234                        goto out;
 235                p = delayed_root->node_list.next;
 236        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
 237                goto out;
 238        else
 239                p = node->n_list.next;
 240
 241        next = list_entry(p, struct btrfs_delayed_node, n_list);
 242        refcount_inc(&next->refs);
 243out:
 244        spin_unlock(&delayed_root->lock);
 245
 246        return next;
 247}
 248
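     /*
      * Drop one reference on a delayed node.  While the node still has pending
      * items it is kept on (or re-added to) the delayed root's lists; once the
      * last reference is dropped it is removed from the radix tree and freed.
      */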
 249static void __btrfs_release_delayed_node(
 250                                struct btrfs_delayed_node *delayed_node,
 251                                int mod)
 252{
 253        struct btrfs_delayed_root *delayed_root;
 254
 255        if (!delayed_node)
 256                return;
 257
 258        delayed_root = delayed_node->root->fs_info->delayed_root;
 259
 260        mutex_lock(&delayed_node->mutex);
 261        if (delayed_node->count)
 262                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
 263        else
 264                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 265        mutex_unlock(&delayed_node->mutex);
 266
 267        if (refcount_dec_and_test(&delayed_node->refs)) {
 268                struct btrfs_root *root = delayed_node->root;
 269
 270                spin_lock(&root->inode_lock);
 271                /*
 272                 * Once our refcount goes to zero, nobody is allowed to bump it
 273                 * back up.  We can delete it now.
 274                 */
 275                ASSERT(refcount_read(&delayed_node->refs) == 0);
 276                radix_tree_delete(&root->delayed_nodes_tree,
 277                                  delayed_node->inode_id);
 278                spin_unlock(&root->inode_lock);
 279                kmem_cache_free(delayed_node_cache, delayed_node);
 280        }
 281}
 282
 283static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
 284{
 285        __btrfs_release_delayed_node(node, 0);
 286}
 287
 288static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 289                                        struct btrfs_delayed_root *delayed_root)
 290{
 291        struct list_head *p;
 292        struct btrfs_delayed_node *node = NULL;
 293
 294        spin_lock(&delayed_root->lock);
 295        if (list_empty(&delayed_root->prepare_list))
 296                goto out;
 297
 298        p = delayed_root->prepare_list.next;
 299        list_del_init(p);
 300        node = list_entry(p, struct btrfs_delayed_node, p_list);
 301        refcount_inc(&node->refs);
 302out:
 303        spin_unlock(&delayed_root->lock);
 304
 305        return node;
 306}
 307
 308static inline void btrfs_release_prepared_delayed_node(
 309                                        struct btrfs_delayed_node *node)
 310{
 311        __btrfs_release_delayed_node(node, 1);
 312}
 313
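     /* Allocate a delayed item with room for data_len bytes of item data. */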
 314static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 315{
 316        struct btrfs_delayed_item *item;
 317        item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
 318        if (item) {
 319                item->data_len = data_len;
 320                item->ins_or_del = 0;
 321                item->bytes_reserved = 0;
 322                item->delayed_node = NULL;
 323                refcount_set(&item->refs, 1);
 324        }
 325        return item;
 326}
 327
 328/*
 329 * __btrfs_lookup_delayed_item - look up the delayed item by key
  330 * @root:         the rb-tree of the delayed node to search
 331 * @key:          the key to look up
 332 * @prev:         used to store the prev item if the right item isn't found
 333 * @next:         used to store the next item if the right item isn't found
 334 *
  335 * Note: if the right item isn't found, the previous and next items are stored
  336 * in @prev and @next and NULL is returned.
 337 */
 338static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
 339                                struct rb_root *root,
 340                                struct btrfs_key *key,
 341                                struct btrfs_delayed_item **prev,
 342                                struct btrfs_delayed_item **next)
 343{
 344        struct rb_node *node, *prev_node = NULL;
 345        struct btrfs_delayed_item *delayed_item = NULL;
 346        int ret = 0;
 347
 348        node = root->rb_node;
 349
 350        while (node) {
 351                delayed_item = rb_entry(node, struct btrfs_delayed_item,
 352                                        rb_node);
 353                prev_node = node;
 354                ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
 355                if (ret < 0)
 356                        node = node->rb_right;
 357                else if (ret > 0)
 358                        node = node->rb_left;
 359                else
 360                        return delayed_item;
 361        }
 362
 363        if (prev) {
 364                if (!prev_node)
 365                        *prev = NULL;
 366                else if (ret < 0)
 367                        *prev = delayed_item;
 368                else if ((node = rb_prev(prev_node)) != NULL) {
 369                        *prev = rb_entry(node, struct btrfs_delayed_item,
 370                                         rb_node);
 371                } else
 372                        *prev = NULL;
 373        }
 374
 375        if (next) {
 376                if (!prev_node)
 377                        *next = NULL;
 378                else if (ret > 0)
 379                        *next = delayed_item;
 380                else if ((node = rb_next(prev_node)) != NULL) {
 381                        *next = rb_entry(node, struct btrfs_delayed_item,
 382                                         rb_node);
 383                } else
 384                        *next = NULL;
 385        }
 386        return NULL;
 387}
 388
 389static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
 390                                        struct btrfs_delayed_node *delayed_node,
 391                                        struct btrfs_key *key)
 392{
 393        return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
 394                                           NULL, NULL);
 395}
 396
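     /*
      * Link a delayed item into the insertion or deletion rb-tree of the
      * delayed node, ordered by item key.  Returns -EEXIST if an item with the
      * same key is already queued.
      */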
 397static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
 398                                    struct btrfs_delayed_item *ins,
 399                                    int action)
 400{
 401        struct rb_node **p, *node;
 402        struct rb_node *parent_node = NULL;
 403        struct rb_root *root;
 404        struct btrfs_delayed_item *item;
 405        int cmp;
 406
 407        if (action == BTRFS_DELAYED_INSERTION_ITEM)
 408                root = &delayed_node->ins_root;
 409        else if (action == BTRFS_DELAYED_DELETION_ITEM)
 410                root = &delayed_node->del_root;
 411        else
 412                BUG();
 413        p = &root->rb_node;
 414        node = &ins->rb_node;
 415
 416        while (*p) {
 417                parent_node = *p;
 418                item = rb_entry(parent_node, struct btrfs_delayed_item,
 419                                 rb_node);
 420
 421                cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
 422                if (cmp < 0)
 423                        p = &(*p)->rb_right;
 424                else if (cmp > 0)
 425                        p = &(*p)->rb_left;
 426                else
 427                        return -EEXIST;
 428        }
 429
 430        rb_link_node(node, parent_node, p);
 431        rb_insert_color(node, root);
 432        ins->delayed_node = delayed_node;
 433        ins->ins_or_del = action;
 434
 435        if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
 436            action == BTRFS_DELAYED_INSERTION_ITEM &&
 437            ins->key.offset >= delayed_node->index_cnt)
 438                        delayed_node->index_cnt = ins->key.offset + 1;
 439
 440        delayed_node->count++;
 441        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
 442        return 0;
 443}
 444
 445static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
 446                                              struct btrfs_delayed_item *item)
 447{
 448        return __btrfs_add_delayed_item(node, item,
 449                                        BTRFS_DELAYED_INSERTION_ITEM);
 450}
 451
 452static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 453                                             struct btrfs_delayed_item *item)
 454{
 455        return __btrfs_add_delayed_item(node, item,
 456                                        BTRFS_DELAYED_DELETION_ITEM);
 457}
 458
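     /*
      * Account one finished delayed item and wake up waiters on the delayed
      * root once the backlog drops below the background limit or a whole
      * batch has completed.
      */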
 459static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 460{
 461        int seq = atomic_inc_return(&delayed_root->items_seq);
 462
 463        /*
 464         * atomic_dec_return implies a barrier for waitqueue_active
 465         */
 466        if ((atomic_dec_return(&delayed_root->items) <
 467            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
 468            waitqueue_active(&delayed_root->wait))
 469                wake_up(&delayed_root->wait);
 470}
 471
 472static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 473{
 474        struct rb_root *root;
 475        struct btrfs_delayed_root *delayed_root;
 476
 477        delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
 478
 479        BUG_ON(!delayed_root);
 480        BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
 481               delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
 482
 483        if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
 484                root = &delayed_item->delayed_node->ins_root;
 485        else
 486                root = &delayed_item->delayed_node->del_root;
 487
 488        rb_erase(&delayed_item->rb_node, root);
 489        delayed_item->delayed_node->count--;
 490
 491        finish_one_item(delayed_root);
 492}
 493
 494static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 495{
 496        if (item) {
 497                __btrfs_remove_delayed_item(item);
 498                if (refcount_dec_and_test(&item->refs))
 499                        kfree(item);
 500        }
 501}
 502
 503static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 504                                        struct btrfs_delayed_node *delayed_node)
 505{
 506        struct rb_node *p;
 507        struct btrfs_delayed_item *item = NULL;
 508
 509        p = rb_first(&delayed_node->ins_root);
 510        if (p)
 511                item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 512
 513        return item;
 514}
 515
 516static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 517                                        struct btrfs_delayed_node *delayed_node)
 518{
 519        struct rb_node *p;
 520        struct btrfs_delayed_item *item = NULL;
 521
 522        p = rb_first(&delayed_node->del_root);
 523        if (p)
 524                item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 525
 526        return item;
 527}
 528
 529static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 530                                                struct btrfs_delayed_item *item)
 531{
 532        struct rb_node *p;
 533        struct btrfs_delayed_item *next = NULL;
 534
 535        p = rb_next(&item->rb_node);
 536        if (p)
 537                next = rb_entry(p, struct btrfs_delayed_item, rb_node);
 538
 539        return next;
 540}
 541
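     /*
      * Reserve metadata space for a delayed item by migrating it from the
      * transaction's block reserve into the global delayed block reserve.
      */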
 542static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 543                                               struct btrfs_root *root,
 544                                               struct btrfs_delayed_item *item)
 545{
 546        struct btrfs_block_rsv *src_rsv;
 547        struct btrfs_block_rsv *dst_rsv;
 548        struct btrfs_fs_info *fs_info = root->fs_info;
 549        u64 num_bytes;
 550        int ret;
 551
 552        if (!trans->bytes_reserved)
 553                return 0;
 554
 555        src_rsv = trans->block_rsv;
 556        dst_rsv = &fs_info->delayed_block_rsv;
 557
 558        num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
 559
 560        /*
  561         * Here we migrate space rsv from the transaction rsv, since we have
  562         * already reserved space when starting a transaction.  So there is no
  563         * need to reserve qgroup space here.
 564         */
 565        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 566        if (!ret) {
 567                trace_btrfs_space_reservation(fs_info, "delayed_item",
 568                                              item->key.objectid,
 569                                              num_bytes, 1);
 570                item->bytes_reserved = num_bytes;
 571        }
 572
 573        return ret;
 574}
 575
 576static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 577                                                struct btrfs_delayed_item *item)
 578{
 579        struct btrfs_block_rsv *rsv;
 580        struct btrfs_fs_info *fs_info = root->fs_info;
 581
 582        if (!item->bytes_reserved)
 583                return;
 584
 585        rsv = &fs_info->delayed_block_rsv;
 586        /*
 587         * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
 588         * to release/reserve qgroup space.
 589         */
 590        trace_btrfs_space_reservation(fs_info, "delayed_item",
 591                                      item->key.objectid, item->bytes_reserved,
 592                                      0);
 593        btrfs_block_rsv_release(fs_info, rsv,
 594                                item->bytes_reserved);
 595}
 596
 597static int btrfs_delayed_inode_reserve_metadata(
 598                                        struct btrfs_trans_handle *trans,
 599                                        struct btrfs_root *root,
 600                                        struct btrfs_inode *inode,
 601                                        struct btrfs_delayed_node *node)
 602{
 603        struct btrfs_fs_info *fs_info = root->fs_info;
 604        struct btrfs_block_rsv *src_rsv;
 605        struct btrfs_block_rsv *dst_rsv;
 606        u64 num_bytes;
 607        int ret;
 608
 609        src_rsv = trans->block_rsv;
 610        dst_rsv = &fs_info->delayed_block_rsv;
 611
 612        num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
 613
 614        /*
 615         * btrfs_dirty_inode will update the inode under btrfs_join_transaction
 616         * which doesn't reserve space for speed.  This is a problem since we
 617         * still need to reserve space for this update, so try to reserve the
 618         * space.
 619         *
 620         * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
 621         * we always reserve enough to update the inode item.
 622         */
 623        if (!src_rsv || (!trans->bytes_reserved &&
 624                         src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
 625                ret = btrfs_qgroup_reserve_meta_prealloc(root,
 626                                fs_info->nodesize, true);
 627                if (ret < 0)
 628                        return ret;
 629                ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
 630                                          BTRFS_RESERVE_NO_FLUSH);
 631                /*
 632                 * Since we're under a transaction reserve_metadata_bytes could
 633                 * try to commit the transaction which will make it return
 634                 * EAGAIN to make us stop the transaction we have, so return
 635                 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
 636                 */
 637                if (ret == -EAGAIN) {
 638                        ret = -ENOSPC;
 639                        btrfs_qgroup_free_meta_prealloc(root, num_bytes);
 640                }
 641                if (!ret) {
 642                        node->bytes_reserved = num_bytes;
 643                        trace_btrfs_space_reservation(fs_info,
 644                                                      "delayed_inode",
 645                                                      btrfs_ino(inode),
 646                                                      num_bytes, 1);
 647                } else {
 648                        btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
 649                }
 650                return ret;
 651        }
 652
 653        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 654        if (!ret) {
 655                trace_btrfs_space_reservation(fs_info, "delayed_inode",
 656                                              btrfs_ino(inode), num_bytes, 1);
 657                node->bytes_reserved = num_bytes;
 658        }
 659
 660        return ret;
 661}
 662
 663static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
 664                                                struct btrfs_delayed_node *node,
 665                                                bool qgroup_free)
 666{
 667        struct btrfs_block_rsv *rsv;
 668
 669        if (!node->bytes_reserved)
 670                return;
 671
 672        rsv = &fs_info->delayed_block_rsv;
 673        trace_btrfs_space_reservation(fs_info, "delayed_inode",
 674                                      node->inode_id, node->bytes_reserved, 0);
 675        btrfs_block_rsv_release(fs_info, rsv,
 676                                node->bytes_reserved);
 677        if (qgroup_free)
 678                btrfs_qgroup_free_meta_prealloc(node->root,
 679                                node->bytes_reserved);
 680        else
 681                btrfs_qgroup_convert_reserved_meta(node->root,
 682                                node->bytes_reserved);
 683        node->bytes_reserved = 0;
 684}
 685
 686/*
  687 * This helper inserts a run of continuous items into the same leaf, limited
  688 * by the free space of the leaf.
 689 */
 690static int btrfs_batch_insert_items(struct btrfs_root *root,
 691                                    struct btrfs_path *path,
 692                                    struct btrfs_delayed_item *item)
 693{
 694        struct btrfs_fs_info *fs_info = root->fs_info;
 695        struct btrfs_delayed_item *curr, *next;
 696        int free_space;
 697        int total_data_size = 0, total_size = 0;
 698        struct extent_buffer *leaf;
 699        char *data_ptr;
 700        struct btrfs_key *keys;
 701        u32 *data_size;
 702        struct list_head head;
 703        int slot;
 704        int nitems;
 705        int i;
 706        int ret = 0;
 707
 708        BUG_ON(!path->nodes[0]);
 709
 710        leaf = path->nodes[0];
 711        free_space = btrfs_leaf_free_space(fs_info, leaf);
 712        INIT_LIST_HEAD(&head);
 713
 714        next = item;
 715        nitems = 0;
 716
 717        /*
  718         * Count the number of continuous items that we can insert in a batch.
 719         */
 720        while (total_size + next->data_len + sizeof(struct btrfs_item) <=
 721               free_space) {
 722                total_data_size += next->data_len;
 723                total_size += next->data_len + sizeof(struct btrfs_item);
 724                list_add_tail(&next->tree_list, &head);
 725                nitems++;
 726
 727                curr = next;
 728                next = __btrfs_next_delayed_item(curr);
 729                if (!next)
 730                        break;
 731
 732                if (!btrfs_is_continuous_delayed_item(curr, next))
 733                        break;
 734        }
 735
 736        if (!nitems) {
 737                ret = 0;
 738                goto out;
 739        }
 740
 741        /*
  742         * We need to allocate some memory, but that might cause the task
 743         * to sleep, so we set all locked nodes in the path to blocking locks
 744         * first.
 745         */
 746        btrfs_set_path_blocking(path);
 747
 748        keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 749        if (!keys) {
 750                ret = -ENOMEM;
 751                goto out;
 752        }
 753
 754        data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
 755        if (!data_size) {
 756                ret = -ENOMEM;
 757                goto error;
 758        }
 759
 760        /* get keys of all the delayed items */
 761        i = 0;
 762        list_for_each_entry(next, &head, tree_list) {
 763                keys[i] = next->key;
 764                data_size[i] = next->data_len;
 765                i++;
 766        }
 767
  768        /* Reset all the locked nodes in the path to spinning locks. */
 769        btrfs_clear_path_blocking(path, NULL, 0);
 770
 771        /* insert the keys of the items */
 772        setup_items_for_insert(root, path, keys, data_size,
 773                               total_data_size, total_size, nitems);
 774
 775        /* insert the dir index items */
 776        slot = path->slots[0];
 777        list_for_each_entry_safe(curr, next, &head, tree_list) {
 778                data_ptr = btrfs_item_ptr(leaf, slot, char);
 779                write_extent_buffer(leaf, &curr->data,
 780                                    (unsigned long)data_ptr,
 781                                    curr->data_len);
 782                slot++;
 783
 784                btrfs_delayed_item_release_metadata(root, curr);
 785
 786                list_del(&curr->tree_list);
 787                btrfs_release_delayed_item(curr);
 788        }
 789
 790error:
 791        kfree(data_size);
 792        kfree(keys);
 793out:
 794        return ret;
 795}
 796
 797/*
  798 * This helper can only do simple insertions that don't need to extend the item
  799 * for new data, such as directory name index insertion and inode insertion.
 800 */
 801static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 802                                     struct btrfs_root *root,
 803                                     struct btrfs_path *path,
 804                                     struct btrfs_delayed_item *delayed_item)
 805{
 806        struct extent_buffer *leaf;
 807        char *ptr;
 808        int ret;
 809
 810        ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
 811                                      delayed_item->data_len);
 812        if (ret < 0 && ret != -EEXIST)
 813                return ret;
 814
 815        leaf = path->nodes[0];
 816
 817        ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 818
 819        write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
 820                            delayed_item->data_len);
 821        btrfs_mark_buffer_dirty(leaf);
 822
 823        btrfs_delayed_item_release_metadata(root, delayed_item);
 824        return 0;
 825}
 826
 827/*
  828 * We insert one item first, then, if there are continuous items following it,
  829 * we try to insert those items into the same leaf.
 830 */
 831static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
 832                                      struct btrfs_path *path,
 833                                      struct btrfs_root *root,
 834                                      struct btrfs_delayed_node *node)
 835{
 836        struct btrfs_delayed_item *curr, *prev;
 837        int ret = 0;
 838
 839do_again:
 840        mutex_lock(&node->mutex);
 841        curr = __btrfs_first_delayed_insertion_item(node);
 842        if (!curr)
 843                goto insert_end;
 844
 845        ret = btrfs_insert_delayed_item(trans, root, path, curr);
 846        if (ret < 0) {
 847                btrfs_release_path(path);
 848                goto insert_end;
 849        }
 850
 851        prev = curr;
 852        curr = __btrfs_next_delayed_item(prev);
 853        if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
 854                /* insert the continuous items into the same leaf */
 855                path->slots[0]++;
 856                btrfs_batch_insert_items(root, path, curr);
 857        }
 858        btrfs_release_delayed_item(prev);
 859        btrfs_mark_buffer_dirty(path->nodes[0]);
 860
 861        btrfs_release_path(path);
 862        mutex_unlock(&node->mutex);
 863        goto do_again;
 864
 865insert_end:
 866        mutex_unlock(&node->mutex);
 867        return ret;
 868}
 869
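     /*
      * Delete from the leaf the run of contiguous dir index items matching the
      * queued deletion items that start at @item, then release those delayed
      * items.
      */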
 870static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 871                                    struct btrfs_root *root,
 872                                    struct btrfs_path *path,
 873                                    struct btrfs_delayed_item *item)
 874{
 875        struct btrfs_delayed_item *curr, *next;
 876        struct extent_buffer *leaf;
 877        struct btrfs_key key;
 878        struct list_head head;
 879        int nitems, i, last_item;
 880        int ret = 0;
 881
 882        BUG_ON(!path->nodes[0]);
 883
 884        leaf = path->nodes[0];
 885
 886        i = path->slots[0];
 887        last_item = btrfs_header_nritems(leaf) - 1;
 888        if (i > last_item)
 889                return -ENOENT; /* FIXME: Is errno suitable? */
 890
 891        next = item;
 892        INIT_LIST_HEAD(&head);
 893        btrfs_item_key_to_cpu(leaf, &key, i);
 894        nitems = 0;
 895        /*
  896         * Count the number of dir index items that we can delete in a batch.
 897         */
 898        while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
 899                list_add_tail(&next->tree_list, &head);
 900                nitems++;
 901
 902                curr = next;
 903                next = __btrfs_next_delayed_item(curr);
 904                if (!next)
 905                        break;
 906
 907                if (!btrfs_is_continuous_delayed_item(curr, next))
 908                        break;
 909
 910                i++;
 911                if (i > last_item)
 912                        break;
 913                btrfs_item_key_to_cpu(leaf, &key, i);
 914        }
 915
 916        if (!nitems)
 917                return 0;
 918
 919        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
 920        if (ret)
 921                goto out;
 922
 923        list_for_each_entry_safe(curr, next, &head, tree_list) {
 924                btrfs_delayed_item_release_metadata(root, curr);
 925                list_del(&curr->tree_list);
 926                btrfs_release_delayed_item(curr);
 927        }
 928
 929out:
 930        return ret;
 931}
 932
 933static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
 934                                      struct btrfs_path *path,
 935                                      struct btrfs_root *root,
 936                                      struct btrfs_delayed_node *node)
 937{
 938        struct btrfs_delayed_item *curr, *prev;
 939        int ret = 0;
 940
 941do_again:
 942        mutex_lock(&node->mutex);
 943        curr = __btrfs_first_delayed_deletion_item(node);
 944        if (!curr)
 945                goto delete_fail;
 946
 947        ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
 948        if (ret < 0)
 949                goto delete_fail;
 950        else if (ret > 0) {
 951                /*
  952                 * We can't find the item that this delayed item points to, so
  953                 * the delayed item is stale; just drop it.
 954                 */
 955                prev = curr;
 956                curr = __btrfs_next_delayed_item(prev);
 957                btrfs_release_delayed_item(prev);
 958                ret = 0;
 959                btrfs_release_path(path);
 960                if (curr) {
 961                        mutex_unlock(&node->mutex);
 962                        goto do_again;
 963                } else
 964                        goto delete_fail;
 965        }
 966
 967        btrfs_batch_delete_items(trans, root, path, curr);
 968        btrfs_release_path(path);
 969        mutex_unlock(&node->mutex);
 970        goto do_again;
 971
 972delete_fail:
 973        btrfs_release_path(path);
 974        mutex_unlock(&node->mutex);
 975        return ret;
 976}
 977
 978static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
 979{
 980        struct btrfs_delayed_root *delayed_root;
 981
 982        if (delayed_node &&
 983            test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 984                BUG_ON(!delayed_node->root);
 985                clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 986                delayed_node->count--;
 987
 988                delayed_root = delayed_node->root->fs_info->delayed_root;
 989                finish_one_item(delayed_root);
 990        }
 991}
 992
 993static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
 994{
 995        struct btrfs_delayed_root *delayed_root;
 996
 997        ASSERT(delayed_node->root);
 998        clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
 999        delayed_node->count--;
1000
1001        delayed_root = delayed_node->root->fs_info->delayed_root;
1002        finish_one_item(delayed_root);
1003}
1004
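     /*
      * Copy the delayed node's in-memory inode item into the inode item in the
      * fs tree and, if BTRFS_DELAYED_NODE_DEL_IREF is set, delete the single
      * remaining inode ref/extref item.  Called with the node's mutex held.
      */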
1005static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1006                                        struct btrfs_root *root,
1007                                        struct btrfs_path *path,
1008                                        struct btrfs_delayed_node *node)
1009{
1010        struct btrfs_fs_info *fs_info = root->fs_info;
1011        struct btrfs_key key;
1012        struct btrfs_inode_item *inode_item;
1013        struct extent_buffer *leaf;
1014        int mod;
1015        int ret;
1016
1017        key.objectid = node->inode_id;
1018        key.type = BTRFS_INODE_ITEM_KEY;
1019        key.offset = 0;
1020
1021        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1022                mod = -1;
1023        else
1024                mod = 1;
1025
1026        ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1027        if (ret > 0) {
1028                btrfs_release_path(path);
1029                return -ENOENT;
1030        } else if (ret < 0) {
1031                return ret;
1032        }
1033
1034        leaf = path->nodes[0];
1035        inode_item = btrfs_item_ptr(leaf, path->slots[0],
1036                                    struct btrfs_inode_item);
1037        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1038                            sizeof(struct btrfs_inode_item));
1039        btrfs_mark_buffer_dirty(leaf);
1040
1041        if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1042                goto no_iref;
1043
1044        path->slots[0]++;
1045        if (path->slots[0] >= btrfs_header_nritems(leaf))
1046                goto search;
1047again:
1048        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1049        if (key.objectid != node->inode_id)
1050                goto out;
1051
1052        if (key.type != BTRFS_INODE_REF_KEY &&
1053            key.type != BTRFS_INODE_EXTREF_KEY)
1054                goto out;
1055
1056        /*
 1057         * Delayed iref deletion is for an inode that has only one link,
 1058         * so there is only one iref.  The case where several irefs are
 1059         * in the same item doesn't exist.
1060         */
1061        btrfs_del_item(trans, root, path);
1062out:
1063        btrfs_release_delayed_iref(node);
1064no_iref:
1065        btrfs_release_path(path);
1066err_out:
1067        btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1068        btrfs_release_delayed_inode(node);
1069
1070        return ret;
1071
1072search:
1073        btrfs_release_path(path);
1074
1075        key.type = BTRFS_INODE_EXTREF_KEY;
1076        key.offset = -1;
1077        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1078        if (ret < 0)
1079                goto err_out;
1080        ASSERT(ret);
1081
1082        ret = 0;
1083        leaf = path->nodes[0];
1084        path->slots[0]--;
1085        goto again;
1086}
1087
1088static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1089                                             struct btrfs_root *root,
1090                                             struct btrfs_path *path,
1091                                             struct btrfs_delayed_node *node)
1092{
1093        int ret;
1094
1095        mutex_lock(&node->mutex);
1096        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1097                mutex_unlock(&node->mutex);
1098                return 0;
1099        }
1100
1101        ret = __btrfs_update_delayed_inode(trans, root, path, node);
1102        mutex_unlock(&node->mutex);
1103        return ret;
1104}
1105
1106static inline int
1107__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1108                                   struct btrfs_path *path,
1109                                   struct btrfs_delayed_node *node)
1110{
1111        int ret;
1112
1113        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1114        if (ret)
1115                return ret;
1116
1117        ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1118        if (ret)
1119                return ret;
1120
1121        ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1122        return ret;
1123}
1124
1125/*
1126 * Called when committing the transaction.
1127 * Returns 0 on success.
 1128 * Returns < 0 on error; in that case the transaction is aborted and any
 1129 * outstanding delayed items are cleaned up.
1130 */
1131static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1132{
1133        struct btrfs_fs_info *fs_info = trans->fs_info;
1134        struct btrfs_delayed_root *delayed_root;
1135        struct btrfs_delayed_node *curr_node, *prev_node;
1136        struct btrfs_path *path;
1137        struct btrfs_block_rsv *block_rsv;
1138        int ret = 0;
1139        bool count = (nr > 0);
1140
1141        if (trans->aborted)
1142                return -EIO;
1143
1144        path = btrfs_alloc_path();
1145        if (!path)
1146                return -ENOMEM;
1147        path->leave_spinning = 1;
1148
1149        block_rsv = trans->block_rsv;
1150        trans->block_rsv = &fs_info->delayed_block_rsv;
1151
1152        delayed_root = fs_info->delayed_root;
1153
1154        curr_node = btrfs_first_delayed_node(delayed_root);
1155        while (curr_node && (!count || (count && nr--))) {
1156                ret = __btrfs_commit_inode_delayed_items(trans, path,
1157                                                         curr_node);
1158                if (ret) {
1159                        btrfs_release_delayed_node(curr_node);
1160                        curr_node = NULL;
1161                        btrfs_abort_transaction(trans, ret);
1162                        break;
1163                }
1164
1165                prev_node = curr_node;
1166                curr_node = btrfs_next_delayed_node(curr_node);
1167                btrfs_release_delayed_node(prev_node);
1168        }
1169
1170        if (curr_node)
1171                btrfs_release_delayed_node(curr_node);
1172        btrfs_free_path(path);
1173        trans->block_rsv = block_rsv;
1174
1175        return ret;
1176}
1177
1178int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1179{
1180        return __btrfs_run_delayed_items(trans, -1);
1181}
1182
1183int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1184{
1185        return __btrfs_run_delayed_items(trans, nr);
1186}
1187
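     /*
      * Flush all delayed items and the delayed inode update of a single inode
      * in the context of the given transaction.
      */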
1188int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1189                                     struct btrfs_inode *inode)
1190{
1191        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1192        struct btrfs_path *path;
1193        struct btrfs_block_rsv *block_rsv;
1194        int ret;
1195
1196        if (!delayed_node)
1197                return 0;
1198
1199        mutex_lock(&delayed_node->mutex);
1200        if (!delayed_node->count) {
1201                mutex_unlock(&delayed_node->mutex);
1202                btrfs_release_delayed_node(delayed_node);
1203                return 0;
1204        }
1205        mutex_unlock(&delayed_node->mutex);
1206
1207        path = btrfs_alloc_path();
1208        if (!path) {
1209                btrfs_release_delayed_node(delayed_node);
1210                return -ENOMEM;
1211        }
1212        path->leave_spinning = 1;
1213
1214        block_rsv = trans->block_rsv;
1215        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1216
1217        ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1218
1219        btrfs_release_delayed_node(delayed_node);
1220        btrfs_free_path(path);
1221        trans->block_rsv = block_rsv;
1222
1223        return ret;
1224}
1225
1226int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1227{
1228        struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1229        struct btrfs_trans_handle *trans;
1230        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1231        struct btrfs_path *path;
1232        struct btrfs_block_rsv *block_rsv;
1233        int ret;
1234
1235        if (!delayed_node)
1236                return 0;
1237
1238        mutex_lock(&delayed_node->mutex);
1239        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1240                mutex_unlock(&delayed_node->mutex);
1241                btrfs_release_delayed_node(delayed_node);
1242                return 0;
1243        }
1244        mutex_unlock(&delayed_node->mutex);
1245
1246        trans = btrfs_join_transaction(delayed_node->root);
1247        if (IS_ERR(trans)) {
1248                ret = PTR_ERR(trans);
1249                goto out;
1250        }
1251
1252        path = btrfs_alloc_path();
1253        if (!path) {
1254                ret = -ENOMEM;
1255                goto trans_out;
1256        }
1257        path->leave_spinning = 1;
1258
1259        block_rsv = trans->block_rsv;
1260        trans->block_rsv = &fs_info->delayed_block_rsv;
1261
1262        mutex_lock(&delayed_node->mutex);
1263        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1264                ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1265                                                   path, delayed_node);
1266        else
1267                ret = 0;
1268        mutex_unlock(&delayed_node->mutex);
1269
1270        btrfs_free_path(path);
1271        trans->block_rsv = block_rsv;
1272trans_out:
1273        btrfs_end_transaction(trans);
1274        btrfs_btree_balance_dirty(fs_info);
1275out:
1276        btrfs_release_delayed_node(delayed_node);
1277
1278        return ret;
1279}
1280
1281void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1282{
1283        struct btrfs_delayed_node *delayed_node;
1284
1285        delayed_node = READ_ONCE(inode->delayed_node);
1286        if (!delayed_node)
1287                return;
1288
1289        inode->delayed_node = NULL;
1290        btrfs_release_delayed_node(delayed_node);
1291}
1292
1293struct btrfs_async_delayed_work {
1294        struct btrfs_delayed_root *delayed_root;
1295        int nr;
1296        struct btrfs_work work;
1297};
1298
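     /*
      * Work function: flush prepared delayed nodes in the background until the
      * item backlog has shrunk enough or the requested number of nodes has
      * been processed.
      */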
1299static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1300{
1301        struct btrfs_async_delayed_work *async_work;
1302        struct btrfs_delayed_root *delayed_root;
1303        struct btrfs_trans_handle *trans;
1304        struct btrfs_path *path;
1305        struct btrfs_delayed_node *delayed_node = NULL;
1306        struct btrfs_root *root;
1307        struct btrfs_block_rsv *block_rsv;
1308        int total_done = 0;
1309
1310        async_work = container_of(work, struct btrfs_async_delayed_work, work);
1311        delayed_root = async_work->delayed_root;
1312
1313        path = btrfs_alloc_path();
1314        if (!path)
1315                goto out;
1316
1317        do {
1318                if (atomic_read(&delayed_root->items) <
1319                    BTRFS_DELAYED_BACKGROUND / 2)
1320                        break;
1321
1322                delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1323                if (!delayed_node)
1324                        break;
1325
1326                path->leave_spinning = 1;
1327                root = delayed_node->root;
1328
1329                trans = btrfs_join_transaction(root);
1330                if (IS_ERR(trans)) {
1331                        btrfs_release_path(path);
1332                        btrfs_release_prepared_delayed_node(delayed_node);
1333                        total_done++;
1334                        continue;
1335                }
1336
1337                block_rsv = trans->block_rsv;
1338                trans->block_rsv = &root->fs_info->delayed_block_rsv;
1339
1340                __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1341
1342                trans->block_rsv = block_rsv;
1343                btrfs_end_transaction(trans);
1344                btrfs_btree_balance_dirty_nodelay(root->fs_info);
1345
1346                btrfs_release_path(path);
1347                btrfs_release_prepared_delayed_node(delayed_node);
1348                total_done++;
1349
1350        } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1351                 || total_done < async_work->nr);
1352
1353        btrfs_free_path(path);
1354out:
1355        wake_up(&delayed_root->wait);
1356        kfree(async_work);
1357}
1358
1359
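     /*
      * Queue an async work item that flushes up to @nr delayed nodes; nr == 0
      * means flush until the backlog is back under control.
      */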
1360static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1361                                     struct btrfs_fs_info *fs_info, int nr)
1362{
1363        struct btrfs_async_delayed_work *async_work;
1364
1365        async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1366        if (!async_work)
1367                return -ENOMEM;
1368
1369        async_work->delayed_root = delayed_root;
1370        btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
1371                        btrfs_async_run_delayed_root, NULL, NULL);
1372        async_work->nr = nr;
1373
1374        btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1375        return 0;
1376}
1377
1378void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1379{
1380        WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1381}
1382
1383static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1384{
1385        int val = atomic_read(&delayed_root->items_seq);
1386
1387        if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1388                return 1;
1389
1390        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1391                return 1;
1392
1393        return 0;
1394}
1395
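     /*
      * Throttle delayed item producers: kick off background flushing once the
      * backlog exceeds BTRFS_DELAYED_BACKGROUND, and once it reaches
      * BTRFS_DELAYED_WRITEBACK wait until a batch has been written back.
      */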
1396void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1397{
1398        struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1399
1400        if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1401                btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1402                return;
1403
1404        if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1405                int seq;
1406                int ret;
1407
1408                seq = atomic_read(&delayed_root->items_seq);
1409
1410                ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1411                if (ret)
1412                        return;
1413
1414                wait_event_interruptible(delayed_root->wait,
1415                                         could_end_wait(delayed_root, seq));
1416                return;
1417        }
1418
1419        btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1420}
1421
1422/* Will return 0 or -ENOMEM */
1423int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1424                                   struct btrfs_fs_info *fs_info,
1425                                   const char *name, int name_len,
1426                                   struct btrfs_inode *dir,
1427                                   struct btrfs_disk_key *disk_key, u8 type,
1428                                   u64 index)
1429{
1430        struct btrfs_delayed_node *delayed_node;
1431        struct btrfs_delayed_item *delayed_item;
1432        struct btrfs_dir_item *dir_item;
1433        int ret;
1434
1435        delayed_node = btrfs_get_or_create_delayed_node(dir);
1436        if (IS_ERR(delayed_node))
1437                return PTR_ERR(delayed_node);
1438
1439        delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1440        if (!delayed_item) {
1441                ret = -ENOMEM;
1442                goto release_node;
1443        }
1444
1445        delayed_item->key.objectid = btrfs_ino(dir);
1446        delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1447        delayed_item->key.offset = index;
1448
1449        dir_item = (struct btrfs_dir_item *)delayed_item->data;
1450        dir_item->location = *disk_key;
1451        btrfs_set_stack_dir_transid(dir_item, trans->transid);
1452        btrfs_set_stack_dir_data_len(dir_item, 0);
1453        btrfs_set_stack_dir_name_len(dir_item, name_len);
1454        btrfs_set_stack_dir_type(dir_item, type);
1455        memcpy((char *)(dir_item + 1), name, name_len);
1456
1457        ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
1458        /*
 1459         * We have reserved enough space when starting a new transaction,
 1460         * so a metadata reservation failure is impossible.
1461         */
1462        BUG_ON(ret);
1463
1464
1465        mutex_lock(&delayed_node->mutex);
1466        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1467        if (unlikely(ret)) {
1468                btrfs_err(fs_info,
1469                          "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1470                          name_len, name, delayed_node->root->objectid,
1471                          delayed_node->inode_id, ret);
1472                BUG();
1473        }
1474        mutex_unlock(&delayed_node->mutex);
1475
1476release_node:
1477        btrfs_release_delayed_node(delayed_node);
1478        return ret;
1479}
1480
1481static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1482                                               struct btrfs_delayed_node *node,
1483                                               struct btrfs_key *key)
1484{
1485        struct btrfs_delayed_item *item;
1486
1487        mutex_lock(&node->mutex);
1488        item = __btrfs_lookup_delayed_insertion_item(node, key);
1489        if (!item) {
1490                mutex_unlock(&node->mutex);
1491                return 1;
1492        }
1493
1494        btrfs_delayed_item_release_metadata(node->root, item);
1495        btrfs_release_delayed_item(item);
1496        mutex_unlock(&node->mutex);
1497        return 0;
1498}
1499
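     /*
      * Queue the deletion of a dir index item.  If a matching insertion item
      * is still pending it is simply dropped; otherwise a delayed deletion
      * item is added to the delayed node.
      */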
1500int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1501                                   struct btrfs_fs_info *fs_info,
1502                                   struct btrfs_inode *dir, u64 index)
1503{
1504        struct btrfs_delayed_node *node;
1505        struct btrfs_delayed_item *item;
1506        struct btrfs_key item_key;
1507        int ret;
1508
1509        node = btrfs_get_or_create_delayed_node(dir);
1510        if (IS_ERR(node))
1511                return PTR_ERR(node);
1512
1513        item_key.objectid = btrfs_ino(dir);
1514        item_key.type = BTRFS_DIR_INDEX_KEY;
1515        item_key.offset = index;
1516
1517        ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
1518        if (!ret)
1519                goto end;
1520
1521        item = btrfs_alloc_delayed_item(0);
1522        if (!item) {
1523                ret = -ENOMEM;
1524                goto end;
1525        }
1526
1527        item->key = item_key;
1528
1529        ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1530        /*
1531         * We have reserved enough space when we started a new transaction,
1532         * so a metadata reservation failure here is impossible.
1533         */
1534        BUG_ON(ret);
1535
1536        mutex_lock(&node->mutex);
1537        ret = __btrfs_add_delayed_deletion_item(node, item);
1538        if (unlikely(ret)) {
1539                btrfs_err(fs_info,
1540                          "error adding delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1541                          index, node->root->objectid, node->inode_id, ret);
1542                BUG();
1543        }
1544        mutex_unlock(&node->mutex);
1545end:
1546        btrfs_release_delayed_node(node);
1547        return ret;
1548}
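
/*
 * Editorial sketch (not part of the original file): a hypothetical unlink
 * helper showing the intended use of btrfs_delete_delayed_dir_index().  The
 * error handling is illustrative only.
 */
static int __maybe_unused example_unlink_dir_index(struct btrfs_trans_handle *trans,
                                                   struct btrfs_fs_info *fs_info,
                                                   struct btrfs_inode *dir,
                                                   u64 index)
{
        int ret;

        /*
         * Either cancels a still-pending insertion of this index or queues a
         * delayed deletion item for it.
         */
        ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
        if (ret)
                btrfs_abort_transaction(trans, ret);
        return ret;
}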
1549
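/*
 * Copy the directory index count cached in the delayed node into the
 * in-memory inode.  Returns -ENOENT if the inode has no delayed node and
 * -EINVAL if the delayed node does not hold an index count yet.
 */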
1550int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1551{
1552        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1553
1554        if (!delayed_node)
1555                return -ENOENT;
1556
1557        /*
1558         * Since we hold this directory's i_mutex, no new directory index can
1559         * be added to the delayed node and index_cnt cannot change under us,
1560         * so there is no need to lock the delayed node.
1561         */
1562        if (!delayed_node->index_cnt) {
1563                btrfs_release_delayed_node(delayed_node);
1564                return -EINVAL;
1565        }
1566
1567        inode->index_cnt = delayed_node->index_cnt;
1568        btrfs_release_delayed_node(delayed_node);
1569        return 0;
1570}
1571
1572bool btrfs_readdir_get_delayed_items(struct inode *inode,
1573                                     struct list_head *ins_list,
1574                                     struct list_head *del_list)
1575{
1576        struct btrfs_delayed_node *delayed_node;
1577        struct btrfs_delayed_item *item;
1578
1579        delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1580        if (!delayed_node)
1581                return false;
1582
1583        /*
1584         * We can only do one readdir with delayed items at a time because of
1585         * item->readdir_list.
1586         */
1587        inode_unlock_shared(inode);
1588        inode_lock(inode);
1589
1590        mutex_lock(&delayed_node->mutex);
1591        item = __btrfs_first_delayed_insertion_item(delayed_node);
1592        while (item) {
1593                refcount_inc(&item->refs);
1594                list_add_tail(&item->readdir_list, ins_list);
1595                item = __btrfs_next_delayed_item(item);
1596        }
1597
1598        item = __btrfs_first_delayed_deletion_item(delayed_node);
1599        while (item) {
1600                refcount_inc(&item->refs);
1601                list_add_tail(&item->readdir_list, del_list);
1602                item = __btrfs_next_delayed_item(item);
1603        }
1604        mutex_unlock(&delayed_node->mutex);
1605        /*
1606         * This delayed node is still cached in the btrfs inode, so its
1607         * refcount must be > 1 here and we need not check whether it is
1608         * about to be freed.
1609         *
1610         * Besides that, this function is only used for readdir; no delayed
1611         * items are inserted or deleted during it, so we need not requeue
1612         * or dequeue this delayed node either.
1613         */
1614        refcount_dec(&delayed_node->refs);
1615
1616        return true;
1617}
1618
1619void btrfs_readdir_put_delayed_items(struct inode *inode,
1620                                     struct list_head *ins_list,
1621                                     struct list_head *del_list)
1622{
1623        struct btrfs_delayed_item *curr, *next;
1624
1625        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1626                list_del(&curr->readdir_list);
1627                if (refcount_dec_and_test(&curr->refs))
1628                        kfree(curr);
1629        }
1630
1631        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1632                list_del(&curr->readdir_list);
1633                if (refcount_dec_and_test(&curr->refs))
1634                        kfree(curr);
1635        }
1636
1637        /*
1638         * The VFS is going to do up_read(), so we need to downgrade back to a
1639         * read lock.
1640         */
1641        downgrade_write(&inode->i_rwsem);
1642}
1643
1644int btrfs_should_delete_dir_index(struct list_head *del_list,
1645                                  u64 index)
1646{
1647        struct btrfs_delayed_item *curr;
1648        int ret = 0;
1649
1650        list_for_each_entry(curr, del_list, readdir_list) {
1651                if (curr->key.offset > index)
1652                        break;
1653                if (curr->key.offset == index) {
1654                        ret = 1;
1655                        break;
1656                }
1657        }
1658        return ret;
1659}
1660
1661/*
1662 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1663 * Returns 1 if the dir_emit() callback asked us to stop, 0 otherwise.
1664 */
1665int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1666                                    struct list_head *ins_list)
1667{
1668        struct btrfs_dir_item *di;
1669        struct btrfs_delayed_item *curr, *next;
1670        struct btrfs_key location;
1671        char *name;
1672        int name_len;
1673        int over = 0;
1674        unsigned char d_type;
1675
1676        if (list_empty(ins_list))
1677                return 0;
1678
1679        /*
1680         * The data of the delayed items cannot change, so there is no need
1681         * to lock them.  And since we hold the directory's i_mutex, nobody
1682         * can delete any directory index right now.
1683         */
1684        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1685                list_del(&curr->readdir_list);
1686
1687                if (curr->key.offset < ctx->pos) {
1688                        if (refcount_dec_and_test(&curr->refs))
1689                                kfree(curr);
1690                        continue;
1691                }
1692
1693                ctx->pos = curr->key.offset;
1694
1695                di = (struct btrfs_dir_item *)curr->data;
1696                name = (char *)(di + 1);
1697                name_len = btrfs_stack_dir_name_len(di);
1698
1699                d_type = btrfs_filetype_table[di->type];
1700                btrfs_disk_key_to_cpu(&location, &di->location);
1701
1702                over = !dir_emit(ctx, name, name_len,
1703                               location.objectid, d_type);
1704
1705                if (refcount_dec_and_test(&curr->refs))
1706                        kfree(curr);
1707
1708                if (over)
1709                        return 1;
1710                ctx->pos++;
1711        }
1712        return 0;
1713}
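
/*
 * Editorial sketch (not part of the original file): how the four readdir
 * helpers above are meant to be combined.  The on-disk tree walk in the
 * middle is elided and all local names are illustrative.
 */
static int __maybe_unused example_readdir(struct inode *inode,
                                          struct dir_context *ctx)
{
        LIST_HEAD(ins_list);
        LIST_HEAD(del_list);
        bool put;
        int ret;

        /*
         * Snapshot the pending insertions/deletions.  When this returns true
         * it has also upgraded the VFS shared inode lock to an exclusive one.
         */
        put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);

        /*
         * Walk the on-disk DIR_INDEX items here (elided).  For every key
         * found, entries hidden by a queued deletion are skipped with:
         *
         *      if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
         *              continue;
         */

        /* Emit the entries that so far exist only as delayed insertions. */
        ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);

        /* Drop the snapshot and downgrade back to the shared lock. */
        if (put)
                btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
        return ret;
}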
1714
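/*
 * Serialize the current in-memory inode state into a stack btrfs_inode_item,
 * so it can later be written into the fs tree leaf when the delayed node is
 * run.
 */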
1715static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1716                                  struct btrfs_inode_item *inode_item,
1717                                  struct inode *inode)
1718{
1719        btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1720        btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1721        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1722        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1723        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1724        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1725        btrfs_set_stack_inode_generation(inode_item,
1726                                         BTRFS_I(inode)->generation);
1727        btrfs_set_stack_inode_sequence(inode_item,
1728                                       inode_peek_iversion(inode));
1729        btrfs_set_stack_inode_transid(inode_item, trans->transid);
1730        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1731        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1732        btrfs_set_stack_inode_block_group(inode_item, 0);
1733
1734        btrfs_set_stack_timespec_sec(&inode_item->atime,
1735                                     inode->i_atime.tv_sec);
1736        btrfs_set_stack_timespec_nsec(&inode_item->atime,
1737                                      inode->i_atime.tv_nsec);
1738
1739        btrfs_set_stack_timespec_sec(&inode_item->mtime,
1740                                     inode->i_mtime.tv_sec);
1741        btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1742                                      inode->i_mtime.tv_nsec);
1743
1744        btrfs_set_stack_timespec_sec(&inode_item->ctime,
1745                                     inode->i_ctime.tv_sec);
1746        btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1747                                      inode->i_ctime.tv_nsec);
1748
1749        btrfs_set_stack_timespec_sec(&inode_item->otime,
1750                                     BTRFS_I(inode)->i_otime.tv_sec);
1751        btrfs_set_stack_timespec_nsec(&inode_item->otime,
1752                                     BTRFS_I(inode)->i_otime.tv_nsec);
1753}
1754
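/*
 * Fill the VFS inode (and *rdev) from the dirty copy of the inode item
 * cached in the delayed node, if there is one.  Returns -ENOENT when no
 * delayed node exists or its inode item is not dirty, in which case the
 * caller has to read the item from the fs tree instead.
 */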
1755int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1756{
1757        struct btrfs_delayed_node *delayed_node;
1758        struct btrfs_inode_item *inode_item;
1759
1760        delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1761        if (!delayed_node)
1762                return -ENOENT;
1763
1764        mutex_lock(&delayed_node->mutex);
1765        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1766                mutex_unlock(&delayed_node->mutex);
1767                btrfs_release_delayed_node(delayed_node);
1768                return -ENOENT;
1769        }
1770
1771        inode_item = &delayed_node->inode_item;
1772
1773        i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1774        i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1775        btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1776        inode->i_mode = btrfs_stack_inode_mode(inode_item);
1777        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1778        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1779        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1780        BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1781
1782        inode_set_iversion_queried(inode,
1783                                   btrfs_stack_inode_sequence(inode_item));
1784        inode->i_rdev = 0;
1785        *rdev = btrfs_stack_inode_rdev(inode_item);
1786        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1787
1788        inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1789        inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1790
1791        inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1792        inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1793
1794        inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1795        inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1796
1797        BTRFS_I(inode)->i_otime.tv_sec =
1798                btrfs_stack_timespec_sec(&inode_item->otime);
1799        BTRFS_I(inode)->i_otime.tv_nsec =
1800                btrfs_stack_timespec_nsec(&inode_item->otime);
1801
1802        inode->i_generation = BTRFS_I(inode)->generation;
1803        BTRFS_I(inode)->index_cnt = (u64)-1;
1804
1805        mutex_unlock(&delayed_node->mutex);
1806        btrfs_release_delayed_node(delayed_node);
1807        return 0;
1808}
1809
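/*
 * Stage an inode item update in the delayed node instead of touching the fs
 * tree right away.  Metadata is reserved only the first time the node's
 * inode item becomes dirty; later updates just refresh the cached copy.
 */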
1810int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1811                               struct btrfs_root *root, struct inode *inode)
1812{
1813        struct btrfs_delayed_node *delayed_node;
1814        int ret = 0;
1815
1816        delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
1817        if (IS_ERR(delayed_node))
1818                return PTR_ERR(delayed_node);
1819
1820        mutex_lock(&delayed_node->mutex);
1821        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1822                fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1823                goto release_node;
1824        }
1825
1826        ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
1827                                                   delayed_node);
1828        if (ret)
1829                goto release_node;
1830
1831        fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1832        set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1833        delayed_node->count++;
1834        atomic_inc(&root->fs_info->delayed_root->items);
1835release_node:
1836        mutex_unlock(&delayed_node->mutex);
1837        btrfs_release_delayed_node(delayed_node);
1838        return ret;
1839}
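
/*
 * Editorial sketch (not part of the original file): a hypothetical caller
 * that prefers the delayed path for persisting an inode item and falls back
 * to a direct fs tree update (elided) if the delayed path fails.
 */
static int __maybe_unused example_update_inode(struct btrfs_trans_handle *trans,
                                               struct btrfs_root *root,
                                               struct inode *inode)
{
        int ret;

        ret = btrfs_delayed_update_inode(trans, root, inode);
        if (ret) {
                /*
                 * Could not reserve metadata for the delayed copy; update the
                 * btrfs_inode_item in the fs tree directly instead (elided).
                 */
        }
        return ret;
}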
1840
1841int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1842{
1843        struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1844        struct btrfs_delayed_node *delayed_node;
1845
1846        /*
1847         * We don't do delayed inode updates during log recovery because it
1848         * leads to ENOSPC problems.  This means we also can't do delayed
1849         * inode ref deletions.
1850         */
1851        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1852                return -EAGAIN;
1853
1854        delayed_node = btrfs_get_or_create_delayed_node(inode);
1855        if (IS_ERR(delayed_node))
1856                return PTR_ERR(delayed_node);
1857
1858        /*
1859         * We don't reserve space for the inode ref deletion because:
1860         * - We ONLY do async inode ref deletion for an inode that has a
1861         *   single link (i_nlink == 1), which means there is only one inode
1862         *   ref.  In most cases the inode ref and the inode item are in the
1863         *   same leaf, and we deal with them at the same time.  Since we are
1864         *   sure to reserve space for the inode item, it is unnecessary to
1865         *   reserve space for the inode ref deletion as well.
1866         * - If the inode ref and the inode item are not in the same leaf,
1867         *   we still needn't worry about ENOSPC, because we reserve much
1868         *   more space for the inode update than it actually needs.
1869         * - In the worst case, we can steal some space from the global
1870         *   reservation, but that is very rare.
1871         */
1872        mutex_lock(&delayed_node->mutex);
1873        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1874                goto release_node;
1875
1876        set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1877        delayed_node->count++;
1878        atomic_inc(&fs_info->delayed_root->items);
1879release_node:
1880        mutex_unlock(&delayed_node->mutex);
1881        btrfs_release_delayed_node(delayed_node);
1882        return 0;
1883}
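
/*
 * Editorial sketch (not part of the original file): -EAGAIN from
 * btrfs_delayed_delete_inode_ref() means log recovery is running, so the
 * inode ref would have to be deleted synchronously instead; that fallback is
 * elided here.
 */
static int __maybe_unused example_drop_last_inode_ref(struct btrfs_inode *inode)
{
        int ret;

        ret = btrfs_delayed_delete_inode_ref(inode);
        if (ret == -EAGAIN) {
                /* Delete the INODE_REF item through the fs tree (elided). */
                ret = 0;
        }
        return ret;
}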
1884
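/*
 * Throw away all pending work attached to @delayed_node: queued insertion
 * and deletion items, a pending inode ref deletion and a dirty inode item,
 * releasing the metadata reservations held for them.  Nothing is written to
 * the fs tree.
 */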
1885static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1886{
1887        struct btrfs_root *root = delayed_node->root;
1888        struct btrfs_fs_info *fs_info = root->fs_info;
1889        struct btrfs_delayed_item *curr_item, *prev_item;
1890
1891        mutex_lock(&delayed_node->mutex);
1892        curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1893        while (curr_item) {
1894                btrfs_delayed_item_release_metadata(root, curr_item);
1895                prev_item = curr_item;
1896                curr_item = __btrfs_next_delayed_item(prev_item);
1897                btrfs_release_delayed_item(prev_item);
1898        }
1899
1900        curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1901        while (curr_item) {
1902                btrfs_delayed_item_release_metadata(root, curr_item);
1903                prev_item = curr_item;
1904                curr_item = __btrfs_next_delayed_item(prev_item);
1905                btrfs_release_delayed_item(prev_item);
1906        }
1907
1908        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1909                btrfs_release_delayed_iref(delayed_node);
1910
1911        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1912                btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1913                btrfs_release_delayed_inode(delayed_node);
1914        }
1915        mutex_unlock(&delayed_node->mutex);
1916}
1917
1918void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1919{
1920        struct btrfs_delayed_node *delayed_node;
1921
1922        delayed_node = btrfs_get_delayed_node(inode);
1923        if (!delayed_node)
1924                return;
1925
1926        __btrfs_kill_delayed_node(delayed_node);
1927        btrfs_release_delayed_node(delayed_node);
1928}
1929
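/*
 * Kill the delayed nodes of all inodes belonging to @root.  Nodes are
 * collected from the radix tree in batches of up to 8; each pass restarts
 * the gang lookup just past the highest inode id found so far, so the scan
 * eventually covers the whole tree.
 */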
1930void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1931{
1932        u64 inode_id = 0;
1933        struct btrfs_delayed_node *delayed_nodes[8];
1934        int i, n;
1935
1936        while (1) {
1937                spin_lock(&root->inode_lock);
1938                n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1939                                           (void **)delayed_nodes, inode_id,
1940                                           ARRAY_SIZE(delayed_nodes));
1941                if (!n) {
1942                        spin_unlock(&root->inode_lock);
1943                        break;
1944                }
1945
1946                inode_id = delayed_nodes[n - 1]->inode_id + 1;
1947
1948                for (i = 0; i < n; i++)
1949                        refcount_inc(&delayed_nodes[i]->refs);
1950                spin_unlock(&root->inode_lock);
1951
1952                for (i = 0; i < n; i++) {
1953                        __btrfs_kill_delayed_node(delayed_nodes[i]);
1954                        btrfs_release_delayed_node(delayed_nodes[i]);
1955                }
1956        }
1957}
1958
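/*
 * Walk the fs-wide list of delayed nodes and throw away all of their pending
 * work.
 */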
1959void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1960{
1961        struct btrfs_delayed_node *curr_node, *prev_node;
1962
1963        curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1964        while (curr_node) {
1965                __btrfs_kill_delayed_node(curr_node);
1966
1967                prev_node = curr_node;
1968                curr_node = btrfs_next_delayed_node(curr_node);
1969                btrfs_release_delayed_node(prev_node);
1970        }
1971}
1972
1973