linux/fs/btrfs/delayed-ref.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_DELAYED_REF_H
#define BTRFS_DELAYED_REF_H

#include <linux/refcount.h>

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
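
/*
 * Illustrative mapping (an assumption for clarity, not taken from this
 * header): allocating a brand new extent typically queues
 * BTRFS_ADD_DELAYED_EXTENT, adding a backref to an already existing extent
 * (e.g. when COW'ing a tree block or cloning a file extent) queues
 * BTRFS_ADD_DELAYED_REF, and dropping a backref queues
 * BTRFS_DROP_DELAYED_REF.
 */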

struct btrfs_delayed_ref_node {
        struct rb_node ref_node;
        /*
         * If action is BTRFS_ADD_DELAYED_REF, this node is also linked to
         * ref_head->ref_add_list, so we do not need to iterate the whole
         * ref_head->ref_tree to find BTRFS_ADD_DELAYED_REF nodes.
         */
        struct list_head add_list;

        /* the starting bytenr of the extent */
        u64 bytenr;

        /* the size of the extent */
        u64 num_bytes;

        /* seq number to keep track of insertion order */
        u64 seq;

        /* ref count on this data structure */
        refcount_t refs;

        /*
         * How many refs this entry is adding or deleting.  For
         * head refs, this may be a negative number because it keeps
         * track of the total mods done to the reference count.
         * For individual refs, this will always be a positive number.
         *
         * It may be more than one, since it is possible for a single
         * parent to have more than one ref on an extent.
         */
        int ref_mod;

        unsigned int action:8;
        unsigned int type:8;
        /* is this a head ref? */
        unsigned int is_head:1;
        /* is this node still in the rbtree? */
        unsigned int in_tree:1;
};
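
/*
 * Worked example for ref_mod above (illustrative assumption): if the same
 * parent block ends up holding two references on one extent, both can be
 * represented by a single BTRFS_ADD_DELAYED_REF node with ref_mod == 2,
 * while a head ref with two adds and three drops queued tracks a net
 * modification of -1.
 */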

struct btrfs_delayed_extent_op {
        struct btrfs_disk_key key;
        u8 level;
        bool update_key;
        bool update_flags;
        bool is_data;
        u64 flags_to_set;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
        u64 bytenr;
        u64 num_bytes;
        refcount_t refs;
        /*
         * the mutex is held while running the refs, and it is also
         * held when checking the sum of reference modifications.
         */
        struct mutex mutex;

        spinlock_t lock;
        struct rb_root_cached ref_tree;
        /* Accumulate BTRFS_ADD_DELAYED_REF nodes on this ref_add_list. */
        struct list_head ref_add_list;

        struct rb_node href_node;

        struct btrfs_delayed_extent_op *extent_op;

        /*
         * This tracks the final ref_mod from all the refs associated with
         * this head ref.  It is not adjusted as delayed refs are run and is
         * meant to track whether we need to do the csum accounting or not.
         */
        int total_ref_mod;

        /*
         * This is the current count of outstanding ref modifications for this
         * bytenr.  It is used with lookup_extent_info to get an accurate
         * reference count for a bytenr, and it is adjusted as delayed refs
         * are run so that the on disk reference count + ref_mod stays
         * accurate.
         */
        int ref_mod;

        /*
         * When a new extent is allocated, it is just reserved in memory.
         * The actual extent isn't inserted into the extent allocation tree
         * until the delayed ref is processed.  must_insert_reserved is
         * used to flag a delayed ref so the accounting can be updated
         * when a full insert is done.
         *
         * It is possible the extent will be freed before it is ever
         * inserted into the extent allocation tree.  In this case
         * we need to update the in-RAM accounting to properly reflect
         * that the free has happened.
         */
        unsigned int must_insert_reserved:1;
        unsigned int is_data:1;
        unsigned int is_system:1;
        unsigned int processing:1;
};
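
/*
 * Worked example for ref_mod and total_ref_mod above (illustrative
 * assumption): if an extent has 5 references on disk and this head has two
 * adds and one drop queued, the net modification is +1, so "on disk
 * reference count + ref_mod" yields 5 + 1 == 6 as the effective count.  As
 * the individual refs are run, ref_mod is adjusted back towards 0, while
 * total_ref_mod keeps the +1 for the csum accounting decision.
 */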

struct btrfs_delayed_tree_ref {
        struct btrfs_delayed_ref_node node;
        u64 root;
        u64 parent;
        int level;
};

struct btrfs_delayed_data_ref {
        struct btrfs_delayed_ref_node node;
        u64 root;
        u64 parent;
        u64 objectid;
        u64 offset;
};

enum btrfs_delayed_ref_flags {
        /* Indicate that we are flushing delayed refs for the commit */
        BTRFS_DELAYED_REFS_FLUSHING,
};

struct btrfs_delayed_ref_root {
        /* head ref rbtree */
        struct rb_root_cached href_root;

        /* dirty extent records */
        struct rb_root dirty_extent_root;

        /* this spin lock protects the rbtree and the entries inside */
        spinlock_t lock;

        /* how many delayed ref updates we've queued, used by the
         * throttling code
         */
        atomic_t num_entries;

        /* total number of head nodes in tree */
        unsigned long num_heads;

        /* total number of head nodes ready for processing */
        unsigned long num_heads_ready;

        u64 pending_csums;

        unsigned long flags;

        u64 run_delayed_start;

        /*
         * Used to make qgroup skip the given root.
         * This is for snapshot creation, as btrfs_qgroup_inherit() will
         * manually modify the counters for the snapshot and its source, so
         * we should skip the snapshot in new_root/old_roots or it will get
         * accounted twice.
         */
        u64 qgroup_to_skip;
};

enum btrfs_ref_type {
        BTRFS_REF_NOT_SET,
        BTRFS_REF_DATA,
        BTRFS_REF_METADATA,
        BTRFS_REF_LAST,
};

struct btrfs_data_ref {
        /* For EXTENT_DATA_REF */

        /* Original root this data extent belongs to */
        u64 owning_root;

        /* Inode which refers to this data extent */
        u64 ino;

        /*
         * file_offset - extent_offset
         *
         * file_offset is the key.offset of the EXTENT_DATA key.
         * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA item.
         */
        u64 offset;
};
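
/*
 * Worked example for @offset above (illustrative assumption): if the
 * EXTENT_DATA key has key.offset == 1M and btrfs_file_extent_offset() of
 * that item returns 256K, then the value stored here is 1M - 256K == 768K.
 */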

struct btrfs_tree_ref {
        /*
         * Level of this tree block
         *
         * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
         */
        int level;

        /*
         * Root which owns this tree block.
         *
         * For TREE_BLOCK_REF (skinny metadata, either inline or keyed)
         */
        u64 owning_root;

        /* For non-skinny metadata, no special member needed */
};

struct btrfs_ref {
        enum btrfs_ref_type type;
        int action;

        /*
         * Whether qgroup accounting should be skipped for this extent
         * modification.
         *
         * Normally false, but for certain cases like delayed subtree scan,
         * setting this flag can hugely reduce qgroup overhead.
         */
        bool skip_qgroup;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
        /* Root through which this modification is made. */
        u64 real_root;
#endif
        u64 bytenr;
        u64 len;

        /* Bytenr of the parent tree block */
        u64 parent;
        union {
                struct btrfs_data_ref data_ref;
                struct btrfs_tree_ref tree_ref;
        };
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int __init btrfs_delayed_ref_init(void);
void __cold btrfs_delayed_ref_exit(void);

static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
                                int action, u64 bytenr, u64 len, u64 parent)
{
        generic_ref->action = action;
        generic_ref->bytenr = bytenr;
        generic_ref->len = len;
        generic_ref->parent = parent;
}

static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref,
                                int level, u64 root, u64 mod_root, bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
        /* If @mod_root is not set, fall back to @root */
        generic_ref->real_root = mod_root ?: root;
#endif
        generic_ref->tree_ref.level = level;
        generic_ref->tree_ref.owning_root = root;
        generic_ref->type = BTRFS_REF_METADATA;
        if (skip_qgroup || !(is_fstree(root) &&
                             (!mod_root || is_fstree(mod_root))))
                generic_ref->skip_qgroup = true;
        else
                generic_ref->skip_qgroup = false;
}

static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
                                u64 ref_root, u64 ino, u64 offset, u64 mod_root,
                                bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
        /* If @mod_root is not set, fall back to @ref_root */
        generic_ref->real_root = mod_root ?: ref_root;
#endif
        generic_ref->data_ref.owning_root = ref_root;
        generic_ref->data_ref.ino = ino;
        generic_ref->data_ref.offset = offset;
        generic_ref->type = BTRFS_REF_DATA;
        if (skip_qgroup || !(is_fstree(ref_root) &&
                             (!mod_root || is_fstree(mod_root))))
                generic_ref->skip_qgroup = true;
        else
                generic_ref->skip_qgroup = false;
}
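
/*
 * Illustrative usage sketch (an assumption, not part of this header): a
 * caller queueing a delayed ref for a tree block would typically do
 * something like
 *
 *        struct btrfs_ref ref = { 0 };
 *
 *        btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
 *                               num_bytes, parent);
 *        btrfs_init_tree_ref(&ref, level, root_objectid, mod_root, false);
 *        ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
 *
 * The variables bytenr, num_bytes, parent, level, root_objectid, mod_root
 * and trans are placeholders for illustration only.
 */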

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
        return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
        if (op)
                kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
        WARN_ON(refcount_read(&ref->refs) == 0);
        if (refcount_dec_and_test(&ref->refs)) {
                WARN_ON(ref->in_tree);
                switch (ref->type) {
                case BTRFS_TREE_BLOCK_REF_KEY:
                case BTRFS_SHARED_BLOCK_REF_KEY:
                        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY:
                case BTRFS_SHARED_DATA_REF_KEY:
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        break;
                default:
                        BUG();
                }
        }
}

static inline u64 btrfs_ref_head_to_space_flags(
                                struct btrfs_delayed_ref_head *head_ref)
{
        if (head_ref->is_data)
                return BTRFS_BLOCK_GROUP_DATA;
        else if (head_ref->is_system)
                return BTRFS_BLOCK_GROUP_SYSTEM;
        return BTRFS_BLOCK_GROUP_METADATA;
}

static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
{
        if (refcount_dec_and_test(&head->refs))
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}

int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_ref *generic_ref,
                               struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_ref *generic_ref,
                               u64 reserved);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                            u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
        mutex_unlock(&head->mutex);
}
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
                struct btrfs_delayed_ref_root *delayed_refs);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);

void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
                                  enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
                                       struct btrfs_block_rsv *src,
                                       u64 num_bytes);
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
        return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
        return container_of(node, struct btrfs_delayed_data_ref, node);
}
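
/*
 * Illustrative sketch (an assumption, not taken from this header): callers
 * normally pick the cast helper based on node->type, mirroring the switch
 * in btrfs_put_delayed_ref() above:
 *
 *        switch (node->type) {
 *        case BTRFS_TREE_BLOCK_REF_KEY:
 *        case BTRFS_SHARED_BLOCK_REF_KEY:
 *                tree_ref = btrfs_delayed_node_to_tree_ref(node);
 *                break;
 *        case BTRFS_EXTENT_DATA_REF_KEY:
 *        case BTRFS_SHARED_DATA_REF_KEY:
 *                data_ref = btrfs_delayed_node_to_data_ref(node);
 *                break;
 *        }
 */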

#endif