/* linux/fs/btrfs/transaction.h */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#ifndef BTRFS_TRANSACTION_H
   7#define BTRFS_TRANSACTION_H
   8
   9#include <linux/refcount.h>
  10#include "btrfs_inode.h"
  11#include "delayed-ref.h"
  12#include "ctree.h"
  13
/*
 * Lifecycle states of a btrfs_transaction.  Transitions are made under
 * fs_info->trans_lock (see the comment on btrfs_transaction::state).
 */
enum btrfs_trans_state {
	TRANS_STATE_RUNNING,		/* transaction is live, accepting joiners */
	TRANS_STATE_COMMIT_START,	/* commit has begun */
	TRANS_STATE_COMMIT_DOING,	/* commit critical section (see io_bgs) */
	TRANS_STATE_UNBLOCKED,		/* past the critical section */
	TRANS_STATE_COMPLETED,		/* commit fully finished */
	TRANS_STATE_MAX,		/* sentinel: number of states, not a state */
};
  22
/*
 * Bit numbers for btrfs_transaction::flags (an unsigned long used as a
 * bitmap; BTRFS_TRANS_DIRTY_BG_RUN usage is described at io_bgs below).
 */
#define BTRFS_TRANS_HAVE_FREE_BGS       0
#define BTRFS_TRANS_DIRTY_BG_RUN        1
#define BTRFS_TRANS_CACHE_ENOSPC        2
  26
/*
 * Shared state of one running transaction.  Every joined handle
 * (btrfs_trans_handle) points here via ->transaction; the object is
 * reference counted through @use_count and dropped with
 * btrfs_put_transaction().
 */
struct btrfs_transaction {
	/* Id of this transaction. */
	u64 transid;
	/*
	 * total external writers(USERSPACE/START/ATTACH) in this
	 * transaction, it must be zero before the transaction is
	 * being committed
	 */
	atomic_t num_extwriters;
	/*
	 * total writers in this transaction, it must be zero before the
	 * transaction can end
	 */
	atomic_t num_writers;
	/* Reference count; dropped via btrfs_put_transaction(). */
	refcount_t use_count;

	/* BTRFS_TRANS_* bit flags defined above. */
	unsigned long flags;

	/* Protected by fs_info->trans_lock when we want to change it. */
	enum btrfs_trans_state state;
	/* Non-zero once the transaction has been aborted. */
	int aborted;
	/* NOTE(review): presumably the entry in fs_info's transaction list. */
	struct list_head list;
	/* Dirty extent state written out at commit (btrfs_write_marked_extents). */
	struct extent_io_tree dirty_pages;
	/* Time this transaction was started. */
	time64_t start_time;
	/* Woken as num_writers/num_extwriters drain. */
	wait_queue_head_t writer_wait;
	/* Woken when the commit completes. */
	wait_queue_head_t commit_wait;
	/* btrfs_pending_snapshot entries to process at commit time. */
	struct list_head pending_snapshots;
	struct list_head dev_update_list;
	struct list_head switch_commits;
	/* Dirty block groups; protected by dirty_bgs_lock below. */
	struct list_head dirty_bgs;

	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - This function can only ever be
	 *   run by one of the transaction committers. Refer to
	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction
	 *
	 * - btrfs_write_dirty_blockgroups - this is called by
	 *   commit_cowonly_roots from transaction critical section
	 *   (TRANS_STATE_COMMIT_DOING)
	 *
	 * - btrfs_cleanup_dirty_bgs - called on transaction abort
	 */
	struct list_head io_bgs;
	/* Roots dropped in this transaction; see dropped_roots_lock. */
	struct list_head dropped_roots;

	/*
	 * we need to make sure block group deletion doesn't race with
	 * free space cache writeout.  This mutex keeps them from stomping
	 * on each other
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	/* Delayed extent reference state accumulated in this transaction. */
	struct btrfs_delayed_ref_root delayed_refs;
	/* Owning filesystem. */
	struct btrfs_fs_info *fs_info;
};
  88
/* Modifier bit: this handle type participates in filesystem freezing. */
#define __TRANS_FREEZABLE       (1U << 0)

/* Base handle types, one bit each. */
#define __TRANS_START           (1U << 9)
#define __TRANS_ATTACH          (1U << 10)
#define __TRANS_JOIN            (1U << 11)
#define __TRANS_JOIN_NOLOCK     (1U << 12)
#define __TRANS_DUMMY           (1U << 13)
#define __TRANS_JOIN_NOSTART    (1U << 14)

/* Public handle types; START and JOIN are the freezable ones. */
#define TRANS_START             (__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH            (__TRANS_ATTACH)
#define TRANS_JOIN              (__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK       (__TRANS_JOIN_NOLOCK)
#define TRANS_JOIN_NOSTART      (__TRANS_JOIN_NOSTART)

/* Types counted in btrfs_transaction::num_extwriters. */
#define TRANS_EXTWRITERS        (__TRANS_START | __TRANS_ATTACH)

/* Sentinel handle value used by send; not a real btrfs_trans_handle. */
#define BTRFS_SEND_TRANS_STUB   ((void *)1)
 107
/*
 * A caller's handle on a running transaction.  Obtained from the
 * btrfs_{start,join,attach}_transaction() family and released with
 * btrfs_end_transaction(); @transaction points at the shared state.
 */
struct btrfs_trans_handle {
	/* Id of the transaction this handle is attached to. */
	u64 transid;
	/* Metadata bytes reserved for this handle. */
	u64 bytes_reserved;
	/* Released by btrfs_trans_release_chunk_metadata(). */
	u64 chunk_bytes_reserved;
	unsigned long delayed_ref_updates;
	/* The shared transaction state. */
	struct btrfs_transaction *transaction;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	/* Reference count on this handle. */
	refcount_t use_count;
	/* TRANS_* type bits this handle was opened with. */
	unsigned int type;
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool can_flush_pending_bgs;
	bool reloc_reserved;
	bool dirty;
	/* Root the handle was started on. */
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info;
	/* Block groups created while this handle was held. */
	struct list_head new_bgs;
};
 128
/*
 * One snapshot creation request, queued on
 * btrfs_transaction::pending_snapshots and executed at commit time.
 */
struct btrfs_pending_snapshot {
	/* Dentry for the new snapshot. */
	struct dentry *dentry;
	/* Directory the snapshot is created in. */
	struct inode *dir;
	/* Root being snapshotted. */
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	/* The resulting snapshot root. */
	struct btrfs_root *snap;
	/* Qgroup inheritance info, may be NULL. NOTE(review): confirm. */
	struct btrfs_qgroup_inherit *inherit;
	struct btrfs_path *path;
	/* block reservation for the operation */
	struct btrfs_block_rsv block_rsv;
	/* Error status of the creation, 0 if it succeeded. */
	int error;
	/* Create the snapshot read-only. */
	bool readonly;
	/* Entry in the transaction's pending_snapshots list. */
	struct list_head list;
};
 144
 145static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
 146                                              struct inode *inode)
 147{
 148        spin_lock(&BTRFS_I(inode)->lock);
 149        BTRFS_I(inode)->last_trans = trans->transaction->transid;
 150        BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
 151        BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
 152        spin_unlock(&BTRFS_I(inode)->lock);
 153}
 154
 155/*
 156 * Make qgroup codes to skip given qgroupid, means the old/new_roots for
 157 * qgroup won't contain the qgroupid in it.
 158 */
 159static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
 160                                         u64 qgroupid)
 161{
 162        struct btrfs_delayed_ref_root *delayed_refs;
 163
 164        delayed_refs = &trans->transaction->delayed_refs;
 165        WARN_ON(delayed_refs->qgroup_to_skip);
 166        delayed_refs->qgroup_to_skip = qgroupid;
 167}
 168
 169static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
 170{
 171        struct btrfs_delayed_ref_root *delayed_refs;
 172
 173        delayed_refs = &trans->transaction->delayed_refs;
 174        WARN_ON(!delayed_refs->qgroup_to_skip);
 175        delayed_refs->qgroup_to_skip = 0;
 176}
 177
/* Transaction handle lifecycle: start/join/attach and release. */
int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items,
					int min_factor);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
					struct btrfs_root *root);
/* Wait for the transaction with the given transid to commit. */
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);

void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
/* Commit, either synchronously or asynchronously. */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   int wait_for_unblock);
 199
/*
 * Try to commit transaction asynchronously, so this is safe to call
 * even holding a spinlock.
 *
 * It's done by informing transaction_kthread to commit transaction without
 * waiting for commit interval.
 */
static inline void btrfs_commit_transaction_locksafe(
		struct btrfs_fs_info *fs_info)
{
	/* Set the flag before the wakeup so the kthread observes it when woken. */
	set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
	wake_up_process(fs_info->transaction_kthread);
}
/* Misc transaction helpers implemented in transaction.c. */
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root);
/* Write out / wait on extents tracked in a dirty_pages io tree. */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
				struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
/* Drop a reference obtained on a btrfs_transaction (see use_count). */
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
 228
 229#endif
 230