linux/drivers/md/raid1.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID1_H
#define _RAID1_H

/*
 * each barrier unit size is 64MB for now
 * note: it must be larger than RESYNC_DEPTH
 */
#define BARRIER_UNIT_SECTOR_BITS        17
#define BARRIER_UNIT_SECTOR_SIZE        (1<<BARRIER_UNIT_SECTOR_BITS)
/*
 * In struct r1conf, the following members are related to I/O barrier
 * buckets:
 *      atomic_t        *nr_pending;
 *      atomic_t        *nr_waiting;
 *      atomic_t        *nr_queued;
 *      atomic_t        *barrier;
 * Each of them points to an array of atomic_t variables; each array
 * has BARRIER_BUCKETS_NR elements and occupies a single memory page.
 * The data width of atomic_t is 4 bytes, equal to
 * 1<<(ilog2(sizeof(atomic_t))), so BARRIER_BUCKETS_NR_BITS is defined
 * as (PAGE_SHIFT - ilog2(sizeof(atomic_t))) to make sure an array of
 * atomic_t variables with BARRIER_BUCKETS_NR elements exactly fills
 * one memory page.
 */
#define BARRIER_BUCKETS_NR_BITS         (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
#define BARRIER_BUCKETS_NR              (1<<BARRIER_BUCKETS_NR_BITS)
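
/*
 * Editor's note (illustrative, not part of the driver): with 512-byte
 * sectors, one barrier unit spans 1<<BARRIER_UNIT_SECTOR_BITS sectors,
 * i.e. 1<<(17+9) bytes = 64MB, and with 4KB pages and 4-byte atomic_t
 * there are 4096/4 = 1024 buckets, so each counter array fills exactly
 * one page.  A compile-time check of that invariant could be placed in
 * any function:
 *
 *	BUILD_BUG_ON(BARRIER_BUCKETS_NR * sizeof(atomic_t) != PAGE_SIZE);
 */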

/* Note: raid1_info.rdev can be set to NULL asynchronously by raid1_remove_disk.
 * There are three safe ways to access raid1_info.rdev.
 * 1/ when holding mddev->reconfig_mutex
 * 2/ when resync/recovery is known to be happening - i.e. in code that is
 *    called as part of performing resync/recovery.
 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
 *    and if it is non-NULL, increment rdev->nr_pending before dropping the
 *    RCU lock.
 * When .rdev is set to NULL, the nr_pending count is checked again and if it
 * has been incremented, the pointer is put back in .rdev.
 */
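
/*
 * A minimal sketch of access method 3/ above (editor's example; the
 * function name is hypothetical, but the calls mirror what raid1.c
 * does in read_balance() and the write path; the reference taken here
 * is dropped later via rdev_dec_pending()):
 *
 *	static struct md_rdev *example_get_rdev(struct raid1_info *mirror)
 *	{
 *		struct md_rdev *rdev;
 *
 *		rcu_read_lock();
 *		rdev = rcu_dereference(mirror->rdev);
 *		if (rdev)
 *			atomic_inc(&rdev->nr_pending);
 *		rcu_read_unlock();
 *		return rdev;
 *	}
 */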

struct raid1_info {
        struct md_rdev  *rdev;
        sector_t        head_position;

        /* When choosing the best device for a read (read_balance())
         * we try to keep sequential reads on the same device
         */
        sector_t        next_seq_sect;
        sector_t        seq_start;
};
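
/*
 * A simplified sketch of how read_balance() in raid1.c uses the two
 * fields above (editor's example; 'choose_this_disk' is hypothetical):
 * a read starting where the previous one ended is treated as
 * sequential and kept on the same device.
 *
 *	if (conf->mirrors[disk].next_seq_sect == this_sector)
 *		choose_this_disk = true;
 *	...
 *	conf->mirrors[disk].next_seq_sect = this_sector + sectors;
 */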

/*
 * memory pools need a pointer to the mddev, so they can force an unplug
 * when memory is tight, and a count of the number of drives that the
 * pool was allocated for, so they know how much to allocate and free.
 * mddev->raid_disks cannot be used, as it can change while a pool is active.
 * These two values are stored in a kmalloc()ed struct.
 * The 'raid_disks' here is twice the raid_disks in r1conf, which allows
 * space for each 'real' device to have a replacement in the second half
 * of the array.
 */

struct pool_info {
        struct mddev *mddev;
        int     raid_disks;
};
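
/*
 * Roughly how raid1.c turns this into an allocation size (a sketch of
 * the mempool alloc callback; see r1bio_pool_alloc() in raid1.c for
 * the real code).  It allocates an r1bio with room for 'raid_disks'
 * entries in the trailing bios[] array:
 *
 *	static void *example_r1bio_alloc(gfp_t gfp_flags, void *data)
 *	{
 *		struct pool_info *pi = data;
 *		int size = offsetof(struct r1bio, bios[pi->raid_disks]);
 *
 *		return kzalloc(size, gfp_flags);
 *	}
 */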

struct r1conf {
        struct mddev            *mddev;
        struct raid1_info       *mirrors;       /* twice 'raid_disks' to
                                                 * allow for replacements.
                                                 */
        int                     raid_disks;

        spinlock_t              device_lock;

        /* list of 'struct r1bio' that need to be processed by raid1d,
         * whether to retry a read, write out a resync or recovery
         * block, or anything else.
         */
        struct list_head        retry_list;
        /* A separate list of r1bio which just need raid_end_bio_io called.
         * This mustn't happen for writes which had any errors if the superblock
         * needs to be written.
         */
        struct list_head        bio_end_io_list;

        /* queue pending writes to be submitted on unplug */
        struct bio_list         pending_bio_list;
        int                     pending_count;

        /* for use when syncing mirrors:
         * We don't allow both normal IO and resync/recovery IO at
         * the same time - resync/recovery can only happen when there
         * is no other IO.  So when either is active, the other has to wait.
         * See the more detailed description in raid1.c near raise_barrier(),
         * and the allocation sketch after this struct.
         */
        wait_queue_head_t       wait_barrier;
        spinlock_t              resync_lock;
        atomic_t                nr_sync_pending;
        atomic_t                *nr_pending;
        atomic_t                *nr_waiting;
        atomic_t                *nr_queued;
        atomic_t                *barrier;
        int                     array_frozen;

        /* Set to 1 if a full sync is needed (e.g. a fresh device was added).
         * Cleared when a sync completes.
         */
        int                     fullsync;

        /* When the same as mddev->recovery_disabled we don't allow
         * recovery to be attempted as we expect a read error.
         */
        int                     recovery_disabled;

        /* poolinfo contains information about the content of the
         * mempools - it changes when the array grows or shrinks
         */
        struct pool_info        *poolinfo;
        mempool_t               r1bio_pool;
        mempool_t               r1buf_pool;

        struct bio_set          bio_split;

        /* temporary buffer for synchronous IO when attempting to repair
         * a read error.
         */
        struct page             *tmppage;

        /* When taking over an array from a different personality, we store
         * the new thread here until we fully activate the array.
         */
        struct md_thread        *thread;

        /* Keep track of cluster resync window to send to other
         * nodes.
         */
        sector_t                cluster_sync_low;
        sector_t                cluster_sync_high;

};
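
/*
 * A sketch of how the per-bucket counter arrays in r1conf are set up
 * (editor's example; setup_conf() in raid1.c does the equivalent for
 * each of nr_pending, nr_waiting, nr_queued and barrier):
 *
 *	conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
 *				   sizeof(atomic_t), GFP_KERNEL);
 *	if (!conf->nr_pending)
 *		goto abort;
 */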

/*
 * this is our 'private' RAID1 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID1 operation, and about their status:
 */

struct r1bio {
        atomic_t                remaining; /* 'have we finished' count,
                                            * used from IRQ handlers
                                            */
        atomic_t                behind_remaining; /* number of write-behind ios remaining
                                                 * in this BehindIO request
                                                 */
        sector_t                sector;
        int                     sectors;
        unsigned long           state;
        struct mddev            *mddev;
        /*
         * original bio going to /dev/mdx
         */
        struct bio              *master_bio;
        /*
         * if the IO is in READ direction, then this is where we read
         */
        int                     read_disk;

        struct list_head        retry_list;

        /*
         * When R1BIO_BehindIO is set, we store pages for write behind
         * in behind_master_bio.
         */
        struct bio              *behind_master_bio;

        /*
         * if the IO is in WRITE direction, then multiple bios are used.
         * We choose the number when they are allocated.
         */
        struct bio              *bios[];
        /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously allocated */
};

/* bits for r1bio.state */
enum r1bio_state {
        R1BIO_Uptodate,
        R1BIO_IsSync,
        R1BIO_Degraded,
        R1BIO_BehindIO,
/* Set ReadError on bios that experience a read error so that
 * raid1d knows what to do with them.
 */
        R1BIO_ReadError,
/* For write-behind requests, we call bi_end_io when the last
 * non-write-behind device completes, provided any write was
 * successful.  Otherwise we call it when any write-behind write
 * succeeds, or with failure when the last write completes (and
 * all writes failed).
 * Record that bi_end_io was called with this flag...
 */
        R1BIO_Returned,
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag
 */
        R1BIO_MadeGood,
        R1BIO_WriteError,
        R1BIO_FailFast,
};
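
/*
 * The bits above index into r1bio.state and are manipulated with the
 * standard atomic bitops, e.g. (a sketch of patterns used throughout
 * raid1.c):
 *
 *	set_bit(R1BIO_Uptodate, &r1_bio->state);
 *	if (test_bit(R1BIO_ReadError, &r1_bio->state))
 *		handle_read_error(conf, r1_bio);
 */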

static inline int sector_to_idx(sector_t sector)
{
        return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
                         BARRIER_BUCKETS_NR_BITS);
}
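
/*
 * Example use (a sketch; _wait_barrier() and raise_barrier() in
 * raid1.c follow this pattern): an I/O at a given sector is accounted
 * against exactly one barrier bucket:
 *
 *	int idx = sector_to_idx(bio->bi_iter.bi_sector);
 *
 *	atomic_inc(&conf->nr_pending[idx]);
 */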
#endif