linux/drivers/md/raid1-10.c
// SPDX-License-Identifier: GPL-2.0
/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
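/*
 * Worked example (not part of the original file): with the common 4 KiB
 * PAGE_SIZE this works out to (64*1024 + 4096 - 1) / 4096 = 16 pages per
 * resync request; with 64 KiB pages it collapses to a single page.
 */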

/*
 * Number of guaranteed raid bios in case of extreme VM load:
 */
#define NR_RAID_BIOS 256

/* When we get a read error on a read-only array, we redirect the read to
 * another device without failing the first device, and without trying to
 * over-write it to correct the read error.  To keep track of bad blocks on
 * a per-bio level, we store IO_BLOCKED in the appropriate 'bios' pointer.
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad block, we need to remove the
 * bad-block marking, which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD.
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
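
/*
 * Illustrative sketch (not part of this file): how callers typically treat
 * the sentinel values above.  'mirror_bios' and 'slot' are hypothetical
 * names assumed only for this example.
 */
static inline bool example_slot_holds_real_bio(struct bio **mirror_bios,
                                               int slot)
{
        /*
         * IO_BLOCKED and IO_MADE_GOOD (and NULL) are markers, not real
         * bios, so they must never be submitted, completed or freed.
         */
        return !BIO_SPECIAL(mirror_bios[slot]);
}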

/* When there are this many requests queued to be written by
 * the raid thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;
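
/*
 * Illustrative sketch (not part of this file): a hypothetical check a
 * write path could make against max_queued_requests; 'pending_count' is
 * an assumed per-array counter of queued write requests.
 */
static inline bool example_array_is_congested(atomic_t *pending_count)
{
        return atomic_read(pending_count) > max_queued_requests;
}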

/* for managing resync I/O pages */
struct resync_pages {
        void            *raid_bio;
        struct page     *pages[RESYNC_PAGES];
};

static void rbio_pool_free(void *rbio, void *data)
{
        kfree(rbio);
}
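
/*
 * Illustrative sketch (not part of this file): rbio_pool_free() has the
 * mempool_free_t shape, so it pairs with an allocation callback in a
 * mempool of NR_RAID_BIOS elements.  'example_rbio_alloc' and its fixed
 * allocation size are assumptions for this sketch; the real callbacks in
 * raid1.c/raid10.c size the allocation from the number of mirrors.
 */
static void *example_rbio_alloc(gfp_t gfp_flags, void *data)
{
        return kzalloc(256, gfp_flags);
}

static mempool_t *example_make_rbio_pool(void)
{
        /* The pool keeps NR_RAID_BIOS elements available even under
         * extreme memory pressure. */
        return mempool_create(NR_RAID_BIOS, example_rbio_alloc,
                              rbio_pool_free, NULL);
}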

static inline int resync_alloc_pages(struct resync_pages *rp,
                                     gfp_t gfp_flags)
{
        int i;

        for (i = 0; i < RESYNC_PAGES; i++) {
                rp->pages[i] = alloc_page(gfp_flags);
                if (!rp->pages[i])
                        goto out_free;
        }

        return 0;

out_free:
        while (--i >= 0)
                put_page(rp->pages[i]);
        return -ENOMEM;
}
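
/*
 * Illustrative sketch (not part of this file): the intended lifecycle of a
 * struct resync_pages.  All pages are allocated up front; partial failure
 * is already unwound inside resync_alloc_pages(), and a fully set-up bundle
 * is later released with resync_free_pages() (defined below).
 * 'example_setup_resync_pages' is a hypothetical helper for this sketch.
 */
static int example_setup_resync_pages(struct bio *bio,
                                      struct resync_pages *rp)
{
        if (resync_alloc_pages(rp, GFP_KERNEL))
                return -ENOMEM;

        /* Stash the bundle so the completion path can find it again
         * (see get_resync_pages() below). */
        bio->bi_private = rp;
        return 0;
}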

static inline void resync_free_pages(struct resync_pages *rp)
{
        int i;

        for (i = 0; i < RESYNC_PAGES; i++)
                put_page(rp->pages[i]);
}

static inline void resync_get_all_pages(struct resync_pages *rp)
{
        int i;

        for (i = 0; i < RESYNC_PAGES; i++)
                get_page(rp->pages[i]);
}

static inline struct page *resync_fetch_page(struct resync_pages *rp,
                                             unsigned idx)
{
        if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
                return NULL;
        return rp->pages[idx];
}

/*
 * 'struct resync_pages' stores the actual pages used for doing the resync
 * IO, and it is per-bio, so .bi_private is made to point at it.
 */
static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
        return bio->bi_private;
}
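
/*
 * Illustrative sketch (not part of this file): how a completion handler
 * would recover and release the page bundle through .bi_private.
 * 'example_resync_end_io' is a hypothetical endio callback for this
 * sketch only.
 */
static void example_resync_end_io(struct bio *bio)
{
        struct resync_pages *rp = get_resync_pages(bio);

        /* Drop the references taken for this bio; the pages themselves
         * go away once every holder has called put_page(). */
        resync_free_pages(rp);
        bio_put(bio);
}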

/* generally called after bio_reset() for resetting the bvec table */
static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
                                       int size)
{
        int idx = 0;

        /* initialize bvec table again */
        do {
                struct page *page = resync_fetch_page(rp, idx);
                int len = min_t(int, size, PAGE_SIZE);

                /*
                 * won't fail because the vec table is big
                 * enough to hold all these pages
                 */
                bio_add_page(bio, page, len, 0);
                size -= len;
        } while (idx++ < RESYNC_PAGES && size > 0);
}
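
/*
 * Illustrative sketch (not part of this file): the typical sequence for
 * reusing a resync bio.  The bio_reset() call is left as a comment because
 * its signature differs across kernel versions; treat it as an assumption
 * of this sketch rather than the exact in-tree call.
 */
static void example_reuse_resync_bio(struct bio *bio, struct resync_pages *rp,
                                     int size)
{
        /* Wipe the bio, then rebuild its bvec table from the pre-allocated
         * resync pages so it can be resubmitted. */
        /* bio_reset(bio, ...); */
        md_bio_reset_resync_pages(bio, rp, size);
        bio->bi_private = rp;
}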