linux/drivers/md/bcache/writeback.h
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

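/*
 * Writeback cutoffs, as a percentage of the cache in use: ordinary writes
 * stop going through writeback once in_use exceeds CUTOFF_WRITEBACK;
 * REQ_SYNC writes keep going through writeback until in_use exceeds
 * CUTOFF_WRITEBACK_SYNC.
 */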
#define CUTOFF_WRITEBACK        40
#define CUTOFF_WRITEBACK_SYNC   70

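/* Total dirty sectors on @d: the sum of its per-stripe dirty counters */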
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
        uint64_t i, ret = 0;

        for (i = 0; i < d->nr_stripes; i++)
                ret += atomic_read(d->stripe_sectors_dirty + i);

        return ret;
}

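/* Convert a sector offset on the backing device into a stripe index */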
static inline unsigned offset_to_stripe(struct bcache_device *d,
                                        uint64_t offset)
{
        do_div(offset, d->stripe_size);
        return offset;
}

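/*
 * Returns true if any stripe covered by the range
 * [offset, offset + nr_sectors) already holds dirty data.
 */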
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
                                           uint64_t offset,
                                           unsigned nr_sectors)
{
        unsigned stripe = offset_to_stripe(&dc->disk, offset);

        while (1) {
                if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
                        return true;

                if (nr_sectors <= dc->disk.stripe_size)
                        return false;

                nr_sectors -= dc->disk.stripe_size;
                stripe++;
        }
}

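/*
 * Writeback policy for an incoming write: refuse if the device is not in
 * writeback mode, is detaching, or the cache is past CUTOFF_WRITEBACK_SYNC;
 * accept writes overlapping an already-dirty stripe when partial stripe
 * writes are expensive; otherwise accept REQ_SYNC writes, or any write
 * while the cache is at or below CUTOFF_WRITEBACK.
 */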
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
                                    unsigned cache_mode, bool would_skip)
{
        unsigned in_use = dc->disk.c->gc_stats.in_use;

        if (cache_mode != CACHE_MODE_WRITEBACK ||
            test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            in_use > CUTOFF_WRITEBACK_SYNC)
                return false;

        if (dc->partial_stripes_expensive &&
            bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
                                    bio_sectors(bio)))
                return true;

        if (would_skip)
                return false;

        return bio->bi_rw & REQ_SYNC ||
                in_use <= CUTOFF_WRITEBACK;
}

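/* Wake the per-device writeback thread */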
static inline void bch_writeback_queue(struct cached_dev *dc)
{
        wake_up_process(dc->writeback_thread);
}

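/*
 * Note that dirty data now exists: on the 0 -> 1 transition of has_dirty,
 * take a reference on the cached_dev, persist BDEV_STATE_DIRTY in the
 * backing device's superblock, and kick the writeback thread.
 */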
static inline void bch_writeback_add(struct cached_dev *dc)
{
        if (!atomic_read(&dc->has_dirty) &&
            !atomic_xchg(&dc->has_dirty, 1)) {
                atomic_inc(&dc->count);

                if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
                        SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
                        /* XXX: should do this synchronously */
                        bch_write_bdev_super(dc, NULL);
                }

                bch_writeback_queue(dc);
        }
}

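/* Defined in writeback.c */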
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct cached_dev *dc);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif