linux/drivers/md/bcache/io.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/*
 * Bios with headers
 *
 * A struct bbio wraps a struct bio together with the bkey it is doing IO
 * for, so the cache device and offset the bio targets (and the submit
 * timestamp used for latency accounting) travel with the bio itself.
 */

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	mempool_free(b, &c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));

	return bio;
}

/*
 * Submit a bbio whose key has already been filled in: point the bio at the
 * device and sector named by pointer 0 of the key, note the submit time for
 * latency accounting, and hand it to the block layer under the closure the
 * caller stored in bi_private.
 */
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);

	b->submit_time_us = local_clock_us();
	closure_bio_submit(c, bio, bio->bi_private);
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}
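
/*
 * Illustrative sketch, not part of the original file: a synchronous
 * metadata-style read driven through the helpers above, modelled on how
 * btree.c reads btree nodes.  The "example_" names and the caller-supplied
 * buffer are hypothetical; real users live in btree.c, journal.c and
 * request.c.
 */
static void example_read_endio(struct bio *bio)
{
	/* The caller stored its closure in bi_private before submitting */
	closure_put(bio->bi_private);
}

static int __maybe_unused example_read_sync(struct cache_set *c,
					    struct bkey *k, void *buf)
{
	struct closure cl;
	struct bio *bio;
	int ret;

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(c);
	bio->bi_opf		= REQ_OP_READ | REQ_META;
	bio->bi_iter.bi_size	= KEY_SIZE(k) << 9;
	bio->bi_end_io		= example_read_endio;
	bio->bi_private		= &cl;
	bch_bio_map(bio, buf);			/* map @buf into the bio's bvecs */

	bch_submit_bbio(bio, c, k, 0);		/* submit against pointer 0 of @k */
	closure_sync(&cl);			/* wait for example_read_endio() */

	/* Feed latency/error stats before releasing the bbio */
	bch_bbio_count_io_errors(c, bio, bio->bi_status, "example read");

	ret = blk_status_to_errno(bio->bi_status);
	bch_bbio_free(bio, c);
	return ret;
}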

/* IO errors */
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
	unsigned int errors;

	WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

	errors = atomic_add_return(1, &dc->io_errors);
	if (errors < dc->error_limit)
		pr_err("%s: IO error on backing device, unrecoverable",
			dc->backing_dev_name);
	else
		bch_cached_dev_error(dc);
}

void bch_count_io_errors(struct cache *ca,
			 blk_status_t error,
			 int is_read,
			 const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */
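	/*
	 * That is: every error_decay ("refresh") IOs the error count is
	 * multiplied by 127/128, so after n refreshes it has decayed by a
	 * factor of (127/128)^n.  Solving (127/128)^n = 1/2 for n gives
	 * n = log(1/2)/log(127/128) ~= 88.4 refresh periods per halflife.
	 */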

	if (ca->set->error_decay) {
		unsigned int count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned int errors;
			unsigned int old = count;
			unsigned int new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

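	/*
	 * Errors are accumulated in units of 1 << IO_ERROR_SHIFT so that the
	 * 127/128 decay above retains fractional precision; shift back down
	 * before comparing against the unshifted error_limit.
	 */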
	if (error) {
		unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
							&ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s%s",
			       ca->cache_dev_name, m,
			       is_read ? ", recovering." : ".");
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    ca->cache_dev_name, m);
	}
}

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);
	int is_read = (bio_data_dir(bio) == READ ? 1 : 0);

	unsigned int threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

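	/*
	 * Latency based congestion tracking: an IO that exceeded the
	 * relevant threshold drives c->congested negative (bounded by
	 * CONGESTED_MAX), and later fast IOs tick it back up towards zero.
	 * The request path reads this to decide when to bypass the cache.
	 */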
	if (threshold) {
		unsigned int t = local_clock_us();
		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;

			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, is_read, m);
}

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}