/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>
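
/*
 * Bios with headers
 *
 * A struct bbio wraps a struct bio together with the bkey the IO is for and
 * the submission timestamp used for the latency accounting below.
 */
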
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}
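
/*
 * Allocate a bbio from the cache set's bio_meta mempool. The returned bio
 * uses its inline bvecs, sized so a single bio can span a whole bucket
 * (bucket_pages(c) pages).
 */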
struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_max_vecs = bucket_pages(c);
	bio->bi_io_vec = bio->bi_inline_vecs;

	return bio;
}
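
/*
 * Submit against the key already stashed in the bbio: pointer 0 of the key
 * supplies both the target cache device and the starting sector. The caller
 * must already have pointed bi_private at the closure used for completion.
 */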
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
	bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private);
}
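
/*
 * Copy a single pointer out of key k into the bbio, then submit. A typical
 * call sequence might look like the sketch below (cl and k are assumed to
 * come from the caller):
 *
 *	struct bio *bio = bch_bbio_alloc(c);
 *	bio->bi_private = cl;
 *	bch_submit_bbio(bio, c, k, 0);
 */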
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}
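
/*
 * IO errors
 *
 * Error counts decay over time: every error_decay IOs the error count is
 * rescaled by 127/128, so errors on a device that has since been behaving
 * well gradually stop counting against it.
 */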
void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * error_decay ~= 88 * error_decay IOs
	 */
	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract error_decay from count; each time
			 * we successfully do so, we rescale the errors once:
			 */
			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}
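
/*
 * As well as counting errors, use completion latency to track congestion:
 * IOs slower than the read/write threshold subtract from c->congested in
 * proportion to how late they were, while IOs under the threshold slowly
 * increment it back toward zero.
 */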
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;	/* cheap approximation of ms */
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}
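
/*
 * Common endio path for bbios: account errors and congestion, release this
 * bio's reference, and drop the ref the IO held on its parent closure.
 */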
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}