1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/*
 * Error-injection modes.  The array indices 0..Modes-1 select a
 * per-mode period/counter pair in struct faulty_conf.
 */
#define WriteTransient 0
#define ReadTransient 1
#define WritePersistent 2
#define ReadPersistent 3
#define WriteAll 4 /* doesn't go to device */
#define ReadFixable 5
#define Modes 6 /* number of countable modes above */

/* Control commands, encoded in the mode field of the layout. */
#define ClearErrors 31
#define ClearFaults 30

/* Internal fault-table states (never set via the layout). */
#define AllPersist 100 /* sector fails both reads and writes */
#define NoPersist 101 /* retired entry; slot may be reused */

/* Layout word: low 5 bits = mode, remaining bits = count/period. */
#define ModeMask 0x1f
#define ModeShift 5

/* Capacity of the persistent-fault table in struct faulty_conf. */
#define MaxFault 50
65#include <linux/blkdev.h>
66#include <linux/raid/md_u.h>
67#include "md.h"
68#include <linux/seq_file.h>
69
70
71static void faulty_fail(struct bio *bio, int error)
72{
73 struct bio *b = bio->bi_private;
74
75 b->bi_size = bio->bi_size;
76 b->bi_sector = bio->bi_sector;
77
78 bio_put(bio);
79
80 bio_io_error(b);
81}
82
typedef struct faulty_conf {
	int period[Modes];		/* re-arm value per mode; 0 means one-shot */
	atomic_t counters[Modes];	/* countdown to the next injected failure */
	sector_t faults[MaxFault];	/* sectors with recorded persistent faults */
	int modes[MaxFault];		/* fault mode for each faults[] entry */
	int nfaults;			/* used slots in faults[]/modes[] */
	mdk_rdev_t *rdev;		/* underlying component device (last on mddev->disks) */
} conf_t;
91
92static int check_mode(conf_t *conf, int mode)
93{
94 if (conf->period[mode] == 0 &&
95 atomic_read(&conf->counters[mode]) <= 0)
96 return 0;
97
98
99 if (atomic_dec_and_test(&conf->counters[mode])) {
100 if (conf->period[mode])
101 atomic_set(&conf->counters[mode], conf->period[mode]);
102 return 1;
103 }
104 return 0;
105}
106
107static int check_sector(conf_t *conf, sector_t start, sector_t end, int dir)
108{
109
110 int i;
111 for (i=0; i<conf->nfaults; i++)
112 if (conf->faults[i] >= start &&
113 conf->faults[i] < end) {
114
115 switch (conf->modes[i] * 2 + dir) {
116 case WritePersistent*2+WRITE: return 1;
117 case ReadPersistent*2+READ: return 1;
118 case ReadFixable*2+READ: return 1;
119 case ReadFixable*2+WRITE:
120 conf->modes[i] = NoPersist;
121 return 0;
122 case AllPersist*2+READ:
123 case AllPersist*2+WRITE: return 1;
124 default:
125 return 0;
126 }
127 }
128 return 0;
129}
130
131static void add_sector(conf_t *conf, sector_t start, int mode)
132{
133 int i;
134 int n = conf->nfaults;
135 for (i=0; i<conf->nfaults; i++)
136 if (conf->faults[i] == start) {
137 switch(mode) {
138 case NoPersist: conf->modes[i] = mode; return;
139 case WritePersistent:
140 if (conf->modes[i] == ReadPersistent ||
141 conf->modes[i] == ReadFixable)
142 conf->modes[i] = AllPersist;
143 else
144 conf->modes[i] = WritePersistent;
145 return;
146 case ReadPersistent:
147 if (conf->modes[i] == WritePersistent)
148 conf->modes[i] = AllPersist;
149 else
150 conf->modes[i] = ReadPersistent;
151 return;
152 case ReadFixable:
153 if (conf->modes[i] == WritePersistent ||
154 conf->modes[i] == ReadPersistent)
155 conf->modes[i] = AllPersist;
156 else
157 conf->modes[i] = ReadFixable;
158 return;
159 }
160 } else if (conf->modes[i] == NoPersist)
161 n = i;
162
163 if (n >= MaxFault)
164 return;
165 conf->faults[n] = start;
166 conf->modes[n] = mode;
167 if (conf->nfaults == n)
168 conf->nfaults = n+1;
169}
170
171static int make_request(struct request_queue *q, struct bio *bio)
172{
173 mddev_t *mddev = q->queuedata;
174 conf_t *conf = (conf_t*)mddev->private;
175 int failit = 0;
176
177 if (bio_data_dir(bio) == WRITE) {
178
179 if (atomic_read(&conf->counters[WriteAll])) {
180
181
182
183 bio_endio(bio, -EIO);
184 return 0;
185 }
186
187 if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
188 WRITE))
189 failit = 1;
190 if (check_mode(conf, WritePersistent)) {
191 add_sector(conf, bio->bi_sector, WritePersistent);
192 failit = 1;
193 }
194 if (check_mode(conf, WriteTransient))
195 failit = 1;
196 } else {
197
198 if (check_sector(conf, bio->bi_sector, bio->bi_sector + (bio->bi_size>>9),
199 READ))
200 failit = 1;
201 if (check_mode(conf, ReadTransient))
202 failit = 1;
203 if (check_mode(conf, ReadPersistent)) {
204 add_sector(conf, bio->bi_sector, ReadPersistent);
205 failit = 1;
206 }
207 if (check_mode(conf, ReadFixable)) {
208 add_sector(conf, bio->bi_sector, ReadFixable);
209 failit = 1;
210 }
211 }
212 if (failit) {
213 struct bio *b = bio_clone(bio, GFP_NOIO);
214 b->bi_bdev = conf->rdev->bdev;
215 b->bi_private = bio;
216 b->bi_end_io = faulty_fail;
217 generic_make_request(b);
218 return 0;
219 } else {
220 bio->bi_bdev = conf->rdev->bdev;
221 return 1;
222 }
223}
224
225static void status(struct seq_file *seq, mddev_t *mddev)
226{
227 conf_t *conf = (conf_t*)mddev->private;
228 int n;
229
230 if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
231 seq_printf(seq, " WriteTransient=%d(%d)",
232 n, conf->period[WriteTransient]);
233
234 if ((n=atomic_read(&conf->counters[ReadTransient])) != 0)
235 seq_printf(seq, " ReadTransient=%d(%d)",
236 n, conf->period[ReadTransient]);
237
238 if ((n=atomic_read(&conf->counters[WritePersistent])) != 0)
239 seq_printf(seq, " WritePersistent=%d(%d)",
240 n, conf->period[WritePersistent]);
241
242 if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0)
243 seq_printf(seq, " ReadPersistent=%d(%d)",
244 n, conf->period[ReadPersistent]);
245
246
247 if ((n=atomic_read(&conf->counters[ReadFixable])) != 0)
248 seq_printf(seq, " ReadFixable=%d(%d)",
249 n, conf->period[ReadFixable]);
250
251 if ((n=atomic_read(&conf->counters[WriteAll])) != 0)
252 seq_printf(seq, " WriteAll");
253
254 seq_printf(seq, " nfaults=%d", conf->nfaults);
255}
256
257
258static int reshape(mddev_t *mddev)
259{
260 int mode = mddev->new_layout & ModeMask;
261 int count = mddev->new_layout >> ModeShift;
262 conf_t *conf = mddev->private;
263
264 if (mddev->new_layout < 0)
265 return 0;
266
267
268 if (mode == ClearFaults)
269 conf->nfaults = 0;
270 else if (mode == ClearErrors) {
271 int i;
272 for (i=0 ; i < Modes ; i++) {
273 conf->period[i] = 0;
274 atomic_set(&conf->counters[i], 0);
275 }
276 } else if (mode < Modes) {
277 conf->period[mode] = count;
278 if (!count) count++;
279 atomic_set(&conf->counters[mode], count);
280 } else
281 return -EINVAL;
282 mddev->new_layout = -1;
283 mddev->layout = -1;
284 return 0;
285}
286
287static sector_t faulty_size(mddev_t *mddev, sector_t sectors, int raid_disks)
288{
289 WARN_ONCE(raid_disks,
290 "%s does not support generic reshape\n", __func__);
291
292 if (sectors == 0)
293 return mddev->dev_sectors;
294
295 return sectors;
296}
297
298static int run(mddev_t *mddev)
299{
300 mdk_rdev_t *rdev;
301 int i;
302 conf_t *conf;
303
304 if (md_check_no_bitmap(mddev))
305 return -EINVAL;
306
307 conf = kmalloc(sizeof(*conf), GFP_KERNEL);
308 if (!conf)
309 return -ENOMEM;
310
311 for (i=0; i<Modes; i++) {
312 atomic_set(&conf->counters[i], 0);
313 conf->period[i] = 0;
314 }
315 conf->nfaults = 0;
316
317 list_for_each_entry(rdev, &mddev->disks, same_set)
318 conf->rdev = rdev;
319
320 md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
321 mddev->private = conf;
322
323 reshape(mddev);
324
325 return 0;
326}
327
328static int stop(mddev_t *mddev)
329{
330 conf_t *conf = (conf_t *)mddev->private;
331
332 kfree(conf);
333 mddev->private = NULL;
334 return 0;
335}
336
/* Operations table registering this module as the md "faulty" personality. */
static struct mdk_personality faulty_personality =
{
	.name = "faulty",
	.level = LEVEL_FAULTY,
	.owner = THIS_MODULE,
	.make_request = make_request,
	.run = run,
	.stop = stop,
	.status = status,
	/* layout changes are (ab)used as the control interface */
	.check_reshape = reshape,
	.size = faulty_size,
};
349
/* Module entry: register the personality with the md core. */
static int __init raid_init(void)
{
	return register_md_personality(&faulty_personality);
}

/* Module exit: unregister the personality. */
static void raid_exit(void)
{
	unregister_md_personality(&faulty_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-10"); /* faulty */
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5"); /* presumably LEVEL_FAULTY == -5 — confirm in md_u.h */
366