1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/*
 * Fault-injection modes.  The first six are countdown modes selected via
 * the array layout: the mode number lives in the low ModeShift bits and
 * the count/period in the remaining high bits (see faulty_reshape()).
 */
#define WriteTransient 0
#define ReadTransient 1
#define WritePersistent 2
#define ReadPersistent 3
#define WriteAll 4
#define ReadFixable 5
#define Modes 6	/* number of countdown modes above */

/* Special "mode" values accepted by faulty_reshape() to reset state. */
#define ClearErrors 31
#define ClearFaults 30

/* Internal per-sector fault states used in faulty_conf.modes[]. */
#define AllPersist 100	/* fail both reads and writes at this sector */
#define NoPersist 101	/* entry cleared; slot may be reused */

/* Layout word encoding: mode = layout & ModeMask, count = layout >> ModeShift. */
#define ModeMask 0x1f
#define ModeShift 5

#define MaxFault 50	/* capacity of the recorded-fault table */
65#include <linux/blkdev.h>
66#include <linux/module.h>
67#include <linux/raid/md_u.h>
68#include <linux/slab.h>
69#include "md.h"
70#include <linux/seq_file.h>
71
72
73static void faulty_fail(struct bio *bio, int error)
74{
75 struct bio *b = bio->bi_private;
76
77 b->bi_size = bio->bi_size;
78 b->bi_sector = bio->bi_sector;
79
80 bio_put(bio);
81
82 bio_io_error(b);
83}
84
/* Per-array state for the faulty personality. */
struct faulty_conf {
	int period[Modes];		/* reload value per mode; 0 means one-shot/disabled */
	atomic_t counters[Modes];	/* countdown until the next injected error */
	sector_t faults[MaxFault];	/* sectors with recorded persistent faults */
	int modes[MaxFault];		/* fault mode for each faults[] entry */
	int nfaults;			/* number of faults[]/modes[] slots in use */
	struct md_rdev *rdev;		/* underlying device (last one seen in faulty_run) */
};
93
94static int check_mode(struct faulty_conf *conf, int mode)
95{
96 if (conf->period[mode] == 0 &&
97 atomic_read(&conf->counters[mode]) <= 0)
98 return 0;
99
100
101 if (atomic_dec_and_test(&conf->counters[mode])) {
102 if (conf->period[mode])
103 atomic_set(&conf->counters[mode], conf->period[mode]);
104 return 1;
105 }
106 return 0;
107}
108
109static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir)
110{
111
112 int i;
113 for (i=0; i<conf->nfaults; i++)
114 if (conf->faults[i] >= start &&
115 conf->faults[i] < end) {
116
117 switch (conf->modes[i] * 2 + dir) {
118 case WritePersistent*2+WRITE: return 1;
119 case ReadPersistent*2+READ: return 1;
120 case ReadFixable*2+READ: return 1;
121 case ReadFixable*2+WRITE:
122 conf->modes[i] = NoPersist;
123 return 0;
124 case AllPersist*2+READ:
125 case AllPersist*2+WRITE: return 1;
126 default:
127 return 0;
128 }
129 }
130 return 0;
131}
132
133static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
134{
135 int i;
136 int n = conf->nfaults;
137 for (i=0; i<conf->nfaults; i++)
138 if (conf->faults[i] == start) {
139 switch(mode) {
140 case NoPersist: conf->modes[i] = mode; return;
141 case WritePersistent:
142 if (conf->modes[i] == ReadPersistent ||
143 conf->modes[i] == ReadFixable)
144 conf->modes[i] = AllPersist;
145 else
146 conf->modes[i] = WritePersistent;
147 return;
148 case ReadPersistent:
149 if (conf->modes[i] == WritePersistent)
150 conf->modes[i] = AllPersist;
151 else
152 conf->modes[i] = ReadPersistent;
153 return;
154 case ReadFixable:
155 if (conf->modes[i] == WritePersistent ||
156 conf->modes[i] == ReadPersistent)
157 conf->modes[i] = AllPersist;
158 else
159 conf->modes[i] = ReadFixable;
160 return;
161 }
162 } else if (conf->modes[i] == NoPersist)
163 n = i;
164
165 if (n >= MaxFault)
166 return;
167 conf->faults[n] = start;
168 conf->modes[n] = mode;
169 if (conf->nfaults == n)
170 conf->nfaults = n+1;
171}
172
/*
 * Request entry point for the faulty personality: decide, from the armed
 * error modes and the recorded fault sectors, whether this bio should be
 * failed, then forward either the bio or a doomed clone to the real device.
 */
static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
{
	struct faulty_conf *conf = mddev->private;
	int failit = 0;
	unsigned int max_sectors = blk_queue_get_max_sectors(mddev->queue,
							     bio->bi_rw);
	const unsigned long do_discard = (bio->bi_rw
					  & (REQ_DISCARD | REQ_SECURE));
	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);

	/*
	 * Split bios that exceed the queue's sector limit (discard and
	 * write-same are exempt) and resubmit both halves.
	 */
	if (!do_discard && !do_same && bio_sectors(bio) > max_sectors) {
		struct bio_pair2 *bp = bio_split2(bio, max_sectors);
		if (!bp) {
			/* split failed: error the original bio */
			bio_io_error(bio);
			return true;
		}

		generic_make_request(bp->bio1);
		generic_make_request(bp->bio2);
		bio_pair2_release(bp);
		return true;
	}

	if (bio_data_dir(bio) == WRITE) {
		/* write request */
		if (atomic_read(&conf->counters[WriteAll])) {
			/*
			 * WriteAll: fail the write immediately, without
			 * sending anything to the device.
			 */
			bio_endio(bio, -EIO);
			return true;
		}

		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
			failit = 1;
		if (check_mode(conf, WritePersistent)) {
			/* record the sector so future accesses fail too */
			add_sector(conf, bio->bi_sector, WritePersistent);
			failit = 1;
		}
		if (check_mode(conf, WriteTransient))
			failit = 1;
	} else {
		/* read request */
		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
			failit = 1;
		if (check_mode(conf, ReadTransient))
			failit = 1;
		if (check_mode(conf, ReadPersistent)) {
			add_sector(conf, bio->bi_sector, ReadPersistent);
			failit = 1;
		}
		if (check_mode(conf, ReadFixable)) {
			add_sector(conf, bio->bi_sector, ReadFixable);
			failit = 1;
		}
	}
	if (failit) {
		/*
		 * Send a clone to the real device; faulty_fail() will then
		 * complete the original bio with an error when it finishes.
		 */
		struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);

		b->bi_bdev = conf->rdev->bdev;
		b->bi_private = bio;
		b->bi_end_io = faulty_fail;
		bio = b;
	} else
		bio->bi_bdev = conf->rdev->bdev;

	generic_make_request(bio);
	return true;
}
242
243static void faulty_status(struct seq_file *seq, struct mddev *mddev)
244{
245 struct faulty_conf *conf = mddev->private;
246 int n;
247
248 if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
249 seq_printf(seq, " WriteTransient=%d(%d)",
250 n, conf->period[WriteTransient]);
251
252 if ((n=atomic_read(&conf->counters[ReadTransient])) != 0)
253 seq_printf(seq, " ReadTransient=%d(%d)",
254 n, conf->period[ReadTransient]);
255
256 if ((n=atomic_read(&conf->counters[WritePersistent])) != 0)
257 seq_printf(seq, " WritePersistent=%d(%d)",
258 n, conf->period[WritePersistent]);
259
260 if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0)
261 seq_printf(seq, " ReadPersistent=%d(%d)",
262 n, conf->period[ReadPersistent]);
263
264
265 if ((n=atomic_read(&conf->counters[ReadFixable])) != 0)
266 seq_printf(seq, " ReadFixable=%d(%d)",
267 n, conf->period[ReadFixable]);
268
269 if ((n=atomic_read(&conf->counters[WriteAll])) != 0)
270 seq_printf(seq, " WriteAll");
271
272 seq_printf(seq, " nfaults=%d", conf->nfaults);
273}
274
275
276static int faulty_reshape(struct mddev *mddev)
277{
278 int mode = mddev->new_layout & ModeMask;
279 int count = mddev->new_layout >> ModeShift;
280 struct faulty_conf *conf = mddev->private;
281
282 if (mddev->new_layout < 0)
283 return 0;
284
285
286 if (mode == ClearFaults)
287 conf->nfaults = 0;
288 else if (mode == ClearErrors) {
289 int i;
290 for (i=0 ; i < Modes ; i++) {
291 conf->period[i] = 0;
292 atomic_set(&conf->counters[i], 0);
293 }
294 } else if (mode < Modes) {
295 conf->period[mode] = count;
296 if (!count) count++;
297 atomic_set(&conf->counters[mode], count);
298 } else
299 return -EINVAL;
300 mddev->new_layout = -1;
301 mddev->layout = -1;
302 return 0;
303}
304
305static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
306{
307 WARN_ONCE(raid_disks,
308 "%s does not support generic reshape\n", __func__);
309
310 if (sectors == 0)
311 return mddev->dev_sectors;
312
313 return sectors;
314}
315
316static int faulty_run(struct mddev *mddev)
317{
318 struct md_rdev *rdev;
319 int i;
320 struct faulty_conf *conf;
321
322 if (md_check_no_bitmap(mddev))
323 return -EINVAL;
324
325 conf = kmalloc(sizeof(*conf), GFP_KERNEL);
326 if (!conf)
327 return -ENOMEM;
328
329 for (i=0; i<Modes; i++) {
330 atomic_set(&conf->counters[i], 0);
331 conf->period[i] = 0;
332 }
333 conf->nfaults = 0;
334
335 rdev_for_each(rdev, mddev) {
336 conf->rdev = rdev;
337 disk_stack_limits(mddev->gendisk, rdev->bdev,
338 rdev->data_offset << 9);
339 }
340
341 md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
342 mddev->private = conf;
343
344 faulty_reshape(mddev);
345
346 return 0;
347}
348
/* Release the per-array state allocated in faulty_run(). */
static void faulty_free(struct mddev *mddev, void *priv)
{
	kfree(priv);
}
355
/* md personality hooks for the "faulty" level. */
static struct md_personality faulty_personality =
{
	.name = "faulty",
	.level = LEVEL_FAULTY,
	.owner = THIS_MODULE,
	.make_request = faulty_make_request,
	.run = faulty_run,
	.free = faulty_free,
	.status = faulty_status,
	.check_reshape = faulty_reshape,	/* layout changes arm/clear faults */
	.size = faulty_size,
};
368
/* Module init: register the faulty personality with the md core. */
static int __init raid_init(void)
{
	return register_md_personality(&faulty_personality);
}
373
/* Module exit: unregister the faulty personality. */
static void raid_exit(void)
{
	unregister_md_personality(&faulty_personality);
}
378
/* Module registration, license, and md-level aliases for autoloading. */
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fault injection personality for MD");
MODULE_ALIAS("md-personality-10");
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5");
386