/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2014 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES	253 /* raid4/5/6 limit */

static bool devices_handle_discard_safely = false;

/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10		/* rdev flag */

struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity.  The reason for this is to not confuse
	 * ti->len and give more flexibility in configuration.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC              0x1
#define CTR_FLAG_NOSYNC            0x2
#define CTR_FLAG_REBUILD           0x4
#define CTR_FLAG_DAEMON_SLEEP      0x8
#define CTR_FLAG_MIN_RECOVERY_RATE 0x10
#define CTR_FLAG_MAX_RECOVERY_RATE 0x20
#define CTR_FLAG_MAX_WRITE_BEHIND  0x40
#define CTR_FLAG_STRIPE_CACHE      0x80
#define CTR_FLAG_REGION_SIZE       0x100
#define CTR_FLAG_RAID10_COPIES     0x200
#define CTR_FLAG_RAID10_FORMAT     0x400

struct raid_set {
	struct dm_target *ti;

	uint32_t bitmap_loaded;
	uint32_t ctr_flags;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	struct raid_dev dev[0];
};

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned parity_devs;	/* # of parity devices. */
	const unsigned minimal_devs;	/* minimal # of devices in set. */
	const unsigned level;		/* RAID level. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid0",    "RAID0 (striping)",		0, 2, 0,  0 /* NONE */},
	{"raid1",    "RAID1 (mirroring)",		0, 2, 1,  0 /* NONE */},
	{"raid10",   "RAID10 (striped mirrors)",	0, 2, 10, UINT_MAX /* Varies */},
	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5,  ALGORITHM_PARITY_0},
	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid5_ls", "RAID5 (left symmetric)",		1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "RAID5 (right symmetric)",		1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid6_zr", "RAID6 (zero restart)",		2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "RAID6 (N restart)",		2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "RAID6 (N continue)",		2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE}
};

static char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 and 17 stand for "offset" and "use_far_sets"
	 * Refer to MD's raid10.c for details
	 */
	if ((layout & 0x10000) && (layout & 0x20000))
		return "offset";

	if ((layout & 0xFF) > 1)
		return "near";

	return "far";
}

static unsigned raid10_md_layout_to_copies(int layout)
{
	if ((layout & 0xFF) > 1)
		return layout & 0xFF;
	return (layout >> 8) & 0xFF;
}

static int raid10_format_to_md_layout(char *format, unsigned copies)
{
	unsigned n = 1, f = 1;

	if (!strcasecmp("near", format))
		n = copies;
	else
		f = copies;

	if (!strcasecmp("offset", format))
		return 0x30000 | (f << 8) | n;

	if (!strcasecmp("far", format))
		return 0x20000 | (f << 8) | n;

	return (f << 8) | n;
}
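
/*
 * Worked example (illustrative, derived from the helpers above):
 *
 *   raid10_format_to_md_layout("near", 2)   => 0x102   (f=1, n=2)
 *   raid10_format_to_md_layout("far", 2)    => 0x20201 (bit 17 set, f=2, n=1)
 *   raid10_format_to_md_layout("offset", 2) => 0x30201 (bits 16+17, f=2, n=1)
 *
 * and decoding round-trips:
 *
 *   raid10_md_layout_to_copies(0x102)   == 2 (near copies in bits 0-7)
 *   raid10_md_layout_to_copies(0x20201) == 2 (far copies in bits 8-15)
 */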

static struct raid_type *get_raid_type(char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
		if (!strcmp(raid_types[i].name, name))
			return &raid_types[i];

	return NULL;
}

static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
{
	unsigned i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = 0;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}

static void context_free(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permutations of these:
 *    <meta_dev> <data_dev>
 *    '-' <data_dev>
 *    '-' '-'
 *
 * The following is not allowed:
 *    <meta_dev> '-'
 *
 * This code parses those words.  If there is a failure,
 * the caller must use context_free to unwind the operations.
 */
static int dev_parms(struct raid_set *rs, char **argv)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int ret = 0;

	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets, since there is a separate device
		 * for data and metadata.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		if (strcmp(argv[0], "-")) {
			ret = dm_get_device(rs->ti, argv[0],
					    dm_table_get_mode(rs->ti->table),
					    &rs->dev[i].meta_dev);
			rs->ti->error = "RAID metadata device lookup failure";
			if (ret)
				return ret;

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page)
				return -ENOMEM;
		}

		if (!strcmp(argv[1], "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			rs->ti->error = "No data device supplied with metadata device";
			if (rs->dev[i].meta_dev)
				return -EINVAL;

			continue;
		}

		ret = dm_get_device(rs->ti, argv[1],
				    dm_table_get_mode(rs->ti->table),
				    &rs->dev[i].data_dev);
		if (ret) {
			rs->ti->error = "RAID device lookup failure";
			return ret;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		DMERR("Unable to rebuild drive while array is not in-sync");
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}
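
/*
 * Example device pairs as they would appear in a table line (illustrative;
 * the device names are hypothetical):
 *
 *   "/dev/sdb1 /dev/sdb2"  metadata and data devices both supplied
 *   "- /dev/sdc1"          data device only, no on-disk metadata
 *   "- -"                  missing/failed member, kept for bookkeeping
 *
 * "<meta_dev> -" is rejected above: metadata without data is meaningless.
 */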

/*
 * validate_region_size
 * @rs
 * @region_size:  region size in sectors.  If 0, pick a size.
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensures that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = (region_size << 9);

	return 0;
}
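
/*
 * Worked example (illustrative): for a 1 TiB target, ti->len is 2^31
 * sectors, so min_region_size = 2^31 / 2^21 = 1024 sectors.  That is
 * below the 2^13 (8192-sector) floor, so the 4MiB default is chosen.
 * Only targets larger than 8 TiB push the default region size past 4MiB.
 */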

/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned i, rebuild_cnt = 0;
	unsigned rebuilds_per_group = 0, copies, d;
	unsigned group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->raid_type->level) {
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.layout);
		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * that belong to the same mirror group are being rebuilt at
		 * the same time - at least one member of each group must
		 * stay in-sync.
		 */
		if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
			for (i = 0; i < rs->md.raid_disks * copies; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				d = i % rs->md.raid_disks;
				if ((!rs->dev[d].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.  (Note that "far" and "offset" formats only
		 * support two copies right now.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.  This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}
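
/*
 * Example (illustrative): a 4-device raid10 "near 2" array mirrors
 * dev0<->dev1 and dev2<->dev3.  Rebuilding dev0 and dev2 together is
 * fine (each mirror group keeps one in-sync member), but rebuilding
 * dev0 and dev1 together would lose a whole group and is rejected
 * by the loop above.
 */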

/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '-write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm.  (Default: near)
 */
static int parse_raid_params(struct raid_set *rs, char **argv,
			     unsigned num_raid_params)
{
	char *raid10_format = "near";
	unsigned raid10_copies = 2;
	unsigned i;
	unsigned long value, region_size = 0;
	sector_t sectors_per_dev = rs->ti->len;
	sector_t max_io_len;
	char *key;

	/*
	 * First, parse the in-order required parameters
	 * "chunk_size" is the only argument of this type.
	 */
	if (kstrtoul(argv[0], 10, &value) < 0) {
		rs->ti->error = "Bad chunk size";
		return -EINVAL;
	} else if (rs->raid_type->level == 1) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
	argv++;
	num_raid_params--;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'.  If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *      - Device is reset when param is read.
	 *   2) A new device is supplied.
	 *      - No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *      - Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *      - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->md.raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		if (!strcasecmp(argv[i], "nosync")) {
			rs->md.recovery_cp = MaxSector;
			rs->ctr_flags |= CTR_FLAG_NOSYNC;
			continue;
		}
		if (!strcasecmp(argv[i], "sync")) {
			rs->md.recovery_cp = 0;
			rs->ctr_flags |= CTR_FLAG_SYNC;
			continue;
		}

		/* The rest of the optional arguments come in key/value pairs */
		if ((i + 1) >= num_raid_params) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		key = argv[i++];

		/* Parameters that take a string value are checked here. */
		if (!strcasecmp(key, "raid10_format")) {
			if (rs->raid_type->level != 10) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			if (strcmp("near", argv[i]) &&
			    strcmp("far", argv[i]) &&
			    strcmp("offset", argv[i])) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return -EINVAL;
			}
			raid10_format = argv[i];
			rs->ctr_flags |= CTR_FLAG_RAID10_FORMAT;
			continue;
		}

		if (kstrtoul(argv[i], 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		/* Parameters that take a numeric value are checked here */
		if (!strcasecmp(key, "rebuild")) {
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}
			clear_bit(In_sync, &rs->dev[value].rdev.flags);
			rs->dev[value].rdev.recovery_offset = 0;
			rs->ctr_flags |= CTR_FLAG_REBUILD;
		} else if (!strcasecmp(key, "write_mostly")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid write_mostly drive index given";
				return -EINVAL;
			}
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
		} else if (!strcasecmp(key, "max_write_behind")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}
			rs->ctr_flags |= CTR_FLAG_MAX_WRITE_BEHIND;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;
			if (value > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.max_write_behind = value;
		} else if (!strcasecmp(key, "daemon_sleep")) {
			rs->ctr_flags |= CTR_FLAG_DAEMON_SLEEP;
			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, "stripe_cache")) {
			rs->ctr_flags |= CTR_FLAG_STRIPE_CACHE;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;

			if ((rs->raid_type->level != 5) &&
			    (rs->raid_type->level != 6)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}
			if (raid5_set_cache_size(&rs->md, (int)value)) {
				rs->ti->error = "Bad stripe_cache size";
				return -EINVAL;
			}
		} else if (!strcasecmp(key, "min_recovery_rate")) {
			rs->ctr_flags |= CTR_FLAG_MIN_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = (int)value;
		} else if (!strcasecmp(key, "max_recovery_rate")) {
			rs->ctr_flags |= CTR_FLAG_MAX_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = (int)value;
		} else if (!strcasecmp(key, "region_size")) {
			rs->ctr_flags |= CTR_FLAG_REGION_SIZE;
			region_size = value;
		} else if (!strcasecmp(key, "raid10_copies") &&
			   (rs->raid_type->level == 10)) {
			if ((value < 2) || (value > 0xFF)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}
			rs->ctr_flags |= CTR_FLAG_RAID10_COPIES;
			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameters";
			return -EINVAL;
		}
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rs->raid_type->level == 10) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		/*
		 * If the format is not "near", we only support
		 * two copies at the moment.
		 */
		if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
			rs->ti->error = "Too many copies for given RAID10 format.";
			return -EINVAL;
		}

		/* (Len * #mirrors) / #devices */
		sectors_per_dev = rs->ti->len * raid10_copies;
		sector_div(sectors_per_dev, rs->md.raid_disks);

		rs->md.layout = raid10_format_to_md_layout(raid10_format,
							   raid10_copies);
		rs->md.new_layout = rs->md.layout;
	} else if ((!rs->raid_type->level || rs->raid_type->level > 1) &&
		   sector_div(sectors_per_dev,
			      (rs->md.raid_disks - rs->raid_type->parity_devs))) {
		rs->ti->error = "Target length not divisible by number of data devices";
		return -EINVAL;
	}
	rs->md.dev_sectors = sectors_per_dev;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	return 0;
}
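
/*
 * Worked example (illustrative): a 4-device raid5 set has one parity
 * device, so ti->len must divide evenly by the 3 data devices; a
 * 300 GiB target yields dev_sectors of 100 GiB per member.  For raid10
 * with 2 copies over 4 devices, dev_sectors = len * 2 / 4 = len / 2.
 */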

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	dm_table_event(rs->ti->table);
}

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	return mddev_congested(&rs->md, bits);
}

/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 features;	/* Used to indicate possible future changes */

	__le32 num_devices;	/* Number of devices in this array. */
	__le32 array_position;	/* The position of this drive in the array */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Bit field of devices to indicate failures */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial array
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * RAID characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	/* Remainder of a logical block is zero-filled when writing (see super_sync()). */
} __packed;

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		return -EINVAL;
	}

	rdev->sb_loaded = 1;

	return 0;
}

static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	int i;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	sb = page_address(rdev->sb_page);
	failed_devices = le64_to_cpu(sb->failed_devices);

	for (i = 0; i < mddev->raid_disks; i++)
		if (!rs->dev[i].data_dev ||
		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
			failed_devices |= (1ULL << i);

	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->features = cpu_to_le32(0);

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);
	sb->failed_devices = cpu_to_le64(failed_devices);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
}

/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int ret;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	rdev->sb_start = 0;
	rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
	if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
		DMERR("superblock size of a logical block is no longer valid");
		return -EINVAL;
	}

	ret = read_disk_sb(rdev, rdev->sb_size);
	if (ret)
		return ret;

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);

		/* Force writing of superblocks to disk */
		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}

static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
{
	int role;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);
	uint64_t events_sb;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	uint32_t new_devs = 0;
	uint32_t rebuilds = 0;
	struct md_rdev *r;
	struct dm_raid_superblock *sb2;

	sb = page_address(rdev->sb_page);
	events_sb = le64_to_cpu(sb->events);
	failed_devices = le64_to_cpu(sb->failed_devices);

	/*
	 * Initialise to 1 if this is a new superblock.
	 */
	mddev->events = events_sb ? : 1;

	/*
	 * Reshaping is not currently allowed
	 */
	if (le32_to_cpu(sb->level) != mddev->level) {
		DMERR("Reshaping arrays not yet supported. (RAID level change)");
		return -EINVAL;
	}
	if (le32_to_cpu(sb->layout) != mddev->layout) {
		DMERR("Reshaping arrays not yet supported. (RAID layout change)");
		DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
		DMERR(" Old layout: %s w/ %d copies",
		      raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
		      raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
		DMERR(" New layout: %s w/ %d copies",
		      raid10_md_layout_to_format(mddev->layout),
		      raid10_md_layout_to_copies(mddev->layout));
		return -EINVAL;
	}
	if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
		DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
		return -EINVAL;
	}

	/* We can only change the number of devices in RAID1 right now */
	if ((rs->raid_type->level != 1) &&
	    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
		DMERR("Reshaping arrays not yet supported. (device count change)");
		return -EINVAL;
	}

	if (!(rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);

	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are two reasons we might not have a superblock:
	 * 1) The array is brand new - in which case, all of the
	 *    devices must have their In_sync bit set.  Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old array
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 */
	rdev_for_each(r, mddev) {
		if (!test_bit(In_sync, &r->flags)) {
			DMINFO("Device %d specified for rebuild: "
			       "Clearing superblock", r->raid_disk);
			rebuilds++;
		} else if (test_bit(FirstUse, &r->flags))
			new_devs++;
	}

	if (!rebuilds) {
		if (new_devs == mddev->raid_disks) {
			DMINFO("Superblocks created for new array");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs) {
			DMERR("New device injected "
			      "into existing array without 'rebuild' "
			      "parameter specified");
			return -EINVAL;
		}
	} else if (new_devs) {
		DMERR("'rebuild' devices cannot be "
		      "injected into an array with other first-time devices");
		return -EINVAL;
	} else if (mddev->recovery_cp != MaxSector) {
		DMERR("'rebuild' specified while array is not in-sync");
		return -EINVAL;
	}

	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	rdev_for_each(r, mddev) {
		if (!r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role != r->raid_disk) {
				if (rs->raid_type->level != 1) {
					rs->ti->error = "Cannot change device "
							"positions in RAID array";
					return -EINVAL;
				}
				DMINFO("RAID1 device #%d now at position #%d",
				       role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (failed_devices & (1 << role))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}

static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
{
	struct mddev *mddev = &rs->md;
	struct dm_raid_superblock *sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(mddev, rdev))
		return -EINVAL;

	/* Enable bitmap creation for RAID levels != 0 */
	mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0;
	rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;

	if (!test_bit(FirstUse, &rdev->flags)) {
		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset != MaxSector)
			clear_bit(In_sync, &rdev->flags);
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_bit(Faulty, &rdev->flags)) {
		clear_bit(Faulty, &rdev->flags);
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
		rdev->recovery_offset = 0;
	}

	clear_bit(FirstUse, &rdev->flags);

	return 0;
}

/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int ret;
	struct raid_dev *dev;
	struct md_rdev *rdev, *tmp, *freshest;
	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev) {
		/*
		 * Skipping super_load due to CTR_FLAG_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new.  This is the intended effect
		 * of the "sync" directive.
		 *
		 * When reshaping capability is added, we must ensure
		 * that the "sync" directive is disallowed during the
		 * reshape.
		 */
		rdev->sectors = to_sector(i_size_read(rdev->bdev->bd_inode));

		if (rs->ctr_flags & CTR_FLAG_SYNC)
			continue;

		if (!rdev->meta_bdev)
			continue;

		ret = super_load(rdev, freshest);

		switch (ret) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			dev = container_of(rdev, struct raid_dev, rdev);
			if (dev->meta_dev)
				dm_put_device(ti, dev->meta_dev);

			dev->meta_dev = NULL;
			rdev->meta_bdev = NULL;

			if (rdev->sb_page)
				put_page(rdev->sb_page);

			rdev->sb_page = NULL;

			rdev->sb_loaded = 0;

			/*
			 * We might be able to salvage the data device
			 * even though the meta device has failed.  For
			 * now, we behave as though '- -' had been
			 * set for this device in the table.
			 */
			if (dev->data_dev)
				dm_put_device(ti, dev->data_dev);

			dev->data_dev = NULL;
			rdev->bdev = NULL;

			list_del(&rdev->same_set);
		}
	}

	if (!freshest)
		return 0;

	if (validate_raid_redundancy(rs)) {
		rs->ti->error = "Insufficient redundancy to activate array";
		return -EINVAL;
	}

	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(rs, freshest))
		return -EINVAL;

	rdev_for_each(rdev, mddev)
		if ((rdev != freshest) && super_validate(rs, rdev))
			return -EINVAL;

	return 0;
}

/*
 * Enable/disable discard support on RAID set depending on
 * RAID level and discard properties of underlying RAID members.
 */
static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
{
	int i;
	bool raid456;

	/* Assume discards not supported until after checks below. */
	ti->discards_supported = false;

	/* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
	raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);

	for (i = 0; i < rs->md.raid_disks; i++) {
		struct request_queue *q;

		if (!rs->dev[i].rdev.bdev)
			continue;

		q = bdev_get_queue(rs->dev[i].rdev.bdev);
		if (!q || !blk_queue_discard(q))
			return;

		if (raid456) {
			if (!q->limits.discard_zeroes_data)
				return;
			if (!devices_handle_discard_safely) {
				DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
				DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
				return;
			}
		}
	}

	/* All RAID members properly support discards */
	ti->discards_supported = true;

	/*
	 * RAID1 and RAID10 personalities require bio splitting,
	 * RAID0/4/5/6 don't and process large discard bios properly.
	 */
	ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
	ti->num_discard_bios = 1;
}
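
/*
 * Example (illustrative): raid456 discard support can be forced on a
 * system whose devices are known to return zeroes after discard with
 * either "dm-raid.devices_handle_discard_safely=Y" on the kernel
 * command line or "modprobe dm-raid devices_handle_discard_safely=Y"
 * (see the module_param() declaration at the bottom of this file).
 */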

/*
 * Construct a RAID0/1/10/4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>		\
 *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
 *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 */
static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int ret;
	struct raid_type *rt;
	unsigned long num_raid_params, num_raid_devs;
	struct raid_set *rs = NULL;

	/* Must have at least <raid_type> <#raid_params> */
	if (argc < 2) {
		ti->error = "Too few arguments";
		return -EINVAL;
	}

	/* raid type */
	rt = get_raid_type(argv[0]);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* number of RAID parameters */
	if (kstrtoul(argv[0], 10, &num_raid_params) < 0) {
		ti->error = "Cannot understand number of RAID parameters";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* Skip over RAID params for now and find out # of devices */
	if (num_raid_params >= argc) {
		ti->error = "Arguments do not agree with counts given";
		return -EINVAL;
	}

	if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
	    (num_raid_devs > MAX_RAID_DEVICES)) {
		ti->error = "Cannot understand number of raid devices";
		return -EINVAL;
	}

	argc -= num_raid_params + 1; /* +1: we already have #raid_devs */
	if (argc != (num_raid_devs * 2)) {
		ti->error = "Number of supplied RAID devices does not match the count given";
		return -EINVAL;
	}

	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
	if (ret)
		goto bad;

	argv += num_raid_params + 1;

	ret = dev_parms(rs, argv);
	if (ret)
		goto bad;

	rs->md.sync_super = super_sync;
	ret = analyse_superblocks(ti, rs);
	if (ret)
		goto bad;

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_bios = 1;

	/*
	 * Disable/enable discard support on RAID set.
	 */
	configure_discard_support(ti, rs);

	/* Has to be held on running the array */
	mddev_lock_nointr(&rs->md);
	ret = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */
	mddev_unlock(&rs->md);

	if (ret) {
		ti->error = "Failed to run raid array";
		goto bad;
	}

	if (ti->len != rs->md.array_sectors) {
		ti->error = "Array size does not match requested target length";
		ret = -EINVAL;
		goto size_mismatch;
	}
	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	mddev_suspend(&rs->md);
	return 0;

size_mismatch:
	md_stop(&rs->md);
bad:
	context_free(rs);

	return ret;
}

static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	list_del_init(&rs->callbacks.list);
	md_stop(&rs->md);
	context_free(rs);
}

static int raid_map(struct dm_target *ti, struct bio *bio)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	mddev->pers->make_request(mddev, bio);

	return DM_MAPIO_SUBMITTED;
}

static const char *decipher_sync_action(struct mddev *mddev)
{
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return "frozen";

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			return "reshape";

		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				return "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				return "check";
			return "repair";
		}

		if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			return "recover";
	}

	return "idle";
}

static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	struct raid_set *rs = ti->private;
	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned sz = 0;
	int i, array_in_sync = 0;
	sector_t sync;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);

		if (rs->raid_type->level) {
			if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
				sync = rs->md.curr_resync_completed;
			else
				sync = rs->md.recovery_cp;

			if (sync >= rs->md.resync_max_sectors) {
				/*
				 * Sync complete.
				 */
				array_in_sync = 1;
				sync = rs->md.resync_max_sectors;
			} else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
				/*
				 * If "check" or "repair" is occurring, the array has
				 * undergone an initial sync and the health characters
				 * should not be 'a' anymore.
				 */
				array_in_sync = 1;
			} else {
				/*
				 * The array may be doing an initial sync, or it may
				 * be rebuilding individual components.  If all the
				 * devices are In_sync, then it is the array that is
				 * being initialized.
				 */
				for (i = 0; i < rs->md.raid_disks; i++)
					if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
						array_in_sync = 1;
			}
		} else {
			/* RAID0 */
			array_in_sync = 1;
			sync = rs->md.resync_max_sectors;
		}

		/*
		 * Status characters:
		 *  'D' = Dead/Failed device
		 *  'a' = Alive but not in-sync
		 *  'A' = Alive and in-sync
		 */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
				DMEMIT("D");
			else if (!array_in_sync ||
				 !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT("a");
			else
				DMEMIT("A");
		}

		/*
		 * In-sync ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the array
		 *   - Rebuilding a subset of devices of the array
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 */
		DMEMIT(" %llu/%llu",
		       (unsigned long long) sync,
		       (unsigned long long) rs->md.resync_max_sectors);

		/*
		 * Sync action:
		 *   See Documentation/device-mapper/dm-raid.txt for
		 *   information on each of these states.
		 */
		DMEMIT(" %s", decipher_sync_action(&rs->md));

		/*
		 * resync_mismatches/mismatch_cnt
		 *   This field shows the number of discrepancies found when
		 *   performing a "check" of the array.
		 */
		DMEMIT(" %llu",
		       (strcmp(rs->md.last_sync_action, "check")) ? 0 :
		       (unsigned long long)
		       atomic64_read(&rs->md.resync_mismatches));
		break;
	case STATUSTYPE_TABLE:
		/* The string you would use to construct this array */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2; /* for rebuilds */
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2;
		}

		raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2);
		if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
			raid_param_cnt--;

		DMEMIT("%s %u %u", rs->raid_type->name,
		       raid_param_cnt, rs->md.chunk_sectors);

		if ((rs->ctr_flags & CTR_FLAG_SYNC) &&
		    (rs->md.recovery_cp == MaxSector))
			DMEMIT(" sync");
		if (rs->ctr_flags & CTR_FLAG_NOSYNC)
			DMEMIT(" nosync");

		for (i = 0; i < rs->md.raid_disks; i++)
			if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT(" rebuild %u", i);

		if (rs->ctr_flags & CTR_FLAG_DAEMON_SLEEP)
			DMEMIT(" daemon_sleep %lu",
			       rs->md.bitmap_info.daemon_sleep);

		if (rs->ctr_flags & CTR_FLAG_MIN_RECOVERY_RATE)
			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);

		if (rs->ctr_flags & CTR_FLAG_MAX_RECOVERY_RATE)
			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);

		for (i = 0; i < rs->md.raid_disks; i++)
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				DMEMIT(" write_mostly %u", i);

		if (rs->ctr_flags & CTR_FLAG_MAX_WRITE_BEHIND)
			DMEMIT(" max_write_behind %lu",
			       rs->md.bitmap_info.max_write_behind);

		if (rs->ctr_flags & CTR_FLAG_STRIPE_CACHE) {
			struct r5conf *conf = rs->md.private;

			/* convert from kiB to sectors */
			DMEMIT(" stripe_cache %d",
			       conf ? conf->max_nr_stripes * 2 : 0);
		}

		if (rs->ctr_flags & CTR_FLAG_REGION_SIZE)
			DMEMIT(" region_size %lu",
			       rs->md.bitmap_info.chunksize >> 9);

		if (rs->ctr_flags & CTR_FLAG_RAID10_COPIES)
			DMEMIT(" raid10_copies %u",
			       raid10_md_layout_to_copies(rs->md.layout));

		if (rs->ctr_flags & CTR_FLAG_RAID10_FORMAT)
			DMEMIT(" raid10_format %s",
			       raid10_md_layout_to_format(rs->md.layout));

		DMEMIT(" %d", rs->md.raid_disks);
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (rs->dev[i].meta_dev)
				DMEMIT(" %s", rs->dev[i].meta_dev->name);
			else
				DMEMIT(" -");

			if (rs->dev[i].data_dev)
				DMEMIT(" %s", rs->dev[i].data_dev->name);
			else
				DMEMIT(" -");
		}
	}
}
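
/*
 * Example STATUSTYPE_INFO output (illustrative): a healthy, fully
 * synced two-way mirror reports
 *
 *   raid1 2 AA 250000/250000 idle 0
 *
 * i.e. <raid_type> <#devices> <health_chars> <sync_ratio> <sync_action>
 * <mismatch_cnt>, matching the DMEMIT calls above.
 */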

static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (!strcasecmp(argv[0], "reshape")) {
		DMERR("Reshape not supported.");
		return -EINVAL;
	}

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (!strcasecmp(argv[0], "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (!strcasecmp(argv[0], "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else {
		if (!strcasecmp(argv[0], "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (strcasecmp(argv[0], "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode */
		mddev->ro = 0;
		if (!mddev->suspended)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended)
		md_wakeup_thread(mddev->thread);

	return 0;
}
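
/*
 * Example (illustrative): the sync actions above map onto
 * "dmsetup message <mapped_device> 0 <action>", e.g.
 *
 *   dmsetup message my_raid 0 check
 *   dmsetup message my_raid 0 idle
 *
 * where "my_raid" is a hypothetical mapped-device name.
 */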

static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned i;
	int ret = 0;

	for (i = 0; !ret && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			ret = fn(ti,
				 rs->dev[i].data_dev,
				 0, /* No offset on data devs */
				 rs->md.dev_sectors,
				 data);

	return ret;
}

static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned chunk_size = rs->md.chunk_sectors << 9;
	struct r5conf *conf = rs->md.private;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
}

static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	md_stop_writes(&rs->md);
}

static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	mddev_suspend(&rs->md);
}

static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t failed_devices, cleared_failed_devices = 0;
	unsigned long flags;
	struct dm_raid_superblock *sb;
	struct md_rdev *r;

	for (i = 0; i < rs->md.raid_disks; i++) {
		r = &rs->dev[i].rdev;
		if (test_bit(Faulty, &r->flags) && r->sb_page &&
		    sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       " Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk').  If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must 'hot_remove_disk' ourselves.
			 */
			if ((r->raid_disk >= 0) &&
			    (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
				/* Failed to revive this device, try next */
				continue;

			r->raid_disk = i;
			r->saved_raid_disk = i;
			flags = r->flags;
			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);
			clear_bit(In_sync, &r->flags);
			if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
				r->raid_disk = -1;
				r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				r->recovery_offset = 0;
				cleared_failed_devices |= 1 << i;
			}
		}
	}
	if (cleared_failed_devices) {
		rdev_for_each(r, &rs->md) {
			sb = page_address(r->sb_page);
			failed_devices = le64_to_cpu(sb->failed_devices);
			failed_devices &= ~cleared_failed_devices;
			sb->failed_devices = cpu_to_le64(failed_devices);
		}
	}
}

static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	if (rs->raid_type->level) {
		set_bit(MD_CHANGE_DEVS, &rs->md.flags);

		if (!rs->bitmap_loaded) {
			bitmap_load(&rs->md);
			rs->bitmap_loaded = 1;
		} else {
			/*
			 * A secondary resume while the device is active.
			 * Take this opportunity to check whether any failed
			 * devices are reachable again.
			 */
			attempt_restore_of_faulty_devices(rs);
		}

		clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
	}

	mddev_resume(&rs->md);
}

static int raid_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		      struct bio_vec *biovec, int max_size)
{
	struct raid_set *rs = ti->private;
	struct md_personality *pers = rs->md.pers;

	if (pers && pers->mergeable_bvec)
		return min(max_size, pers->mergeable_bvec(&rs->md, bvm, biovec));

	/*
	 * In case we can't request the personality because
	 * the raid set is not running yet
	 *
	 * -> return safe minimum
	 */
	return rs->md.chunk_sectors;
}

static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 7, 0},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.postsuspend = raid_postsuspend,
	.resume = raid_resume,
	.merge = raid_merge,
};

static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);

module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");

MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
MODULE_ALIAS("dm-raid0");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");