#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES 253

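/* Minimum sectors of free reshape space per raid device */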
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)

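/* Minimum journal space 4 MiB in sectors */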
#define MIN_RAID456_JOURNAL_SPACE (4*2048)

static bool devices_handle_discard_safely = false;

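/*
 * dm-raid private flag bit in rdev->flags: marks a freshly
 * created superblock; cleared again during superblock validation.
 */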
#define FirstUse 10

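/*
 * Devices making up the raid set are held as metadata/data
 * device pairs; the embedded md_rdev is what is registered
 * with MD, and its sb_page carries the dm-raid superblock
 * when a metadata device is supplied.
 */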
struct raid_dev {
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

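/*
 * Bits for establishing rs->ctr_flags
 */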
#define __CTR_FLAG_SYNC			0
#define __CTR_FLAG_NOSYNC		1
#define __CTR_FLAG_REBUILD		2
#define __CTR_FLAG_DAEMON_SLEEP		3
#define __CTR_FLAG_MIN_RECOVERY_RATE	4
#define __CTR_FLAG_MAX_RECOVERY_RATE	5
#define __CTR_FLAG_MAX_WRITE_BEHIND	6
#define __CTR_FLAG_WRITE_MOSTLY		7
#define __CTR_FLAG_STRIPE_CACHE		8
#define __CTR_FLAG_REGION_SIZE		9
#define __CTR_FLAG_RAID10_COPIES	10
#define __CTR_FLAG_RAID10_FORMAT	11

#define __CTR_FLAG_DELTA_DISKS		12
#define __CTR_FLAG_DATA_OFFSET		13
#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14

#define __CTR_FLAG_JOURNAL_DEV		15

#define __CTR_FLAG_JOURNAL_MODE		16

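/*
 * Flags for rs->ctr_flags field.
 */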
#define CTR_FLAG_SYNC			(1 << __CTR_FLAG_SYNC)
#define CTR_FLAG_NOSYNC			(1 << __CTR_FLAG_NOSYNC)
#define CTR_FLAG_REBUILD		(1 << __CTR_FLAG_REBUILD)
#define CTR_FLAG_DAEMON_SLEEP		(1 << __CTR_FLAG_DAEMON_SLEEP)
#define CTR_FLAG_MIN_RECOVERY_RATE	(1 << __CTR_FLAG_MIN_RECOVERY_RATE)
#define CTR_FLAG_MAX_RECOVERY_RATE	(1 << __CTR_FLAG_MAX_RECOVERY_RATE)
#define CTR_FLAG_MAX_WRITE_BEHIND	(1 << __CTR_FLAG_MAX_WRITE_BEHIND)
#define CTR_FLAG_WRITE_MOSTLY		(1 << __CTR_FLAG_WRITE_MOSTLY)
#define CTR_FLAG_STRIPE_CACHE		(1 << __CTR_FLAG_STRIPE_CACHE)
#define CTR_FLAG_REGION_SIZE		(1 << __CTR_FLAG_REGION_SIZE)
#define CTR_FLAG_RAID10_COPIES		(1 << __CTR_FLAG_RAID10_COPIES)
#define CTR_FLAG_RAID10_FORMAT		(1 << __CTR_FLAG_RAID10_FORMAT)
#define CTR_FLAG_DELTA_DISKS		(1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET		(1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS	(1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
#define CTR_FLAG_JOURNAL_DEV		(1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE		(1 << __CTR_FLAG_JOURNAL_MODE)

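/*
 * Definitions of various constructor flags combinations
 * to be used in checks of valid / invalid flags
 * per raid level.
 */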
#define CTR_FLAGS_ANY_SYNC		(CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

#define CTR_FLAG_OPTIONS_NO_ARGS	(CTR_FLAGS_ANY_SYNC | \
					 CTR_FLAG_RAID10_USE_NEAR_SETS)

#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
				  CTR_FLAG_WRITE_MOSTLY | \
				  CTR_FLAG_DAEMON_SLEEP | \
				  CTR_FLAG_MIN_RECOVERY_RATE | \
				  CTR_FLAG_MAX_RECOVERY_RATE | \
				  CTR_FLAG_MAX_WRITE_BEHIND | \
				  CTR_FLAG_STRIPE_CACHE | \
				  CTR_FLAG_REGION_SIZE | \
				  CTR_FLAG_RAID10_COPIES | \
				  CTR_FLAG_RAID10_FORMAT | \
				  CTR_FLAG_DELTA_DISKS | \
				  CTR_FLAG_DATA_OFFSET | \
				  CTR_FLAG_JOURNAL_DEV | \
				  CTR_FLAG_JOURNAL_MODE)

#define RAID0_VALID_FLAGS	(CTR_FLAG_DATA_OFFSET)

#define RAID1_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_WRITE_MOSTLY | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET)

#define RAID10_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_RAID10_COPIES | \
				 CTR_FLAG_RAID10_FORMAT | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_RAID10_USE_NEAR_SETS)

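/*
 * "raid4/5/6" do not accept any raid1 or raid10 specific options
 *
 * "raid6" does not accept "nosync", because it is not guaranteed
 * that both parity and q-syndrome are being written properly with
 * any writes
 */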
#define RAID45_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)

#define RAID6_VALID_FLAGS	(CTR_FLAG_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)

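/*
 * Flags for rs->runtime_flags field
 * (RT_FLAG prefix meaning "runtime flag")
 */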
#define RT_FLAG_RS_PRERESUMED		0
#define RT_FLAG_RS_RESUMED		1
#define RT_FLAG_RS_BITMAP_LOADED	2
#define RT_FLAG_UPDATE_SBS		3
#define RT_FLAG_RESHAPE_RS		4
#define RT_FLAG_RS_SUSPENDED		5
#define RT_FLAG_RS_IN_SYNC		6
#define RT_FLAG_RS_RESYNCING		7
#define RT_FLAG_RS_GROW			8

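/* Array elements of 64 bit needed for rebuild/failed disk bits */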
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)

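/*
 * raid set level, layout and chunk sectors backup/restore
 */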
struct rs_layout {
	int new_level;
	int new_layout;
	int new_chunk_sectors;
};

struct raid_set {
	struct dm_target *ti;

	uint32_t stripe_cache_entries;
	unsigned long ctr_flags;
	unsigned long runtime_flags;

	uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

	int raid_disks;
	int delta_disks;
	int data_offset;
	int raid10_copies;
	int requested_bitmap_chunk_sectors;

	struct mddev md;
	struct raid_type *raid_type;

	sector_t array_sectors;
	sector_t dev_sectors;

	struct journal_dev {
		struct dm_dev *dev;
		struct md_rdev rdev;
		int mode;
	} journal_dev;

	struct raid_dev dev[];
};

static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	l->new_level = mddev->new_level;
	l->new_layout = mddev->new_layout;
	l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = l->new_level;
	mddev->new_layout = l->new_layout;
	mddev->new_chunk_sectors = l->new_chunk_sectors;
}

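/* raid10 algorithms (i.e. formats) */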
#define ALGORITHM_RAID10_DEFAULT	0
#define ALGORITHM_RAID10_NEAR		1
#define ALGORITHM_RAID10_OFFSET		2
#define ALGORITHM_RAID10_FAR		3

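/* Supported raid types and properties. */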
static struct raid_type {
	const char *name;
	const char *descr;
	const unsigned int parity_devs;
	const unsigned int minimal_devs;
	const unsigned int level;
	const unsigned int algorithm;
} raid_types[] = {
	{"raid0", "raid0 (striping)", 0, 2, 0, 0},
	{"raid1", "raid1 (mirroring)", 0, 2, 1, 0},
	{"raid10_far", "raid10 far (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_FAR},
	{"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
	{"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
	{"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
	{"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
	{"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
	{"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
	{"raid5_la", "raid5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "raid5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid6_zr", "raid6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "raid6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "raid6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE},
	{"raid6_n_6", "raid6 (dedicated parity/Q n/6)", 2, 4, 6, ALGORITHM_PARITY_N_6},
	{"raid6_ls_6", "raid6 (left symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_SYMMETRIC_6},
	{"raid6_rs_6", "raid6 (right symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_SYMMETRIC_6},
	{"raid6_la_6", "raid6 (left asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_ASYMMETRIC_6},
	{"raid6_ra_6", "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_ASYMMETRIC_6}
};

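/* True, if @v is in inclusive range [@min, @max] */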
static bool __within_range(long v, long min, long max)
{
	return v >= min && v <= max;
}

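/* All table line arguments are defined here */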
static struct arg_name_flag {
	const unsigned long flag;
	const char *name;
} __arg_name_flags[] = {
	{ CTR_FLAG_SYNC, "sync"},
	{ CTR_FLAG_NOSYNC, "nosync"},
	{ CTR_FLAG_REBUILD, "rebuild"},
	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
	{ CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
	{ CTR_FLAG_REGION_SIZE, "region_size"},
	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
	{ CTR_FLAG_DATA_OFFSET, "data_offset"},
	{ CTR_FLAG_DELTA_DISKS, "delta_disks"},
	{ CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
	{ CTR_FLAG_JOURNAL_DEV, "journal_dev" },
	{ CTR_FLAG_JOURNAL_MODE, "journal_mode" },
};

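/* Return argument name string for given @flag */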
static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
{
	if (hweight32(flag) == 1) {
		struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);

		while (anf-- > __arg_name_flags)
			if (flag & anf->flag)
				return anf->name;

	} else
		DMERR("%s called with more than one flag!", __func__);

	return NULL;
}

static struct {
	const int mode;
	const char *param;
} _raid456_journal_mode[] = {
	{ R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
	{ R5C_JOURNAL_MODE_WRITE_BACK, "writeback" }
};

static int dm_raid_journal_mode_to_md(const char *mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (!strcasecmp(mode, _raid456_journal_mode[m].param))
			return _raid456_journal_mode[m].mode;

	return -EINVAL;
}

static const char *md_journal_mode_to_dm_raid(const int mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (mode == _raid456_journal_mode[m].mode)
			return _raid456_journal_mode[m].param;

	return "unknown";
}

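/*
 * Bool helpers to test for various raid levels of a raid set.
 * It's level as reported by the superblock rather than
 * the requested raid_type passed to the constructor.
 */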
static bool rs_is_raid0(struct raid_set *rs)
{
	return !rs->md.level;
}

static bool rs_is_raid1(struct raid_set *rs)
{
	return rs->md.level == 1;
}

static bool rs_is_raid10(struct raid_set *rs)
{
	return rs->md.level == 10;
}

static bool rs_is_raid6(struct raid_set *rs)
{
	return rs->md.level == 6;
}

static bool rs_is_raid456(struct raid_set *rs)
{
	return __within_range(rs->md.level, 4, 6);
}

static bool __is_raid10_far(int layout);
static bool rs_is_reshapable(struct raid_set *rs)
{
	return rs_is_raid456(rs) ||
	       (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
}

static bool rs_is_recovering(struct raid_set *rs)
{
	return rs->md.recovery_cp < rs->md.dev_sectors;
}

static bool rs_is_reshaping(struct raid_set *rs)
{
	return rs->md.reshape_position != MaxSector;
}

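/*
 * bool helpers to test for various raid levels of a raid type @rt
 */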
static bool rt_is_raid0(struct raid_type *rt)
{
	return !rt->level;
}

static bool rt_is_raid1(struct raid_type *rt)
{
	return rt->level == 1;
}

static bool rt_is_raid10(struct raid_type *rt)
{
	return rt->level == 10;
}

static bool rt_is_raid45(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 5);
}

static bool rt_is_raid6(struct raid_type *rt)
{
	return rt->level == 6;
}

static bool rt_is_raid456(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 6);
}

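/* Return valid ctr flags for the raid level of @rs */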
static unsigned long __valid_flags(struct raid_set *rs)
{
	if (rt_is_raid0(rs->raid_type))
		return RAID0_VALID_FLAGS;
	else if (rt_is_raid1(rs->raid_type))
		return RAID1_VALID_FLAGS;
	else if (rt_is_raid10(rs->raid_type))
		return RAID10_VALID_FLAGS;
	else if (rt_is_raid45(rs->raid_type))
		return RAID45_VALID_FLAGS;
	else if (rt_is_raid6(rs->raid_type))
		return RAID6_VALID_FLAGS;

	return 0;
}

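/*
 * Check for valid flags set on @rs
 *
 * Has to be called after parsing of the ctr flags!
 */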
static int rs_check_for_valid_flags(struct raid_set *rs)
{
	if (rs->ctr_flags & ~__valid_flags(rs)) {
		rs->ti->error = "Invalid flags combination";
		return -EINVAL;
	}

	return 0;
}

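/* MD raid10 bit definitions and helpers */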
#define RAID10_OFFSET			(1 << 16)
#define RAID10_BROCKEN_USE_FAR_SETS	(1 << 17)
#define RAID10_USE_FAR_SETS		(1 << 18)
#define RAID10_FAR_COPIES_SHIFT		8

static unsigned int __raid10_near_copies(int layout)
{
	return layout & 0xFF;
}

static unsigned int __raid10_far_copies(int layout)
{
	return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

static bool __is_raid10_offset(int layout)
{
	return !!(layout & RAID10_OFFSET);
}

static bool __is_raid10_near(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
}

static bool __is_raid10_far(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
}

static const char *raid10_md_layout_to_format(int layout)
{
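	/*
	 * Bit 16 stands for "offset"
	 * (i.e. adjacent stripes hold copies)
	 *
	 * Refer to MD's raid10.c for details
	 */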
	if (__is_raid10_offset(layout))
		return "offset";

	if (__raid10_near_copies(layout) > 1)
		return "near";

	if (__raid10_far_copies(layout) > 1)
		return "far";

	return "unknown";
}

static int raid10_name_to_format(const char *name)
{
	if (!strcasecmp(name, "near"))
		return ALGORITHM_RAID10_NEAR;
	else if (!strcasecmp(name, "offset"))
		return ALGORITHM_RAID10_OFFSET;
	else if (!strcasecmp(name, "far"))
		return ALGORITHM_RAID10_FAR;

	return -EINVAL;
}

static unsigned int raid10_md_layout_to_copies(int layout)
{
	return max(__raid10_near_copies(layout), __raid10_far_copies(layout));
}

static int raid10_format_to_md_layout(struct raid_set *rs,
				      unsigned int algorithm,
				      unsigned int copies)
{
	unsigned int n = 1, f = 1, r = 0;

	if (algorithm == ALGORITHM_RAID10_DEFAULT ||
	    algorithm == ALGORITHM_RAID10_NEAR)
		n = copies;

	else if (algorithm == ALGORITHM_RAID10_OFFSET) {
		f = copies;
		r = RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else if (algorithm == ALGORITHM_RAID10_FAR) {
		f = copies;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else
		return -EINVAL;

	return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}

static bool __got_raid10(struct raid_type *rtp, const int layout)
{
	if (rtp->level == 10) {
		switch (rtp->algorithm) {
		case ALGORITHM_RAID10_DEFAULT:
		case ALGORITHM_RAID10_NEAR:
			return __is_raid10_near(layout);
		case ALGORITHM_RAID10_OFFSET:
			return __is_raid10_offset(layout);
		case ALGORITHM_RAID10_FAR:
			return __is_raid10_far(layout);
		default:
			break;
		}
	}

	return false;
}

static struct raid_type *get_raid_type(const char *name)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types)
		if (!strcasecmp(rtp->name, name))
			return rtp;

	return NULL;
}

static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types) {
		if (rtp->level == level &&
		    (__got_raid10(rtp, layout) || rtp->algorithm == layout))
			return rtp;
	}

	return NULL;
}

static void rs_set_rdev_sectors(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;

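	/*
	 * raid10 sets rdev->sectors to the device size, which
	 * is unintended in case of out-of-place reshaping
	 */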
	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags))
			rdev->sectors = mddev->dev_sectors;
}

static void rs_set_capacity(struct raid_set *rs)
{
	struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

	set_capacity_and_notify(gendisk, rs->md.array_sectors);
}

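/*
 * Set the mddev properties in @rs to the current
 * ones retrieved from the freshest superblock
 */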
static void rs_set_cur(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
}

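/*
 * Set the mddev properties in @rs to the new
 * ones requested by the ctr
 */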
static void rs_set_new(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->raid_disks = rs->raid_disks;
	mddev->delta_disks = 0;
}

static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
				       unsigned int raid_devs)
{
	unsigned int i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(struct_size(rs, dev, raid_devs), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->raid_disks = raid_devs;
	rs->delta_disks = 0;

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->stripe_cache_entries = 256;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = MaxSector;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

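	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */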
	return rs;
}

static void raid_set_free(struct raid_set *rs)
{
	int i;

	if (rs->journal_dev.dev) {
		md_rdev_clear(&rs->journal_dev.rdev);
		dm_put_device(rs->ti, rs->journal_dev.dev);
	}

	for (i = 0; i < rs->raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}

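/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words.  If there is a failure,
 * the caller must use raid_set_free() to unwind the operations.
 */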
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int r = 0;
	const char *arg;

	arg = dm_shift_arg(as);
	if (!arg)
		return -EINVAL;

	for (i = 0; i < rs->raid_disks; i++) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

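		/*
		 * There are no offsets initially.
		 * Out of place reshape will set them accordingly.
		 */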
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.new_data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "-")) {
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->dev[i].meta_dev);
			if (r) {
				rs->ti->error = "RAID metadata device lookup failure";
				return r;
			}

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page) {
				rs->ti->error = "Failed to allocate superblock page";
				return -ENOMEM;
			}
		}

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			if (rs->dev[i].meta_dev) {
				rs->ti->error = "No data device supplied with metadata device";
				return -EINVAL;
			}

			continue;
		}

		r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
				  &rs->dev[i].data_dev);
		if (r) {
			rs->ti->error = "RAID device lookup failure";
			return r;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (rs->journal_dev.dev)
		list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
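		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */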
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}

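/*
 * validate_region_size
 * @rs
 * @region_size:  region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */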
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (rs_is_raid0(rs))
		return 0;

	if (!region_size) {
		if (min_region_size > (1 << 13)) {
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13;
		}
	} else {
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	rs->md.bitmap_info.chunksize = to_bytes(region_size);

	return 0;
}

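/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */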
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned int i, rebuild_cnt = 0;
	unsigned int rebuilds_per_group = 0, copies;
	unsigned int group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->md.level) {
	case 0:
		break;
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.new_layout);
		if (copies < 2) {
			DMERR("Bogus raid10 data copies < 2!");
			return -EINVAL;
		}

		if (rebuild_cnt < copies)
			break;

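		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled.  In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g.    dev1 dev2 dev3 dev4 dev5 dev6
		 *          A    A    B    B    C    C
		 *          D    D    E    E    F    F
		 */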
		if (__is_raid10_near(rs->md.new_layout)) {
			for (i = 0; i < rs->md.raid_disks; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				if ((!rs->dev[i].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

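		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.  This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */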
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}

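/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *    [data_offset <sectors>]		Data offset in each component device
 *    [delta_disks <N>]			Number of devices to add to/remove
 *					from the raid set
 *    [journal_dev <dev>]		raid4/5/6 journaling device
 *    [journal_mode <mode>]		Journal mode for the journal device
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm.  (Default: near)
 *    [raid10_use_near_sets]		Use near sets instead of far sets
 */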
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
			     unsigned int num_raid_params)
{
	int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
	unsigned int raid10_copies = 2;
	unsigned int i, write_mostly = 0;
	unsigned int region_size = 0;
	sector_t max_io_len;
	const char *arg, *key;
	struct raid_dev *rd;
	struct raid_type *rt = rs->raid_type;

	arg = dm_shift_arg(as);
	num_raid_params--;

	if (kstrtoint(arg, 10, &value) < 0) {
		rs->ti->error = "Bad numerical argument given for chunk_size";
		return -EINVAL;
	}

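	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */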
	if (rt_is_raid1(rt)) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;

	for (i = 0; i < rs->raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

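	/*
	 * Second, parse the unordered optional arguments
	 */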
	for (i = 0; i < num_raid_params; i++) {
		key = dm_shift_arg(as);
		if (!key) {
			rs->ti->error = "Not enough raid parameters given";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
			if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'nosync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
			if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'sync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed";
				return -EINVAL;
			}
			continue;
		}

		arg = dm_shift_arg(as);
		i++;
		if (!arg) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_format' argument pair allowed";
				return -EINVAL;
			}
			if (!rt_is_raid10(rt)) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			raid10_format = raid10_name_to_format(arg);
			if (raid10_format < 0) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return raid10_format;
			}
			continue;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV))) {
			int r;
			struct md_rdev *jdev;

			if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 set journaling device allowed";
				return -EINVAL;
			}
			if (!rt_is_raid456(rt)) {
				rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->journal_dev.dev);
			if (r) {
				rs->ti->error = "raid4/5/6 journal device lookup failure";
				return r;
			}
			jdev = &rs->journal_dev.rdev;
			md_rdev_init(jdev);
			jdev->mddev = &rs->md;
			jdev->bdev = rs->journal_dev.dev->bdev;
			jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
			if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
				rs->ti->error = "No space for raid4/5/6 journal";
				return -ENOSPC;
			}
			rs->journal_dev.mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
			set_bit(Journal, &jdev->flags);
			continue;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE))) {
			int r;

			if (!test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'";
				return -EINVAL;
			}
			if (test_and_set_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed";
				return -EINVAL;
			}
			r = dm_raid_journal_mode_to_md(arg);
			if (r < 0) {
				rs->ti->error = "Invalid 'journal_mode' argument";
				return r;
			}
			rs->journal_dev.mode = r;
			continue;
		}

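		/*
		 * Parameters with number values from here on.
		 */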
		if (kstrtoint(arg, 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
			if (!__within_range(value, 0, rs->raid_disks - 1)) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}

			if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
				rs->ti->error = "rebuild for this index already given";
				return -EINVAL;
			}

			rd = rs->dev + value;
			clear_bit(In_sync, &rd->rdev.flags);
			clear_bit(Faulty, &rd->rdev.flags);
			rd->rdev.recovery_offset = 0;
			set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}

			if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
				rs->ti->error = "Invalid write_mostly index given";
				return -EINVAL;
			}

			write_mostly++;
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
			set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}

			if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_write_behind argument pair allowed";
				return -EINVAL;
			}

			if (value < 0 || value / 2 > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}

			rs->md.bitmap_info.max_write_behind = value / 2;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
			if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
				rs->ti->error = "Only one daemon_sleep argument pair allowed";
				return -EINVAL;
			}
			if (value < 0) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
			if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
				rs->ti->error = "Only one data_offset argument pair allowed";
				return -EINVAL;
			}

			if (value < 0 ||
			    (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
				rs->ti->error = "Bogus data_offset value";
				return -EINVAL;
			}
			rs->data_offset = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
			if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
				rs->ti->error = "Only one delta_disks argument pair allowed";
				return -EINVAL;
			}

			if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
				rs->ti->error = "Too many delta_disks requested";
				return -EINVAL;
			}

			rs->delta_disks = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
			if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
				rs->ti->error = "Only one stripe_cache argument pair allowed";
				return -EINVAL;
			}

			if (!rt_is_raid456(rt)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "Bogus stripe cache entries value";
				return -EINVAL;
			}
			rs->stripe_cache_entries = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one min_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
			if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
				rs->ti->error = "Only one region_size argument pair allowed";
				return -EINVAL;
			}

			region_size = value;
			rs->requested_bitmap_chunk_sectors = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid10_copies argument pair allowed";
				return -EINVAL;
			}

			if (!__within_range(value, 2, rs->md.raid_disks)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}

			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameter";
			return -EINVAL;
		}
	}

	if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
	    test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
		rs->ti->error = "sync and nosync are mutually exclusive";
		return -EINVAL;
	}

	if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) &&
	    (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ||
	     test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) {
		rs->ti->error = "sync/nosync and rebuild are mutually exclusive";
		return -EINVAL;
	}

	if (write_mostly >= rs->md.raid_disks) {
		rs->ti->error = "Can't set all raid1 devices to write_mostly";
		return -EINVAL;
	}

	if (rs->md.sync_speed_max &&
	    rs->md.sync_speed_min > rs->md.sync_speed_max) {
		rs->ti->error = "Bogus recovery rates";
		return -EINVAL;
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rt_is_raid10(rt)) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
		if (rs->md.new_layout < 0) {
			rs->ti->error = "Error getting raid10 format";
			return rs->md.new_layout;
		}

		rt = get_raid_type_by_ll(10, rs->md.new_layout);
		if (!rt) {
			rs->ti->error = "Failed to recognize new raid10 layout";
			return -EINVAL;
		}

		if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
		     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
		    test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
			rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
			return -EINVAL;
		}
	}

	rs->raid10_copies = raid10_copies;

	rs->md.persistent = 0;
	rs->md.external = 1;

	return rs_check_for_valid_flags(rs);
}

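/* Set raid4/5/6 cache size */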
static int rs_set_raid456_stripe_cache(struct raid_set *rs)
{
	int r;
	struct r5conf *conf;
	struct mddev *mddev = &rs->md;
	uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
	uint32_t nr_stripes = rs->stripe_cache_entries;

	if (!rt_is_raid456(rs->raid_type)) {
		rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
		return -EINVAL;
	}

	if (nr_stripes < min_stripes) {
		DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
		       nr_stripes, min_stripes);
		nr_stripes = min_stripes;
	}

	conf = mddev->private;
	if (!conf) {
		rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
		return -EINVAL;
	}

	if (conf->min_nr_stripes != nr_stripes) {
		r = raid5_set_cache_size(mddev, nr_stripes);
		if (r) {
			rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
			return r;
		}

		DMINFO("%u stripe cache entries", nr_stripes);
	}

	return 0;
}

static unsigned int mddev_data_stripes(struct raid_set *rs)
{
	return rs->md.raid_disks - rs->raid_type->parity_devs;
}

static unsigned int rs_data_stripes(struct raid_set *rs)
{
	return rs->raid_disks - rs->raid_type->parity_devs;
}

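/*
 * Retrieve rdev->sectors from any valid raid device of @rs
 * to allow userspace to pass in arbitrary "- -" device tuples.
 */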
static sector_t __rdev_sectors(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		struct md_rdev *rdev = &rs->dev[i].rdev;

		if (!test_bit(Journal, &rdev->flags) &&
		    rdev->bdev && rdev->sectors)
			return rdev->sectors;
	}

	return 0;
}

static int _check_data_dev_sectors(struct raid_set *rs)
{
	sector_t ds = ~0;
	struct md_rdev *rdev;

	rdev_for_each(rdev, &rs->md)
		if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
			ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
			if (ds < rs->md.dev_sectors) {
				rs->ti->error = "Component device(s) too small";
				return -EINVAL;
			}
		}

	return 0;
}

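/* Calculate the sectors per device and per array used for @rs */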
static int rs_set_dev_and_array_sectors(struct raid_set *rs, sector_t sectors, bool use_mddev)
{
	int delta_disks;
	unsigned int data_stripes;
	sector_t array_sectors = sectors, dev_sectors = sectors;
	struct mddev *mddev = &rs->md;

	if (use_mddev) {
		delta_disks = mddev->delta_disks;
		data_stripes = mddev_data_stripes(rs);
	} else {
		delta_disks = rs->delta_disks;
		data_stripes = rs_data_stripes(rs);
	}

	if (rt_is_raid1(rs->raid_type))
		;
	else if (rt_is_raid10(rs->raid_type)) {
		if (rs->raid10_copies < 2 ||
		    delta_disks < 0) {
			rs->ti->error = "Bogus raid10 data copies or delta disks";
			return -EINVAL;
		}

		dev_sectors *= rs->raid10_copies;
		if (sector_div(dev_sectors, data_stripes))
			goto bad;

		array_sectors = (data_stripes + delta_disks) * dev_sectors;
		if (sector_div(array_sectors, rs->raid10_copies))
			goto bad;

	} else if (sector_div(dev_sectors, data_stripes))
		goto bad;

	else
		array_sectors = (data_stripes + delta_disks) * dev_sectors;

	mddev->array_sectors = array_sectors;
	mddev->dev_sectors = dev_sectors;
	rs_set_rdev_sectors(rs);

	return _check_data_dev_sectors(rs);
bad:
	rs->ti->error = "Target length not divisible by number of data devices";
	return -EINVAL;
}

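/* Set up the resync checkpoint (recovery_cp) for @rs */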
static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
	if (rs_is_raid0(rs))
		rs->md.recovery_cp = MaxSector;
	else if (rs_is_raid6(rs))
		rs->md.recovery_cp = dev_sectors;
	else
		rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
				     ? MaxSector : dev_sectors;
}

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	smp_rmb();
	if (!rs_is_reshaping(rs)) {
		if (rs_is_raid10(rs))
			rs_set_rdev_sectors(rs);
		rs_set_capacity(rs);
	}
	dm_table_event(rs->ti->table);
}

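/*
 * Make sure a valid takeover (level switch) is being requested on @rs
 *
 * Conversions of raid sets from one MD personality to another
 * have to conform to restrictions which are enumerated below.
 */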
static int rs_check_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	unsigned int near_copies;

	if (rs->md.degraded) {
		rs->ti->error = "Can't takeover degraded raid set";
		return -EPERM;
	}

	if (rs_is_reshaping(rs)) {
		rs->ti->error = "Can't takeover reshaping raid set";
		return -EPERM;
	}

	switch (mddev->level) {
	case 0:
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 1)
			return 0;

		if (mddev->new_level == 10 &&
		    !(rs->raid_disks % mddev->raid_disks))
			return 0;

		if (__within_range(mddev->new_level, 4, 6) &&
		    mddev->new_layout == ALGORITHM_PARITY_N &&
		    mddev->raid_disks > 1)
			return 0;

		break;

	case 10:
		if (__is_raid10_offset(mddev->layout))
			break;

		near_copies = __raid10_near_copies(mddev->layout);

		if (mddev->new_level == 0) {
			if (near_copies > 1 &&
			    !(mddev->raid_disks % near_copies)) {
				mddev->raid_disks /= near_copies;
				mddev->delta_disks = mddev->raid_disks;
				return 0;
			}

			if (near_copies == 1 &&
			    __raid10_far_copies(mddev->layout) > 1)
				return 0;

			break;
		}

		if (mddev->new_level == 1 &&
		    max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
			return 0;

		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2)
			return 0;
		break;

	case 1:
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2) {
			mddev->degraded = 1;
			return 0;
		}

		if (mddev->new_level == 0 &&
		    mddev->raid_disks == 1)
			return 0;

		if (mddev->new_level == 10)
			return 0;
		break;

	case 4:
		if (mddev->new_level == 0)
			return 0;

		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 2)
			return 0;

		if (__within_range(mddev->new_level, 5, 6) &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;
		break;

	case 5:
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
		    mddev->raid_disks == 2)
			return 0;

		if (mddev->new_level == 6 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
			return 0;
		break;

	case 6:
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		if (mddev->new_level == 5 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
			return 0;

	default:
		break;
	}

	rs->ti->error = "takeover not possible";
	return -EINVAL;
}

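/* True, if @rs requested to be taken over */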
static bool rs_takeover_requested(struct raid_set *rs)
{
	return rs->md.new_level != rs->md.level;
}

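/* True, if @rs is requested to reshape by ctr */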
static bool rs_reshape_requested(struct raid_set *rs)
{
	bool change;
	struct mddev *mddev = &rs->md;

	if (rs_takeover_requested(rs))
		return false;

	if (rs_is_raid0(rs))
		return false;

	change = mddev->new_layout != mddev->layout ||
		 mddev->new_chunk_sectors != mddev->chunk_sectors ||
		 rs->delta_disks;

	if (rs_is_raid1(rs)) {
		if (rs->delta_disks)
			return !!rs->delta_disks;

		return !change &&
		       mddev->raid_disks != rs->raid_disks;
	}

	if (rs_is_raid10(rs))
		return change &&
		       !__is_raid10_far(mddev->new_layout) &&
		       rs->delta_disks >= 0;

	return change;
}

#define FEATURE_FLAG_SUPPORTS_V190	0x1

#define SB_FLAG_RESHAPE_ACTIVE		0x1
#define SB_FLAG_RESHAPE_BACKWARDS	0x2

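/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */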
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;
	__le32 compat_features;

	__le32 num_devices;
	__le32 array_position;

	__le64 events;
	__le64 failed_devices;

	__le64 disk_recovery_offset;

	__le64 array_resync_offset;

	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	__le32 flags;

	__le64 reshape_position;

	__le32 new_level;
	__le32 new_layout;
	__le32 new_stripe_sectors;
	__le32 delta_disks;

	__le64 array_sectors;

	__le64 data_offset;
	__le64 new_data_offset;

	__le64 sectors;

	__le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];

	__le32 incompat_features;

} __packed;

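/*
 * Check for reshape constraints on raid set @rs:
 *
 * - reshape function non-existent
 * - degraded set
 * - ongoing recovery
 * - ongoing reshape
 *
 * Returns 0 if none or -EPERM if given constraint
 *         and error message reference in @rs->ti->error
 */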
static int rs_check_reshape(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	if (!mddev->pers || !mddev->pers->check_reshape)
		rs->ti->error = "Reshape not supported";
	else if (mddev->degraded)
		rs->ti->error = "Can't reshape degraded raid set";
	else if (rs_is_recovering(rs))
		rs->ti->error = "Convert request on recovering raid set prohibited";
	else if (rs_is_reshaping(rs))
		rs->ti->error = "raid set already reshaping!";
	else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs)))
		rs->ti->error = "Reshaping only supported for raid1/4/5/6/10";
	else
		return 0;

	return -EPERM;
}

static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded && !force_reload)
		return 0;

	rdev->sb_loaded = 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		set_bit(Faulty, &rdev->flags);
		return -EIO;
	}

	rdev->sb_loaded = 1;

	return 0;
}

static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	failed_devices[0] = le64_to_cpu(sb->failed_devices);
	memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));

	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
		int i = ARRAY_SIZE(sb->extended_failed_devices);

		while (i--)
			failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
	}
}

static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	int i = ARRAY_SIZE(sb->extended_failed_devices);

	sb->failed_devices = cpu_to_le64(failed_devices[0]);
	while (i--)
		sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
}

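/*
 * Sync the on-disk superblock image of @rdev from the current
 * raid set and mddev state it belongs to.
 */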
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	bool update_failed_devices = false;
	unsigned int i;
	uint64_t failed_devices[DISKS_ARRAY_ELEMS];
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	if (!rdev->meta_bdev)
		return;

	BUG_ON(!rdev->sb_page);

	sb = page_address(rdev->sb_page);

	sb_retrieve_failed_devices(sb, failed_devices);

	for (i = 0; i < rs->raid_disks; i++)
		if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
			update_failed_devices = true;
			set_bit(i, (void *) failed_devices);
		}

	if (update_failed_devices)
		sb_update_failed_devices(sb, failed_devices);

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);

	sb->new_level = cpu_to_le32(mddev->new_level);
	sb->new_layout = cpu_to_le32(mddev->new_layout);
	sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);

	sb->delta_disks = cpu_to_le32(mddev->delta_disks);

	smp_rmb();
	sb->reshape_position = cpu_to_le64(mddev->reshape_position);
	if (le64_to_cpu(sb->reshape_position) != MaxSector) {
		sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);

		if (mddev->delta_disks < 0 || mddev->reshape_backwards)
			sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
	} else {
		sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
	}

	sb->array_sectors = cpu_to_le64(mddev->array_sectors);
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
	sb->sectors = cpu_to_le64(rdev->sectors);
	sb->incompat_features = cpu_to_le32(0);

	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
}

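/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */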
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int r;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	r = read_disk_sb(rdev, rdev->sb_size, false);
	if (r)
		return r;

	sb = page_address(rdev->sb_page);

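	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */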
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);
		sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

		set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);

		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}

static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
{
	int role;
	unsigned int d;
	struct mddev *mddev = &rs->md;
	uint64_t events_sb;
	uint64_t failed_devices[DISKS_ARRAY_ELEMS];
	struct dm_raid_superblock *sb;
	uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0;
	struct md_rdev *r;
	struct dm_raid_superblock *sb2;

	sb = page_address(rdev->sb_page);
	events_sb = le64_to_cpu(sb->events);

	mddev->events = events_sb ? : 1;

	mddev->reshape_position = MaxSector;

	mddev->raid_disks = le32_to_cpu(sb->num_devices);
	mddev->level = le32_to_cpu(sb->level);
	mddev->layout = le32_to_cpu(sb->layout);
	mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);

	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
		mddev->new_level = le32_to_cpu(sb->new_level);
		mddev->new_layout = le32_to_cpu(sb->new_layout);
		mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
		mddev->delta_disks = le32_to_cpu(sb->delta_disks);
		mddev->array_sectors = le64_to_cpu(sb->array_sectors);

		if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
			if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
				DMERR("Reshape requested but raid set is still reshaping");
				return -EINVAL;
			}

			if (mddev->delta_disks < 0 ||
			    (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
			else
				mddev->reshape_backwards = 0;

			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
		}

	} else {
		struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
		struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);

		if (rs_takeover_requested(rs)) {
			if (rt_cur && rt_new)
				DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
				      rt_cur->name, rt_new->name);
			else
				DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
			return -EINVAL;
		} else if (rs_reshape_requested(rs)) {
			DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
			if (mddev->layout != mddev->new_layout) {
				if (rt_cur && rt_new)
					DMERR(" current layout %s vs new layout %s",
					      rt_cur->name, rt_new->name);
				else
					DMERR(" current layout 0x%X vs new layout 0x%X",
					      le32_to_cpu(sb->layout), mddev->new_layout);
			}
			if (mddev->chunk_sectors != mddev->new_chunk_sectors)
				DMERR(" current stripe sectors %u vs new stripe sectors %u",
				      mddev->chunk_sectors, mddev->new_chunk_sectors);
			if (rs->delta_disks)
				DMERR(" current %u disks vs new %u disks",
				      mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
			if (rs_is_raid10(rs)) {
				DMERR(" Old layout: %s w/ %u copies",
				      raid10_md_layout_to_format(mddev->layout),
				      raid10_md_layout_to_copies(mddev->layout));
				DMERR(" New layout: %s w/ %u copies",
				      raid10_md_layout_to_format(mddev->new_layout),
				      raid10_md_layout_to_copies(mddev->new_layout));
			}
			return -EINVAL;
		}

		DMINFO("Discovered old metadata format; upgrading to extended metadata format");
	}

	if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);

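	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are two reasons we might not have a superblock:
	 * 1) The raid set is brand new - in which case, all of the
	 *    devices must have their In_sync bit set.  Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old raid set
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 * 3) This is/are a new device(s) being added to an old
	 *    raid set during takeover to a higher raid level
	 *    to provide capacity for redundancy or during reshape
	 *    to add capacity to grow the raid set.
	 */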
2317 d = 0;
2318 rdev_for_each(r, mddev) {
2319 if (test_bit(Journal, &rdev->flags))
2320 continue;
2321
2322 if (test_bit(FirstUse, &r->flags))
2323 new_devs++;
2324
2325 if (!test_bit(In_sync, &r->flags)) {
2326 DMINFO("Device %d specified for rebuild; clearing superblock",
2327 r->raid_disk);
2328 rebuilds++;
2329
2330 if (test_bit(FirstUse, &r->flags))
2331 rebuild_and_new++;
2332 }
2333
2334 d++;
2335 }

	if (new_devs == rs->raid_disks || !rebuilds) {
		/* All devices are fresh -> this is a brand-new raid set */
		if (new_devs == rs->raid_disks) {
			DMINFO("Superblocks created for new raid set");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs != rebuilds &&
			   new_devs != rs->delta_disks) {
			DMERR("New device injected into existing raid set without "
			      "'delta_disks' or 'rebuild' parameter specified");
			return -EINVAL;
		}
	} else if (new_devs && new_devs != rebuilds) {
		DMERR("%u 'rebuild' devices cannot be injected into"
		      " a raid set with %u other first-time devices",
		      rebuilds, new_devs);
		return -EINVAL;
	} else if (rebuilds) {
		if (rebuild_and_new && rebuilds != rebuild_and_new) {
			DMERR("new device%s provided without 'rebuild'",
			      new_devs > 1 ? "s" : "");
			return -EINVAL;
		} else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
			DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
			      (unsigned long long) mddev->recovery_cp);
			return -EINVAL;
		} else if (rs_is_reshaping(rs)) {
			DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
			      (unsigned long long) mddev->reshape_position);
			return -EINVAL;
		}
	}

	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	sb_retrieve_failed_devices(sb, failed_devices);
	rdev_for_each(r, mddev) {
		if (test_bit(Journal, &r->flags) ||
		    !r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;
		memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role < 0)
				continue;

			if (role != r->raid_disk) {
				if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
					if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
					    rs->raid_disks % rs->raid10_copies) {
						rs->ti->error =
							"Cannot change raid10 near set to odd # of devices!";
						return -EINVAL;
					}

					sb2->array_position = cpu_to_le32(r->raid_disk);

				} else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
					   !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
					   !rt_is_raid1(rs->raid_type)) {
					rs->ti->error = "Cannot change device positions in raid set";
					return -EINVAL;
				}

				DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (test_bit(role, (void *) failed_devices))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}

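/*
 * Validate @rdev against the freshest superblock state already merged
 * into @rs/@mddev by super_init_validation() and import the per-device
 * state (size, recovery offset, data offsets) from @rdev's superblock.
 */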
static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
{
	struct mddev *mddev = &rs->md;
	struct dm_raid_superblock *sb;

	if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0)
		return 0;

	sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(rs, rdev))
		return -EINVAL;

	if (le32_to_cpu(sb->compat_features) &&
	    le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
		rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
		return -EINVAL;
	}

	if (sb->incompat_features) {
		rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
		return -EINVAL;
	}

	/* Enable bitmap creation on raid levels != 0 and without a journal device */
	mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
	mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;

	if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
		/*
		 * Retrieve rdev size stored in superblock to be prepared for shrink.
		 * Check extended superblock members are present otherwise the size
		 * will not be set!
		 */
		if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
			rdev->sectors = le64_to_cpu(sb->sectors);

		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset == MaxSector)
			set_bit(In_sync, &rdev->flags);
		/*
		 * If no reshape is in progress -> we're recovering single
		 * disk(s) and have to set the device(s) to out-of-sync
		 */
		else if (!rs_is_reshaping(rs))
			clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_and_clear_bit(Faulty, &rdev->flags)) {
		rdev->recovery_offset = 0;
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
	}

	/* Reshape support -> restore respective data offsets */
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);

	return 0;
}

/*
 * Analyse superblocks and select the freshest one to define the
 * in-core state of the raid set; then validate all other devices
 * against it.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int r;
	struct md_rdev *rdev, *freshest;
	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each(rdev, mddev) {
		if (test_bit(Journal, &rdev->flags))
			continue;

		if (!rdev->meta_bdev)
			continue;

		/* Set superblock offset/size for metadata device. */
		rdev->sb_start = 0;
		rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
		if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
			DMERR("superblock size of a logical block is no longer valid");
			return -EINVAL;
		}

		/*
		 * Skipping super_load due to CTR_FLAG_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new.  This is the intended effect
		 * of the "sync" directive.
		 *
		 * With reshaping capability added, we must ensure that
		 * the "sync" directive is disallowed during the reshape.
		 */
		if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
			continue;

		r = super_load(rdev, freshest);

		switch (r) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			/*
			 * This indicates "no superblock" or "couldn't read
			 * superblock".  In the case of raid0 the data/metadata
			 * device pair has to be kept or the raid0 personality
			 * will fail to start the array.
			 */
			if (rs_is_raid0(rs))
				continue;

			/*
			 * We keep the dm_devs to be able to emit the device tuple
			 * properly on the table line in raid_status() (rather than
			 * mistakenly acting as if '- -' got passed into the constructor).
			 *
			 * The rdev has to stay on the same_set list to allow for
			 * the attempt to restore faulty devices on second resume.
			 */
			rdev->raid_disk = rdev->saved_raid_disk = -1;
			break;
		}
	}

	if (!freshest)
		return 0;

	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	rs->ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(rs, freshest))
		return -EINVAL;

	if (validate_raid_redundancy(rs)) {
		rs->ti->error = "Insufficient redundancy to activate array";
		return -EINVAL;
	}

	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags) &&
		    rdev != freshest &&
		    super_validate(rs, rdev))
			return -EINVAL;
	return 0;
}

/*
 * Adjust data_offset and new_data_offset on all disk members of @rs
 * for out of place reshaping if requested by the constructor.
 *
 * We need free space at the beginning of each raid disk for forward
 * and at the end for backward reshapes, which userspace has to provide
 * via the data_offset argument of the constructor.
 */
static int rs_adjust_data_offsets(struct raid_set *rs)
{
	sector_t data_offset = 0, new_data_offset = 0;
	struct md_rdev *rdev;

	/* Constructor did not request data offset change */
	if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
		if (!rs_is_reshapable(rs))
			goto out;

		return 0;
	}

	/* HM FIXME: get In_Sync raid_dev? */
	rdev = &rs->dev[0].rdev;

	if (rs->delta_disks < 0) {
		/*
		 * Removing disks (reshaping backwards):
		 *
		 * - before reshape: data is at offset 0 and free space
		 *		     is at end of each component LV
		 *
		 * - after reshape: data is at offset rs->data_offset != 0 on each component LV
		 */
		data_offset = 0;
		new_data_offset = rs->data_offset;

	} else if (rs->delta_disks > 0) {
		/*
		 * Adding disks (reshaping forwards):
		 *
		 * - before reshape: data is at offset rs->data_offset != 0 and
		 *		     free space is at the beginning of each component LV
		 *
		 * - after reshape: data is at offset 0 on each component LV
		 */
		data_offset = rs->data_offset;
		new_data_offset = 0;

	} else {
		/*
		 * Changing RAID layout or chunk size -> toggle offsets:
		 *
		 * - before reshape: data is at offset rs->data_offset == 0 and
		 *		     free space is at the end of each component LV
		 *		     -or-
		 *		     data is at offset rs->data_offset != 0 and
		 *		     free space is at the beginning of each component LV
		 *
		 * - after reshape: data is at offset 0 if it was at offset != 0
		 *		    or at offset != 0 if it was at offset 0
		 *		    on each component LV
		 */
		data_offset = rs->data_offset ? rdev->data_offset : 0;
		new_data_offset = data_offset ? 0 : rs->data_offset;
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
	}

	/*
	 * Make sure we got a minimum amount of free sectors per device.
	 */
	if (rs->data_offset &&
	    to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
		rs->ti->error = data_offset ? "No space for forward reshape" :
					      "No space for backward reshape";
		return -ENOSPC;
	}
out:
	/*
	 * Raise recovery_cp in case data_offset != 0 to
	 * avoid false recovery positives in the constructor.
	 */
	if (rs->md.recovery_cp < rs->md.dev_sectors)
		rs->md.recovery_cp += rs->dev[0].rdev.data_offset;

	/* Adjust data offsets on all rdevs but not on any raid4/5/6 journal device */
	rdev_for_each(rdev, &rs->md) {
		if (!test_bit(Journal, &rdev->flags)) {
			rdev->data_offset = data_offset;
			rdev->new_data_offset = new_data_offset;
		}
	}

	return 0;
}

/* Userspace reordered disks -> adjust raid_disk indexes in @rs */
static void __reorder_raid_disk_indexes(struct raid_set *rs)
{
	int i = 0;
	struct md_rdev *rdev;

	rdev_for_each(rdev, &rs->md) {
		if (!test_bit(Journal, &rdev->flags)) {
			rdev->raid_disk = i++;
			rdev->saved_raid_disk = rdev->new_raid_disk = -1;
		}
	}
}

/*
 * Setup @rs for takeover by a different raid level
 */
static int rs_setup_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;
	unsigned int d = mddev->raid_disks = rs->raid_disks;
	sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;

	if (rt_is_raid10(rs->raid_type)) {
		if (rs_is_raid0(rs)) {
			/* Userspace reordered disks -> adjust raid_disk indexes */
			__reorder_raid_disk_indexes(rs);

			/* raid0 -> raid10_far layout */
			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
								   rs->raid10_copies);
		} else if (rs_is_raid1(rs))
			/* raid1 -> raid10_near layout */
			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
								   rs->raid_disks);
		else
			return -EINVAL;

	}

	clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
	mddev->recovery_cp = MaxSector;

	while (d--) {
		rdev = &rs->dev[d].rdev;

		if (test_bit(d, (void *) rs->rebuild_disks)) {
			clear_bit(In_sync, &rdev->flags);
			clear_bit(Faulty, &rdev->flags);
			mddev->recovery_cp = rdev->recovery_offset = 0;
			/* Bitmap has to be created when we do an "up" takeover */
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		}

		rdev->new_data_offset = new_data_offset;
	}

	return 0;
}

/* Prepare @rs for reshape */
static int rs_prepare_reshape(struct raid_set *rs)
{
	bool reshape;
	struct mddev *mddev = &rs->md;

	if (rs_is_raid10(rs)) {
		if (rs->raid_disks != mddev->raid_disks &&
		    __is_raid10_near(mddev->layout) &&
		    rs->raid10_copies &&
		    rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
			/*
			 * Raid disk count has to be a multiple of data copies
			 * to allow this conversion.
			 *
			 * This is actually not a reshape; it is a rebuild
			 * of any additional mirrors per group.
			 */
			if (rs->raid_disks % rs->raid10_copies) {
				rs->ti->error = "Can't reshape raid10 mirror groups";
				return -EINVAL;
			}

			/* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */
			__reorder_raid_disk_indexes(rs);
			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
								   rs->raid10_copies);
			mddev->new_layout = mddev->layout;
			reshape = false;
		} else
			reshape = true;

	} else if (rs_is_raid456(rs))
		reshape = true;

	else if (rs_is_raid1(rs)) {
		if (rs->delta_disks) {
			/* Process raid1 via delta_disks */
			mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
			reshape = true;
		} else {
			/* Process raid1 without delta_disks */
			mddev->raid_disks = rs->raid_disks;
			reshape = false;
		}
	} else {
		rs->ti->error = "Called with bogus raid type";
		return -EINVAL;
	}

	if (reshape) {
		set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
	} else if (mddev->raid_disks < rs->raid_disks)
		/* Create new superblocks and bitmaps, if any new disks */
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);

	return 0;
}

/* Get reshape sectors from data_offsets or raid set */
static sector_t _get_reshape_sectors(struct raid_set *rs)
{
	struct md_rdev *rdev;
	sector_t reshape_sectors = 0;

	rdev_for_each(rdev, &rs->md)
		if (!test_bit(Journal, &rdev->flags)) {
			reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
					rdev->data_offset - rdev->new_data_offset :
					rdev->new_data_offset - rdev->data_offset;
			break;
		}

	return max(reshape_sectors, (sector_t) rs->data_offset);
}

/*
 * Reshape:
 * - change raid layout
 * - change chunk size
 * - add disks
 * - remove disks
 */
static int rs_setup_reshape(struct raid_set *rs)
{
	int r = 0;
	unsigned int cur_raid_devs, d;
	sector_t reshape_sectors = _get_reshape_sectors(rs);
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;

	mddev->delta_disks = rs->delta_disks;
	cur_raid_devs = mddev->raid_disks;

	/* Ignore impossible layout change whilst adding/removing disks */
	if (mddev->delta_disks &&
	    mddev->layout != mddev->new_layout) {
		DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
		mddev->new_layout = mddev->layout;
	}

	/*
	 * Adjust array size:
	 *
	 * - in case of adding disk(s), array size has
	 *   to grow after the disk adding reshape,
	 *   which'll happen in the event handler;
	 *   reshape will happen forward, so space has to
	 *   be available at the beginning of each disk
	 *
	 * - in case of removing disk(s), array size
	 *   has to shrink before starting the reshape,
	 *   which'll happen here;
	 *   reshape will happen backward, so space has to
	 *   be available at the end of each disk
	 *
	 * - data_offset and new_data_offset are
	 *   adjusted for aforementioned out of place
	 *   reshaping based on userspace passing in
	 *   the "data_offset <sectors>" key/value
	 *   pair via the constructor
	 */

	/* Add disk(s) */
	if (rs->delta_disks > 0) {
		/* Prepare disks for check in raid4/5/6/10 {check|start}_reshape */
		for (d = cur_raid_devs; d < rs->raid_disks; d++) {
			rdev = &rs->dev[d].rdev;
			clear_bit(In_sync, &rdev->flags);

			/*
			 * save_raid_disk needs to be -1, or recovery_offset will be set to 0
			 * (i.e. new disk), which'd prevent the disk from being properly
			 * recovered into the raid set.
			 */
			rdev->saved_raid_disk = -1;
			rdev->raid_disk = d;

			rdev->sectors = mddev->dev_sectors;
			rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
		}

		mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */

	/* Remove disk(s) */
	} else if (rs->delta_disks < 0) {
		r = rs_set_dev_and_array_sectors(rs, rs->ti->len, true);
		mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */

	/* Change layout and/or chunk size */
	} else {
		/*
		 * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size:
		 *
		 * keeping the number of disks and doing a layout change ->
		 *
		 * toggle reshape_backwards depending on data_offset:
		 *
		 * - free space upfront -> reshape forward
		 *
		 * - free space at the end -> reshape backward
		 *
		 * This utilizes free reshape space avoiding the need
		 * for userspace to move (parts of) LV segments in
		 * case of layout/chunksize change (for disk
		 * adding/removing reshape space has to be at
		 * the proper address - see above with delta_disks):
		 *
		 * add disk(s)    -> begin
		 * remove disk(s) -> end
		 */
		mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
	}

	/*
	 * Adjust device size for forward reshape
	 * because md_run() doesn't account for it.
	 */
	if (!mddev->reshape_backwards)
		rdev_for_each(rdev, &rs->md)
			if (!test_bit(Journal, &rdev->flags))
				rdev->sectors += reshape_sectors;

	return r;
}

/*
 * Enable/disable discard support on raid set depending on
 * RAID level and discard properties of underlying RAID members.
 */
static void configure_discard_support(struct raid_set *rs)
{
	int i;
	bool raid456;
	struct dm_target *ti = rs->ti;

	/*
	 * XXX: RAID level 4,5,6 require zeroing for safety.
	 */
	raid456 = rs_is_raid456(rs);

	for (i = 0; i < rs->raid_disks; i++) {
		struct request_queue *q;

		if (!rs->dev[i].rdev.bdev)
			continue;

		q = bdev_get_queue(rs->dev[i].rdev.bdev);
		if (!q || !blk_queue_discard(q))
			return;

		if (raid456) {
			if (!devices_handle_discard_safely) {
				DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
				DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
				return;
			}
		}
	}

	ti->num_discard_bios = 1;
}

/*
 * Construct a RAID0/1/10/4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>{0,}	\
 *	<#raid_devs> [<meta_dev1> <dev1>]{1,}
 *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 *
 * Userspace is free to initialize the metadata devices, hence the
 * superblocks to zero or to ensure their content is wiped before the
 * raid set gets constructed.  Keep in mind that reading a device's
 * superblock is the only way to identify its components.
 */
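/*
 * An illustrative mapping table (sketch only; start/length and the
 * major:minor device pairs are placeholder values): a raid4 set of
 * five devices with a 1 MiB (2048 sector) chunk size and no metadata
 * devices, hence '-' for each metadata slot:
 *
 *	0 1960893648 raid \
 *		raid4 1 2048 \
 *		5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
 */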
static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	bool resize = false;
	struct raid_type *rt;
	unsigned int num_raid_params, num_raid_devs;
	sector_t sb_array_sectors, rdev_sectors, reshape_sectors;
	struct raid_set *rs = NULL;
	const char *arg;
	struct rs_layout rs_layout;
	struct dm_arg_set as = { argc, argv }, as_nrd;
	struct dm_arg _args[] = {
		{ 0, as.argc, "Cannot understand number of raid parameters" },
		{ 1, 254, "Cannot understand number of raid devices parameters" }
	};

	arg = dm_shift_arg(&as);
	if (!arg) {
		ti->error = "No arguments";
		return -EINVAL;
	}

	rt = get_raid_type(arg);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}

	/* Must have <#raid_params> */
	if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
		return -EINVAL;

	/* Number of raid device tuples */
	as_nrd = as;
	dm_consume_args(&as_nrd, num_raid_params);
	_args[1].max = (as_nrd.argc - 1) / 2;
	if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
		return -EINVAL;

	if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
		ti->error = "Invalid number of supplied raid devices";
		return -EINVAL;
	}

	rs = raid_set_alloc(ti, rt, num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	r = parse_raid_params(rs, &as, num_raid_params);
	if (r)
		goto bad;

	r = parse_dev_params(rs, &as);
	if (r)
		goto bad;

	rs->md.sync_super = super_sync;

	/*
	 * Calculate ctr requested array and device sizes to allow
	 * for superblocks check and size comparisons.
	 *
	 * Any existing superblock will overwrite the array and device sizes.
	 */
	r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
	if (r)
		goto bad;

	/* Memorize just calculated, potentially larger sizes to grow the raid set in preresume */
	rs->array_sectors = rs->md.array_sectors;
	rs->dev_sectors = rs->md.dev_sectors;

	/*
	 * Backup any new raid set level, layout, ...
	 * requested to be able to compare to superblock
	 * members for conversion decisions.
	 */
	rs_config_backup(rs, &rs_layout);

	r = analyse_superblocks(ti, rs);
	if (r)
		goto bad;

	/* All in-core metadata now as of current superblocks after calling analyse_superblocks() */
	sb_array_sectors = rs->md.array_sectors;
	rdev_sectors = __rdev_sectors(rs);
	if (!rdev_sectors) {
		ti->error = "Invalid rdev size";
		r = -EINVAL;
		goto bad;
	}

	reshape_sectors = _get_reshape_sectors(rs);
	if (rs->dev_sectors != rdev_sectors) {
		resize = (rs->dev_sectors != rdev_sectors - reshape_sectors);
		if (rs->dev_sectors > rdev_sectors - reshape_sectors)
			set_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
	}

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_bios = 1;

	/* Restore any requested new layout for conversion decision */
	rs_config_restore(rs, &rs_layout);

	/*
	 * Now that we have any superblock metadata available,
	 * check for new, recovering, reshaping, to be taken over,
	 * to be reshaped or an existing, unchanged raid set to
	 * run in sequence.
	 */
	if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
		/* A new raid6 set has to be recovered to ensure proper parity and Q-Syndrome */
		if (rs_is_raid6(rs) &&
		    test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
			ti->error = "'nosync' not allowed for new raid6 set";
			r = -EINVAL;
			goto bad;
		}
		rs_setup_recovery(rs, 0);
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
		rs_set_new(rs);
	} else if (rs_is_recovering(rs)) {
		/* A recovering raid set may be resized */
		goto size_check;
	} else if (rs_is_reshaping(rs)) {
		/* Have to reject size change request during reshape */
		if (resize) {
			ti->error = "Can't resize a reshaping raid set";
			r = -EPERM;
			goto bad;
		}
		/* skip setup rs */
	} else if (rs_takeover_requested(rs)) {
		if (rs_is_reshaping(rs)) {
			ti->error = "Can't takeover a reshaping raid set";
			r = -EPERM;
			goto bad;
		}

		/* We can't takeover a journaled raid4/5/6 */
		if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
			ti->error = "Can't takeover a journaled raid4/5/6 set";
			r = -EPERM;
			goto bad;
		}

		/*
		 * If a takeover is needed, userspace sets any additional
		 * devices to rebuild and we can check for a valid request here.
		 *
		 * If acceptable, set the level to the new requested
		 * one, prohibit requesting recovery, allow the raid
		 * set to run and store superblocks during resume.
		 */
		r = rs_check_takeover(rs);
		if (r)
			goto bad;

		r = rs_setup_takeover(rs);
		if (r)
			goto bad;

		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
		/* Takeover ain't recovery, so disable recovery */
		rs_setup_recovery(rs, MaxSector);
		rs_set_new(rs);
	} else if (rs_reshape_requested(rs)) {
		/* Only request grow on raid set size extensions, not on reshapes. */
		clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);

		/*
		 * No need to check for 'ongoing' takeover here, because takeover
		 * is an instant operation as opposed to an ongoing reshape.
		 */

		/* We can't reshape a journaled raid4/5/6 */
		if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
			ti->error = "Can't reshape a journaled raid4/5/6 set";
			r = -EPERM;
			goto bad;
		}

		/* Out-of-place space has to be available to allow for a reshape unless raid1! */
		if (reshape_sectors || rs_is_raid1(rs)) {
			/*
			 * We can only prepare for a reshape here, because the
			 * raid set needs to run to provide the respective reshape
			 * check functions via its MD personality instance.
			 *
			 * So do the reshape check after md_run() succeeded.
			 */
			r = rs_prepare_reshape(rs);
			if (r)
				goto bad;

			/* Reshaping ain't recovery, so disable recovery */
			rs_setup_recovery(rs, MaxSector);
		}
		rs_set_cur(rs);
	} else {
size_check:
		/* May not set recovery when a device rebuild is requested */
		if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
			clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
			set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
			rs_setup_recovery(rs, MaxSector);
		} else if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
			/*
			 * Set raid set to current size, i.e. size as of
			 * superblocks to grow to larger size in preresume.
			 */
			r = rs_set_dev_and_array_sectors(rs, sb_array_sectors, false);
			if (r)
				goto bad;

			rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors);
		} else {
			/* This is no size change or it is shrinking, update size and record in superblocks */
			r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
			if (r)
				goto bad;

			if (sb_array_sectors > rs->array_sectors)
				set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
		}
		rs_set_cur(rs);
	}

	/* If constructor requested it, change data and new_data offsets */
	r = rs_adjust_data_offsets(rs);
	if (r)
		goto bad;

	/* Start raid set read-only and assumed clean to change in raid_resume() */
	rs->md.ro = 1;
	rs->md.in_sync = 1;

	/* Keep array frozen until resume. */
	set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);

	/* Has to be held on running the array */
	mddev_lock_nointr(&rs->md);
	r = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */
	if (r) {
		ti->error = "Failed to run raid array";
		mddev_unlock(&rs->md);
		goto bad;
	}

	r = md_start(&rs->md);
	if (r) {
		ti->error = "Failed to start raid array";
		mddev_unlock(&rs->md);
		goto bad_md_start;
	}

	/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
	if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
		r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
		if (r) {
			ti->error = "Failed to set raid4/5/6 journal mode";
			mddev_unlock(&rs->md);
			goto bad_journal_mode_set;
		}
	}

	mddev_suspend(&rs->md);
	set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);

	/* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
	if (rs_is_raid456(rs)) {
		r = rs_set_raid456_stripe_cache(rs);
		if (r)
			goto bad_stripe_cache;
	}

	/* Now do an early reshape check */
	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		r = rs_check_reshape(rs);
		if (r)
			goto bad_check_reshape;

		/* Restore new, ctr requested layout to perform check */
		rs_config_restore(rs, &rs_layout);

		if (rs->md.pers->start_reshape) {
			r = rs->md.pers->check_reshape(&rs->md);
			if (r) {
				ti->error = "Reshape check failed";
				goto bad_check_reshape;
			}
		}
	}

	/* Disable/enable discard support on raid set. */
	configure_discard_support(rs);

	mddev_unlock(&rs->md);
	return 0;

bad_md_start:
bad_journal_mode_set:
bad_stripe_cache:
bad_check_reshape:
	md_stop(&rs->md);
bad:
	raid_set_free(rs);

	return r;
}

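/* Destruct a raid mapping: stop the MD array and free the raid set */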
static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	md_stop(&rs->md);
	raid_set_free(rs);
}

static int raid_map(struct dm_target *ti, struct bio *bio)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/*
	 * If we're reshaping to add disk(s), ti->len and
	 * mddev->array_sectors will differ during the process
	 * (ti->len > mddev->array_sectors), so we have to requeue
	 * bios with addresses > mddev->array_sectors here or
	 * there will occur accesses beyond the end of device.
	 */
	if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
		return DM_MAPIO_REQUEUE;

	md_handle_request(mddev, bio);

	return DM_MAPIO_SUBMITTED;
}

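/* Return sync state string for @state */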
enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
static const char *sync_str(enum sync_state state)
{
	/* Has to be in above sync_state order! */
	static const char *sync_strs[] = {
		"frozen",
		"reshape",
		"resync",
		"check",
		"repair",
		"recover",
		"idle"
	};

	return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
}

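/* Return enum sync_state for @mddev derived from @recovery flags */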
static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
{
	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
		return st_frozen;

	/* The MD sync thread can be done with io or be interrupted but still be running */
	if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
	    (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
	     (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
			return st_reshape;

		if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
				return st_resync;
			if (test_bit(MD_RECOVERY_CHECK, &recovery))
				return st_check;
			return st_repair;
		}

		if (test_bit(MD_RECOVERY_RECOVER, &recovery))
			return st_recover;

		if (mddev->reshape_position != MaxSector)
			return st_reshape;
	}

	return st_idle;
}

/*
 * Return status string for @rdev
 *
 * Status characters:
 *
 *  'D' = Dead/Failed raid set component or raid4/5/6 journal device
 *  'a' = Alive but not in-sync raid set component _or_ alive raid4/5/6 'write_back' journal device
 *  'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
 *  '-' = Non-existing device (i.e. userspace passed '- -' into the ctr)
 */
static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
{
	if (!rdev->bdev)
		return "-";
	else if (test_bit(Faulty, &rdev->flags))
		return "D";
	else if (test_bit(Journal, &rdev->flags))
		return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
	else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) ||
		 (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) &&
		  !test_bit(In_sync, &rdev->flags)))
		return "a";
	else
		return "A";
}

/* Helper to return resync/reshape progress for @rs and runtime flags for raid set in sync / resynching */
static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
				enum sync_state state, sector_t resync_max_sectors)
{
	sector_t r;
	struct mddev *mddev = &rs->md;

	clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
	clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);

	if (rs_is_raid0(rs)) {
		r = resync_max_sectors;
		set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);

	} else {
		if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
			r = mddev->recovery_cp;
		else
			r = mddev->curr_resync_completed;

		if (state == st_idle && r >= resync_max_sectors) {
			/*
			 * Sync complete.
			 * In case we have finished recovering, the array is in sync.
			 */
			if (test_bit(MD_RECOVERY_RECOVER, &recovery))
				set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);

		} else if (state == st_recover)
			/*
			 * In case we are recovering, the array is not in sync
			 * and health chars should show the recovering legs.
			 *
			 * Already retrieved recovery offset from curr_resync_completed above.
			 */
			;

		else if (state == st_resync || state == st_reshape)
			/*
			 * If "resync/reshape" is occurring, the raid set
			 * is or may be out of sync hence the health
			 * characters shall be 'a'.
			 */
			set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);

		else if (state == st_check || state == st_repair)
			/*
			 * If "check" or "repair" is occurring, the raid set has
			 * undergone an initial sync and the health characters
			 * should not be 'a' anymore.
			 */
			set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);

		else if (test_bit(MD_RECOVERY_NEEDED, &recovery))
			/*
			 * We are idle and recovery is needed, prevent 'A' chars race
			 * caused by components still set to in-sync by constructor.
			 */
			set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);

		else {
			/*
			 * We are idle and the raid set may be doing an initial
			 * sync, or it may be rebuilding individual components.
			 * If all the devices are In_sync, then it is the raid set
			 * that is being initialized.
			 */
			struct md_rdev *rdev;

			set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
			rdev_for_each(rdev, mddev)
				if (!test_bit(Journal, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags)) {
					clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
					break;
				}
		}
	}

	return min(r, resync_max_sectors);
}

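/* Helper to return @dev name or "-" if !@dev */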
static const char *__get_dev_name(struct dm_dev *dev)
{
	return dev ? dev->name : "-";
}

static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;
	struct r5conf *conf = mddev->private;
	int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
	unsigned long recovery;
	unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned int sz = 0;
	unsigned int rebuild_writemostly_count = 0;
	sector_t progress, resync_max_sectors, resync_mismatches;
	enum sync_state state;
	struct raid_type *rt;

	switch (type) {
	case STATUSTYPE_INFO:
		/* *Should* always succeed */
		rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
		if (!rt)
			return;

		DMEMIT("%s %d ", rt->name, mddev->raid_disks);

		/* Access most recent mddev properties for status output */
		smp_rmb();
		/* Get sensible max sectors even if raid set not yet started */
		resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
				      mddev->resync_max_sectors : mddev->dev_sectors;
		recovery = rs->md.recovery;
		state = decipher_sync_action(mddev, recovery);
		progress = rs_get_progress(rs, recovery, state, resync_max_sectors);
		resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
				    atomic64_read(&mddev->resync_mismatches) : 0;

		/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
		for (i = 0; i < rs->raid_disks; i++)
			DMEMIT("%s", __raid_dev_status(rs, &rs->dev[i].rdev));

		/*
		 * In-sync/Reshape ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the raid set
		 *   - Rebuilding a subset of devices of the raid set
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 *
		 *  The reshape ratio shows the progress of
		 *  changing the raid layout or the number of
		 *  disks of a raid set.
		 */
		DMEMIT(" %llu/%llu", (unsigned long long) progress,
		       (unsigned long long) resync_max_sectors);

		/*
		 * v1.5.0+:
		 *
		 * Sync action:
		 *   See Documentation/admin-guide/device-mapper/dm-raid.rst for
		 *   information on each of these states.
		 */
		DMEMIT(" %s", sync_str(state));

		/*
		 * v1.5.0+:
		 *
		 * resync_mismatches/mismatch_cnt
		 *   This field shows the number of discrepancies found when
		 *   performing a "check" of the raid set.
		 */
		DMEMIT(" %llu", (unsigned long long) resync_mismatches);

		/*
		 * v1.9.0+:
		 *
		 * data_offset (needed for out of space reshaping)
		 *   This field shows the data offset into the data
		 *   image LV where the first stripes data starts.
		 *
		 * We keep data_offset equal on all raid disks of the set,
		 * so retrieving it from the first raid disk is sufficient.
		 */
		DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);

		/*
		 * v1.10.0+:
		 */
		DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
			      __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
		break;

	case STATUSTYPE_TABLE:
		/* Report the table line string you would use to construct this raid set */

		/*
		 * Count any rebuild or writemostly argument pairs and subtract the
		 * hweight count being added below of any rebuild and writemostly ctr flags.
		 */
		for (i = 0; i < rs->raid_disks; i++) {
			rebuild_writemostly_count += (test_bit(i, (void *) rs->rebuild_disks) ? 2 : 0) +
						     (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0);
		}
		rebuild_writemostly_count -= (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 2 : 0) +
					     (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags) ? 2 : 0);
		/* Calculate raid parameter count based on ^ rebuild/writemostly argument counts and ctr flags */
		raid_param_cnt += rebuild_writemostly_count +
				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
		/* Emit table line */
		/* This has to be in the documented order for userspace! */
		DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
		if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
		if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
		if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags))
			for (i = 0; i < rs->raid_disks; i++)
				if (test_bit(i, (void *) rs->rebuild_disks))
					DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD), i);
		if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
			       mddev->bitmap_info.daemon_sleep);
		if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
			       mddev->sync_speed_min);
		if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
			       mddev->sync_speed_max);
		if (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags))
			for (i = 0; i < rs->raid_disks; i++)
				if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
					DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
					       rs->dev[i].rdev.raid_disk);
		if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
			       mddev->bitmap_info.max_write_behind);
		if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
			       max_nr_stripes);
		if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
			       (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
		if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
			       raid10_md_layout_to_copies(mddev->layout));
		if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
			       raid10_md_layout_to_format(mddev->layout));
		if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
			       max(rs->delta_disks, mddev->delta_disks));
		if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
			       (unsigned long long) rs->data_offset);
		if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags))
			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV),
			       __get_dev_name(rs->journal_dev.dev));
		if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags))
			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE),
			       md_journal_mode_to_dm_raid(rs->journal_dev.mode));
		DMEMIT(" %d", rs->raid_disks);
		for (i = 0; i < rs->raid_disks; i++)
			DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev),
			       __get_dev_name(rs->dev[i].data_dev));
	}
}

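/*
 * Handle sync/recovery control messages sent to the target, i.e. the
 * equivalents of MD's sysfs "sync_action" interface: "frozen", "idle",
 * "resync", "recover", "check" and "repair".
 *
 * E.g. (illustrative only; <dev> names an active dm-raid device):
 *
 *	dmsetup message <dev> 0 check
 */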
static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result, unsigned maxlen)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (!strcasecmp(argv[0], "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		; /* MD_RECOVERY_NEEDED set below */
	else if (!strcasecmp(argv[0], "recover"))
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	else {
		if (!strcasecmp(argv[0], "check")) {
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!strcasecmp(argv[0], "repair")) {
			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else
			return -EINVAL;
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode */
		mddev->ro = 0;
		if (!mddev->suspended && mddev->sync_thread)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended && mddev->thread)
		md_wakeup_thread(mddev->thread);

	return 0;
}

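/* Iterate over all data devices of the raid set for table-wide device checks */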
static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned int i;
	int r = 0;

	for (i = 0; !r && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			r = fn(ti,
			       rs->dev[i].data_dev,
			       0, /* No offset on data devs */
			       rs->md.dev_sectors,
			       data);

	return r;
}

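/* Set io_min/io_opt and (raid0/raid10 only) discard limits derived from the chunk size */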
static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);

	blk_limits_io_min(limits, chunk_size_bytes);
	blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));

	/*
	 * RAID0 and RAID10 personalities require bio splitting,
	 * RAID1/4/5/6 don't and process large discard bios properly.
	 */
	if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
		limits->discard_granularity = chunk_size_bytes;
		limits->max_discard_sectors = rs->md.chunk_sectors;
	}
}

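/* Stop writes and suspend the MD personality ahead of a table swap or device removal */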
static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
		/* Writes have to be stopped before suspending to avoid deadlocks. */
		if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
			md_stop_writes(&rs->md);

		mddev_lock_nointr(&rs->md);
		mddev_suspend(&rs->md);
		mddev_unlock(&rs->md);
	}
}

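/*
 * Try to revive faulty devices of the raid set whose superblocks became
 * readable again (i.e. transient failures) by hot-removing and re-adding
 * them, and clear their failed-device bits in all superblocks on success.
 */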
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
	unsigned long flags;
	bool cleared = false;
	struct dm_raid_superblock *sb;
	struct mddev *mddev = &rs->md;
	struct md_rdev *r;

	/* RAID personalities have to provide hot add/remove methods or we need to bail out. */
	if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
		return;

	memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));

	for (i = 0; i < mddev->raid_disks; i++) {
		r = &rs->dev[i].rdev;

		/* HM FIXME: enhance journal device recovery processing */
		if (test_bit(Journal, &r->flags))
			continue;

		if (test_bit(Faulty, &r->flags) &&
		    r->meta_bdev && !read_disk_sb(r, r->sb_size, true)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       " Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk').	If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call 'hot_remove_disk'
			 * ourselves.
			 */
			flags = r->flags;
			clear_bit(In_sync, &r->flags); /* Mandatory for hot remove. */
			if (r->raid_disk >= 0) {
				if (mddev->pers->hot_remove_disk(mddev, r)) {
					/* Failed to revive this device, try next */
					r->flags = flags;
					continue;
				}
			} else
				r->raid_disk = r->saved_raid_disk = i;

			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);

			if (mddev->pers->hot_add_disk(mddev, r)) {
				/* Failed to revive this device, try next */
				r->raid_disk = r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				clear_bit(In_sync, &r->flags);
				r->recovery_offset = 0;
				set_bit(i, (void *) cleared_failed_devices);
				cleared = true;
			}
		}
	}

	/* If any failed devices could be cleared, update all sbs' failed_devices bits */
	if (cleared) {
		uint64_t failed_devices[DISKS_ARRAY_ELEMS];

		rdev_for_each(r, &rs->md) {
			if (test_bit(Journal, &r->flags))
				continue;

			sb = page_address(r->sb_page);
			sb_retrieve_failed_devices(sb, failed_devices);

			for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
				failed_devices[i] &= ~cleared_failed_devices[i];

			sb_update_failed_devices(sb, failed_devices);
		}
	}
}

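/* Load the dirty region bitmap */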
static int __load_dirty_region_bitmap(struct raid_set *rs)
{
	int r = 0;

	/* Try loading the bitmap unless "raid0", which does not have one */
	if (!rs_is_raid0(rs) &&
	    !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
		r = md_bitmap_load(&rs->md);
		if (r)
			DMERR("Failed to load bitmap");
	}

	return r;
}

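/* Enforce updating all superblocks */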
static void rs_update_sbs(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	int ro = mddev->ro;

	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	mddev->ro = 0;
	md_update_sb(mddev, 1);
	mddev->ro = ro;
}

/*
 * Reshape changes the raid algorithm of @rs to a new one within its
 * personality (e.g. raid6_zr -> raid6_nc), changes the stripe size,
 * or adds/removes disks from the raid set, thus growing/shrinking it.
 *
 * Call mddev_lock_nointr() before!
 */
static int rs_start_reshape(struct raid_set *rs)
{
	int r;
	struct mddev *mddev = &rs->md;
	struct md_personality *pers = mddev->pers;

	/* Don't allow the sync thread to work until the table gets reloaded. */
	set_bit(MD_RECOVERY_WAIT, &mddev->recovery);

	r = rs_setup_reshape(rs);
	if (r)
		return r;

	/*
	 * Check any reshape constraints enforced by the personality.
	 *
	 * May as well already kick the reshape off so that
	 * pers->start_reshape() becomes optional.
	 */
	r = pers->check_reshape(mddev);
	if (r) {
		rs->ti->error = "pers->check_reshape() failed";
		return r;
	}

	/*
	 * Personality may not provide a start reshape method, in which
	 * case check_reshape above has already covered everything.
	 */
	if (pers->start_reshape) {
		r = pers->start_reshape(mddev);
		if (r) {
			rs->ti->error = "pers->start_reshape() failed";
			return r;
		}
	}

	/*
	 * Now that reshape got set up, update superblocks to
	 * reflect the fact so that a table reload will
	 * access proper superblock content in the ctr.
	 */
	rs_update_sbs(rs);

	return 0;
}

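/*
 * Prepare the raid set for a resume: write out pending superblock
 * updates, load the dirty region bitmap, apply any requested size
 * grow and kick off or continue a requested resync or reshape.
 */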
static int raid_preresume(struct dm_target *ti)
{
	int r;
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/* This is a resume after a suspend of the raid set -> it's already started. */
	if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
		return 0;

	/*
	 * The superblocks need to be updated on disk if the
	 * array is new or new devices got added (thus zeroed
	 * out by userspace) or __load_dirty_region_bitmap
	 * will overwrite them in core with old data or fail.
	 */
	if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
		rs_update_sbs(rs);

	/* Load the bitmap from disk unless raid0 */
	r = __load_dirty_region_bitmap(rs);
	if (r)
		return r;

	/* We are extending the raid set size, adjust mddev/md_rdev sizes and set capacity. */
	if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
		mddev->array_sectors = rs->array_sectors;
		mddev->dev_sectors = rs->dev_sectors;
		rs_set_rdev_sectors(rs);
		rs_set_capacity(rs);
	}

	/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
	if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
	    (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
	     (rs->requested_bitmap_chunk_sectors &&
	      mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
		int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;

		r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, chunksize, 0);
		if (r)
			DMERR("Failed to resize bitmap");
	}

	/* Check for any resize/reshape on @rs and adjust/initiate */
	/* Be prepared for mddev_resume() in raid_resume() */
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		mddev->resync_min = mddev->recovery_cp;
		if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
			mddev->resync_max_sectors = mddev->dev_sectors;
	}

	/* Check for any reshape request unless new raid set */
	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		/* Initiate a reshape. */
		rs_set_rdev_sectors(rs);
		mddev_lock_nointr(mddev);
		r = rs_start_reshape(rs);
		mddev_unlock(mddev);
		if (r)
			DMWARN("Failed to check/start reshape, continuing without change");
		r = 0;
	}

	return r;
}

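/*
 * Resume the raid set: on a second and any later resume additionally
 * try to restore transiently failed devices, then unfreeze recovery
 * and let the MD personality continue processing I/O.
 */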
static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
		/*
		 * A secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
		attempt_restore_of_faulty_devices(rs);
	}

	if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
		/* Only reduce raid set size before running a disk removing reshape. */
		if (mddev->delta_disks < 0)
			rs_set_capacity(rs);

		mddev_lock_nointr(mddev);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		mddev->ro = 0;
		mddev->in_sync = 0;
		mddev_resume(mddev);
		mddev_unlock(mddev);
	}
}


static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 15, 1},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.postsuspend = raid_postsuspend,
	.preresume = raid_preresume,
	.resume = raid_resume,
};

static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);

module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");

MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
MODULE_ALIAS("dm-raid0");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");