/*
 * dm-raid: device-mapper "raid" target wrapping the MD RAID
 * personalities (raid0/1/10/4/5/6).
 *
 * This file is released under the GPL.
 */
#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES 253 /* md-raid kernel limit */

/* Minimum sectors of free reshape space per raid device */
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)

/* Minimum journal space 4 MiB in sectors */
#define MIN_RAID456_JOURNAL_SPACE (4*2048)

static bool devices_handle_discard_safely = false;

/*
 * The following flags are used by dm-raid to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10	/* rdev flag */
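
/*
 * A raid set member is backed by two DM devices: one holding the dm-raid
 * metadata (superblock and bitmap) and one holding the actual data/parity.
 * They are intended to live on the same physical device:
 *    |--------- Physical Device ---------|
 *    |- meta_dev -|------ data_dev ------|
 */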
struct raid_dev {
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};
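
/*
 * Bits for establishing rs->ctr_flags: one bit per constructor
 * argument name seen on the table line.
 */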
#define __CTR_FLAG_SYNC			0
#define __CTR_FLAG_NOSYNC		1
#define __CTR_FLAG_REBUILD		2
#define __CTR_FLAG_DAEMON_SLEEP		3
#define __CTR_FLAG_MIN_RECOVERY_RATE	4
#define __CTR_FLAG_MAX_RECOVERY_RATE	5
#define __CTR_FLAG_MAX_WRITE_BEHIND	6
#define __CTR_FLAG_WRITE_MOSTLY		7
#define __CTR_FLAG_STRIPE_CACHE		8
#define __CTR_FLAG_REGION_SIZE		9
#define __CTR_FLAG_RAID10_COPIES	10
#define __CTR_FLAG_RAID10_FORMAT	11

/* New for v1.9.0 */
#define __CTR_FLAG_DELTA_DISKS		12
#define __CTR_FLAG_DATA_OFFSET		13
#define __CTR_FLAG_RAID10_USE_NEAR_SETS	14

/* New for v1.10.0 */
#define __CTR_FLAG_JOURNAL_DEV		15

/* New for v1.11.1 */
#define __CTR_FLAG_JOURNAL_MODE		16
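
/*
 * Flags for rs->ctr_flags field.
 */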
#define CTR_FLAG_SYNC			(1 << __CTR_FLAG_SYNC)
#define CTR_FLAG_NOSYNC			(1 << __CTR_FLAG_NOSYNC)
#define CTR_FLAG_REBUILD		(1 << __CTR_FLAG_REBUILD)
#define CTR_FLAG_DAEMON_SLEEP		(1 << __CTR_FLAG_DAEMON_SLEEP)
#define CTR_FLAG_MIN_RECOVERY_RATE	(1 << __CTR_FLAG_MIN_RECOVERY_RATE)
#define CTR_FLAG_MAX_RECOVERY_RATE	(1 << __CTR_FLAG_MAX_RECOVERY_RATE)
#define CTR_FLAG_MAX_WRITE_BEHIND	(1 << __CTR_FLAG_MAX_WRITE_BEHIND)
#define CTR_FLAG_WRITE_MOSTLY		(1 << __CTR_FLAG_WRITE_MOSTLY)
#define CTR_FLAG_STRIPE_CACHE		(1 << __CTR_FLAG_STRIPE_CACHE)
#define CTR_FLAG_REGION_SIZE		(1 << __CTR_FLAG_REGION_SIZE)
#define CTR_FLAG_RAID10_COPIES		(1 << __CTR_FLAG_RAID10_COPIES)
#define CTR_FLAG_RAID10_FORMAT		(1 << __CTR_FLAG_RAID10_FORMAT)
#define CTR_FLAG_DELTA_DISKS		(1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET		(1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS	(1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
#define CTR_FLAG_JOURNAL_DEV		(1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE		(1 << __CTR_FLAG_JOURNAL_MODE)

/* Ctr flags which cause resume to keep the raid set frozen */
#define RESUME_STAY_FROZEN_FLAGS (CTR_FLAG_DELTA_DISKS | CTR_FLAG_DATA_OFFSET)

/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 * per raid level.
 */

/* Define all any sync flags */
#define CTR_FLAGS_ANY_SYNC		(CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS	(CTR_FLAGS_ANY_SYNC | \
					 CTR_FLAG_RAID10_USE_NEAR_SETS)

/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
				  CTR_FLAG_WRITE_MOSTLY | \
				  CTR_FLAG_DAEMON_SLEEP | \
				  CTR_FLAG_MIN_RECOVERY_RATE | \
				  CTR_FLAG_MAX_RECOVERY_RATE | \
				  CTR_FLAG_MAX_WRITE_BEHIND | \
				  CTR_FLAG_STRIPE_CACHE | \
				  CTR_FLAG_REGION_SIZE | \
				  CTR_FLAG_RAID10_COPIES | \
				  CTR_FLAG_RAID10_FORMAT | \
				  CTR_FLAG_DELTA_DISKS | \
				  CTR_FLAG_DATA_OFFSET)

/* Define all valid flags for MD raid0 */
#define RAID0_VALID_FLAGS	(CTR_FLAG_DATA_OFFSET)

/* Define all valid flags for MD raid1 */
#define RAID1_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_WRITE_MOSTLY | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET)

/* Define all valid flags for MD raid10 */
#define RAID10_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_RAID10_COPIES | \
				 CTR_FLAG_RAID10_FORMAT | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_RAID10_USE_NEAR_SETS)

/* Define all valid flags for MD raid4/5 */
#define RAID45_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)

/*
 * "raid6" does not accept "nosync", because it is not guaranteed
 * that both parity and q-syndrome are being written properly with
 * any writes.
 */
#define RAID6_VALID_FLAGS	(CTR_FLAG_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)
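
/*
 * Flags for rs->runtime_flags field
 * (RT_FLAG prefix meaning "runtime flag")
 *
 * These are all internal and used to define runtime state,
 * e.g. to prevent another resume from preresume processing
 * the raid set all over again.
 */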
#define RT_FLAG_RS_PRERESUMED		0
#define RT_FLAG_RS_RESUMED		1
#define RT_FLAG_RS_BITMAP_LOADED	2
#define RT_FLAG_UPDATE_SBS		3
#define RT_FLAG_RESHAPE_RS		4
#define RT_FLAG_RS_SUSPENDED		5

/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
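
/*
 * raid set level, layout and chunk sectors backup/restore
 */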
struct rs_layout {
	int new_level;
	int new_layout;
	int new_chunk_sectors;
};

struct raid_set {
	struct dm_target *ti;

	uint32_t bitmap_loaded;
	uint32_t stripe_cache_entries;
	unsigned long ctr_flags;	/* CTR_FLAG_* constructor flags */
	unsigned long runtime_flags;	/* RT_FLAG_* runtime state */

	uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

	int raid_disks;
	int delta_disks;
	int data_offset;
	int raid10_copies;
	int requested_bitmap_chunk_sectors;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	/* Optional raid4/5/6 journal device */
	struct journal_dev {
		struct dm_dev *dev;
		struct md_rdev rdev;
		int mode;
	} journal_dev;

	struct raid_dev dev[0];
};

static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	l->new_level = mddev->new_level;
	l->new_layout = mddev->new_layout;
	l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = l->new_level;
	mddev->new_layout = l->new_layout;
	mddev->new_chunk_sectors = l->new_chunk_sectors;
}

/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT	0
#define ALGORITHM_RAID10_NEAR		1
#define ALGORITHM_RAID10_OFFSET		2
#define ALGORITHM_RAID10_FAR		3

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned int parity_devs;	/* # of parity devices. */
	const unsigned int minimal_devs;/* minimal # of devices in set. */
	const unsigned int level;	/* RAID level. */
	const unsigned int algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid0",	  "raid0 (striping)",			    0, 2, 0,  0},
	{"raid1",	  "raid1 (mirroring)",			    0, 2, 1,  0},
	{"raid10_far",	  "raid10 far (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_FAR},
	{"raid10_offset", "raid10 offset (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_OFFSET},
	{"raid10_near",	  "raid10 near (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_NEAR},
	{"raid10",	  "raid10 (striped mirrors)",		    0, 2, 10, ALGORITHM_RAID10_DEFAULT},
	{"raid4",	  "raid4 (dedicated first parity disk)",    1, 2, 5,  ALGORITHM_PARITY_0},
	{"raid5_n",	  "raid5 (dedicated last parity disk)",	    1, 2, 5,  ALGORITHM_PARITY_N},
	{"raid5_ls",	  "raid5 (left symmetric)",		    1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs",	  "raid5 (right symmetric)",		    1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid5_la",	  "raid5 (left asymmetric)",		    1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra",	  "raid5 (right asymmetric)",		    1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid6_zr",	  "raid6 (zero restart)",		    2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr",	  "raid6 (N restart)",			    2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc",	  "raid6 (N continue)",			    2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE},
	{"raid6_n_6",	  "raid6 (dedicated parity/Q n/6)",	    2, 4, 6,  ALGORITHM_PARITY_N_6},
	{"raid6_ls_6",	  "raid6 (left symmetric dedicated Q 6)",   2, 4, 6,  ALGORITHM_LEFT_SYMMETRIC_6},
	{"raid6_rs_6",	  "raid6 (right symmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_RIGHT_SYMMETRIC_6},
	{"raid6_la_6",	  "raid6 (left asymmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_LEFT_ASYMMETRIC_6},
	{"raid6_ra_6",	  "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6,  ALGORITHM_RIGHT_ASYMMETRIC_6}
};

/* True, if @v is in inclusive range [@min, @max] */
static bool __within_range(long v, long min, long max)
{
	return v >= min && v <= max;
}

/* All table line arguments are defined here */
static struct arg_name_flag {
	const unsigned long flag;
	const char *name;
} __arg_name_flags[] = {
	{ CTR_FLAG_SYNC, "sync"},
	{ CTR_FLAG_NOSYNC, "nosync"},
	{ CTR_FLAG_REBUILD, "rebuild"},
	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
	{ CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
	{ CTR_FLAG_REGION_SIZE, "region_size"},
	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
	{ CTR_FLAG_DATA_OFFSET, "data_offset"},
	{ CTR_FLAG_DELTA_DISKS, "delta_disks"},
	{ CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
	{ CTR_FLAG_JOURNAL_DEV, "journal_dev" },
	{ CTR_FLAG_JOURNAL_MODE, "journal_mode" },
};

/* Return argument name string for given @flag */
static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
{
	if (hweight32(flag) == 1) {
		/* Walk the table backwards for the single bit set */
		struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);

		while (anf-- > __arg_name_flags)
			if (flag & anf->flag)
				return anf->name;

	} else
		DMERR("%s called with more than one flag!", __func__);

	return NULL;
}

/* Mapping table of dm-raid journal mode strings to MD raid4/5/6 journal modes */
static struct {
	const int mode;
	const char *param;
} _raid456_journal_mode[] = {
	{ R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
	{ R5C_JOURNAL_MODE_WRITE_BACK,    "writeback" }
};

/* Return MD raid4/5/6 journal mode for dm @mode string */
static int dm_raid_journal_mode_to_md(const char *mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (!strcasecmp(mode, _raid456_journal_mode[m].param))
			return _raid456_journal_mode[m].mode;

	return -EINVAL;
}

/* Return dm-raid raid4/5/6 journal mode string for MD @mode */
static const char *md_journal_mode_to_dm_raid(const int mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (mode == _raid456_journal_mode[m].mode)
			return _raid456_journal_mode[m].param;

	return "unknown";
}
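
/*
 * Bool helpers to test for various raid levels of a raid set.
 * It's level as reported by the superblock rather than
 * the requested raid_type passed to the constructor.
 */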
/* Return true, if raid set in @rs is raid0 */
static bool rs_is_raid0(struct raid_set *rs)
{
	return !rs->md.level;
}

/* Return true, if raid set in @rs is raid1 */
static bool rs_is_raid1(struct raid_set *rs)
{
	return rs->md.level == 1;
}

/* Return true, if raid set in @rs is raid10 */
static bool rs_is_raid10(struct raid_set *rs)
{
	return rs->md.level == 10;
}

/* Return true, if raid set in @rs is level 6 */
static bool rs_is_raid6(struct raid_set *rs)
{
	return rs->md.level == 6;
}

/* Return true, if raid set in @rs is level 4, 5 or 6 */
static bool rs_is_raid456(struct raid_set *rs)
{
	return __within_range(rs->md.level, 4, 6);
}

/* Return true, if raid set in @rs is reshapable */
static bool __is_raid10_far(int layout);
static bool rs_is_reshapable(struct raid_set *rs)
{
	return rs_is_raid456(rs) ||
	       (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
}

/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
	return rs->md.recovery_cp < rs->md.dev_sectors;
}

/* Return true, if raid set in @rs is reshaping */
static bool rs_is_reshaping(struct raid_set *rs)
{
	return rs->md.reshape_position != MaxSector;
}

/*
 * bool helpers to test for various raid levels of a raid type @rt
 */
/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
	return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
	return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
	return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4 or raid5 */
static bool rt_is_raid45(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
	return rt->level == 6;
}

/* Return true, if raid type in @rt is raid4/5/6 */
static bool rt_is_raid456(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 6);
}

/* Return valid ctr flags for the raid level of @rs */
static unsigned long __valid_flags(struct raid_set *rs)
{
	if (rt_is_raid0(rs->raid_type))
		return RAID0_VALID_FLAGS;
	else if (rt_is_raid1(rs->raid_type))
		return RAID1_VALID_FLAGS;
	else if (rt_is_raid10(rs->raid_type))
		return RAID10_VALID_FLAGS;
	else if (rt_is_raid45(rs->raid_type))
		return RAID45_VALID_FLAGS;
	else if (rt_is_raid6(rs->raid_type))
		return RAID6_VALID_FLAGS;

	return 0;
}

/*
 * Check for invalid flags set on @rs
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_valid_flags(struct raid_set *rs)
{
	if (rs->ctr_flags & ~__valid_flags(rs)) {
		rs->ti->error = "Invalid flags combination";
		return -EINVAL;
	}

	return 0;
}
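
/*
 * MD raid10 bit definitions and helpers:
 * near copies live in the low byte of the layout, far copies in the
 * second byte; the remaining bits select offset/far-set behaviour.
 */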
#define RAID10_OFFSET			(1 << 16) /* stripes with data copies are adjacent on devices */
#define RAID10_BROCKEN_USE_FAR_SETS	(1 << 17) /* Broken in raid10.c: use whole device sets off */
#define RAID10_USE_FAR_SETS		(1 << 18) /* Use sets instead of whole stripe rotation */
#define RAID10_FAR_COPIES_SHIFT		8	  /* raid10 # far copies shift (2nd byte of layout) */

/* Return md raid10 near copies for @layout */
static unsigned int __raid10_near_copies(int layout)
{
	return layout & 0xFF;
}

/* Return md raid10 far copies for @layout */
static unsigned int __raid10_far_copies(int layout)
{
	return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

/* Return true if md raid10 offset for @layout */
static bool __is_raid10_offset(int layout)
{
	return !!(layout & RAID10_OFFSET);
}

/* Return true if md raid10 near for @layout */
static bool __is_raid10_near(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
}

/* Return true if md raid10 far for @layout */
static bool __is_raid10_far(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
}

/* Return md raid10 layout string for @layout */
static const char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 stands for "offset"
	 * (i.e. adjacent stripes hold copies)
	 *
	 * Refer to MD's raid10.c for details
	 */
	if (__is_raid10_offset(layout))
		return "offset";

	if (__raid10_near_copies(layout) > 1)
		return "near";

	if (__raid10_far_copies(layout) > 1)
		return "far";

	return "unknown";
}

/* Return md raid10 algorithm for @name */
static int raid10_name_to_format(const char *name)
{
	if (!strcasecmp(name, "near"))
		return ALGORITHM_RAID10_NEAR;
	else if (!strcasecmp(name, "offset"))
		return ALGORITHM_RAID10_OFFSET;
	else if (!strcasecmp(name, "far"))
		return ALGORITHM_RAID10_FAR;

	return -EINVAL;
}

/* Return md raid10 copies for @layout */
static unsigned int raid10_md_layout_to_copies(int layout)
{
	return max(__raid10_near_copies(layout), __raid10_far_copies(layout));
}

/* Return md raid10 layout integer for @algorithm and @copies */
static int raid10_format_to_md_layout(struct raid_set *rs,
				      unsigned int algorithm,
				      unsigned int copies)
{
	unsigned int n = 1, f = 1, r = 0;

	/*
	 * Pack near copies (n) into the low byte and far copies (f) into
	 * the second byte; the offset and use-far-sets bits are set on top
	 * depending on the requested algorithm.
	 */
	if (algorithm == ALGORITHM_RAID10_DEFAULT ||
	    algorithm == ALGORITHM_RAID10_NEAR)
		n = copies;

	else if (algorithm == ALGORITHM_RAID10_OFFSET) {
		f = copies;
		r = RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else if (algorithm == ALGORITHM_RAID10_FAR) {
		f = copies;
		r = !RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else
		return -EINVAL;

	return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}

/* Return true, if raid type @rtp describes the raid10 variant given by md @layout */
static bool __got_raid10(struct raid_type *rtp, const int layout)
{
	if (rtp->level == 10) {
		switch (rtp->algorithm) {
		case ALGORITHM_RAID10_DEFAULT:
		case ALGORITHM_RAID10_NEAR:
			return __is_raid10_near(layout);
		case ALGORITHM_RAID10_OFFSET:
			return __is_raid10_offset(layout);
		case ALGORITHM_RAID10_FAR:
			return __is_raid10_far(layout);
		default:
			break;
		}
	}

	return false;
}

/* Return raid_type for @name */
static struct raid_type *get_raid_type(const char *name)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types)
		if (!strcasecmp(rtp->name, name))
			return rtp;

	return NULL;
}

/* Return raid_type derived from @level and @layout */
static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types) {
		/* RAID10 special checks based on @layout flags/properties */
		if (rtp->level == level &&
		    (__got_raid10(rtp, layout) || rtp->algorithm == layout))
			return rtp;
	}

	return NULL;
}

/* Adjust rdev sectors and set the raid set capacity */
static void rs_set_capacity(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;
	struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

	/*
	 * raid10 sets rdev->sector to the device size, which
	 * is unintended in case of out-of-place reshaping
	 */
	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags))
			rdev->sectors = mddev->dev_sectors;

	set_capacity(gendisk, mddev->array_sectors);
	revalidate_disk(gendisk);
}

/*
 * Set the mddev properties in @rs to the current
 * ones retrieved from the freshest superblock
 */
static void rs_set_cur(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
}

/*
 * Set the mddev properties in @rs to the new
 * ones requested by the ctr
 */
static void rs_set_new(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->raid_disks = rs->raid_disks;
	mddev->delta_disks = 0;
}

static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
				       unsigned int raid_devs)
{
	unsigned int i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->raid_disks = raid_devs;
	rs->delta_disks = 0;

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->stripe_cache_entries = 256;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = MaxSector;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}

static void raid_set_free(struct raid_set *rs)
{
	int i;

	if (rs->journal_dev.dev) {
		md_rdev_clear(&rs->journal_dev.rdev);
		dm_put_device(rs->ti, rs->journal_dev.dev);
	}

	for (i = 0; i < rs->raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}
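
/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words.  If there is a failure,
 * the caller must use raid_set_free() to unwind the operations.
 */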
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int r = 0;
	const char *arg;

	/* Put off the number of raid devices argument to get to dev pairs */
	arg = dm_shift_arg(as);
	if (!arg)
		return -EINVAL;

	for (i = 0; i < rs->raid_disks; i++) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets initially.
		 * Out of place reshape will set them accordingly.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.new_data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "-")) {
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->dev[i].meta_dev);
			if (r) {
				rs->ti->error = "RAID metadata device lookup failure";
				return r;
			}

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page) {
				rs->ti->error = "Failed to allocate superblock page";
				return -ENOMEM;
			}
		}

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			if (rs->dev[i].meta_dev) {
				rs->ti->error = "No data device supplied with metadata device";
				return -EINVAL;
			}

			continue;
		}

		r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
				  &rs->dev[i].data_dev);
		if (r) {
			rs->ti->error = "RAID device lookup failure";
			return r;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (rs->journal_dev.dev)
		list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}
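
/*
 * validate_region_size
 * @rs
 * @region_size:  region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */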
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (rs_is_raid0(rs))
		return 0;

	if (!region_size) {
		/*
		 * Choose a reasonable default.	 All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = to_bytes(region_size);

	return 0;
}
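
/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */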
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned int i, rebuild_cnt = 0;
	unsigned int rebuilds_per_group = 0, copies;
	unsigned int group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->raid_type->level) {
	case 0:
		break;
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.new_layout);
		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled.  The copies of a
		 * chunk land on adjacent devices, wrapping across rows when
		 * the device count is not a multiple of the copy count, so
		 * walk the devices and reset the per-group failure count at
		 * each group boundary.
		 */
		if (__is_raid10_near(rs->md.new_layout)) {
			for (i = 0; i < rs->md.raid_disks; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				if ((!rs->dev[i].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.  (Note that "far" and "offset" formats only
		 * support two copies right now.  These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.	This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}
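
/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the
 *					index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *    [journal_dev <dev>]		raid4/5/6 journaling device
 *    [journal_mode <mode>]		"writethrough" or "writeback"
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>] Layout algorithm.  (Default: near)
 *
 * An illustrative table line using these (device paths are hypothetical):
 *    0 976562176 raid raid5_ls 3 128 region_size 8192 4 - /dev/sda - /dev/sdb - /dev/sdc - /dev/sdd
 */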
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
			     unsigned int num_raid_params)
{
	int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
	unsigned int raid10_copies = 2;
	unsigned int i, write_mostly = 0;
	unsigned int region_size = 0;
	sector_t max_io_len;
	const char *arg, *key;
	struct raid_dev *rd;
	struct raid_type *rt = rs->raid_type;

	arg = dm_shift_arg(as);
	num_raid_params--; /* Account for chunk_size argument */

	if (kstrtoint(arg, 10, &value) < 0) {
		rs->ti->error = "Bad numerical argument given for chunk_size";
		return -EINVAL;
	}

	/*
	 * First, parse the in-order required parameter: chunk size.
	 * It is ignored for raid1; otherwise it must be a power of 2
	 * and at least 8 sectors.
	 */
	if (rt_is_raid1(rt)) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
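
	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'.  If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *	- Device is reset when param is read.
	 *   2) A new device is supplied.
	 *	- No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *	- Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *	- Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */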
	for (i = 0; i < rs->raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		key = dm_shift_arg(as);
		if (!key) {
			rs->ti->error = "Not enough raid parameters given";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
			if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'nosync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
			if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'sync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_use_new_sets' argument allowed";
				return -EINVAL;
			}
			continue;
		}

		arg = dm_shift_arg(as);
		i++; /* Account for the argument pairs */
		if (!arg) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		/*
		 * Parameters that take a string value are checked here.
		 */
		/* "raid10_format {near|offset|far}" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_format' argument pair allowed";
				return -EINVAL;
			}
			if (!rt_is_raid10(rt)) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			raid10_format = raid10_name_to_format(arg);
			if (raid10_format < 0) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return raid10_format;
			}
			continue;
		}

		/* "journal_dev <dev>" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV))) {
			int r;
			struct md_rdev *jdev;

			if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 set journaling device allowed";
				return -EINVAL;
			}
			if (!rt_is_raid456(rt)) {
				rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->journal_dev.dev);
			if (r) {
				rs->ti->error = "raid4/5/6 journal device lookup failure";
				return r;
			}
			jdev = &rs->journal_dev.rdev;
			md_rdev_init(jdev);
			jdev->mddev = &rs->md;
			jdev->bdev = rs->journal_dev.dev->bdev;
			jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
			if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
				rs->ti->error = "No space for raid4/5/6 journal";
				return -ENOSPC;
			}
			rs->journal_dev.mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
			set_bit(Journal, &jdev->flags);
			continue;
		}

		/* "journal_mode <mode>" ("journal_dev" mandatory!) */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE))) {
			int r;

			if (!test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'";
				return -EINVAL;
			}
			if (test_and_set_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed";
				return -EINVAL;
			}
			r = dm_raid_journal_mode_to_md(arg);
			if (r < 0) {
				rs->ti->error = "Invalid 'journal_mode' argument";
				return r;
			}
			rs->journal_dev.mode = r;
			continue;
		}

		/*
		 * Parameters with number values from here on.
		 */
		if (kstrtoint(arg, 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
			/*
			 * "rebuild" is being passed in by userspace to provide
			 * indexes of replaced devices and to set up additional
			 * devices on raid level takeover.
			 */
			if (!__within_range(value, 0, rs->raid_disks - 1)) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}

			if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
				rs->ti->error = "rebuild for this index already given";
				return -EINVAL;
			}

			rd = rs->dev + value;
			clear_bit(In_sync, &rd->rdev.flags);
			clear_bit(Faulty, &rd->rdev.flags);
			rd->rdev.recovery_offset = 0;
			set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}

			if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
				rs->ti->error = "Invalid write_mostly index given";
				return -EINVAL;
			}

			write_mostly++;
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
			set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}

			if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_write_behind argument pair allowed";
				return -EINVAL;
			}

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;
			if (value > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}

			rs->md.bitmap_info.max_write_behind = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
			if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
				rs->ti->error = "Only one daemon_sleep argument pair allowed";
				return -EINVAL;
			}
			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
			/* Userspace passes new data_offset after having extended the data image LV */
			if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
				rs->ti->error = "Only one data_offset argument pair allowed";
				return -EINVAL;
			}
			/* Ensure sensible data offset */
			if (value < 0 ||
			    (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
				rs->ti->error = "Bogus data_offset value";
				return -EINVAL;
			}
			rs->data_offset = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
			/* Define the +/-# of disks to add to/remove from the given raid set */
			if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
				rs->ti->error = "Only one delta_disks argument pair allowed";
				return -EINVAL;
			}
			/* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */
			if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
				rs->ti->error = "Too many delta_disk requested";
				return -EINVAL;
			}

			rs->delta_disks = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
			if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
				rs->ti->error = "Only one stripe_cache argument pair allowed";
				return -EINVAL;
			}

			if (!rt_is_raid456(rt)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}

			rs->stripe_cache_entries = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one min_recovery_rate argument pair allowed";
				return -EINVAL;
			}
			if (value > INT_MAX) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = (int)value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_recovery_rate argument pair allowed";
				return -EINVAL;
			}
			if (value > INT_MAX) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = (int)value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
			if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
				rs->ti->error = "Only one region_size argument pair allowed";
				return -EINVAL;
			}

			region_size = value;
			rs->requested_bitmap_chunk_sectors = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid10_copies argument pair allowed";
				return -EINVAL;
			}

			if (!__within_range(value, 2, rs->md.raid_disks)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}

			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameter";
			return -EINVAL;
		}
	}

	if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
	    test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
		rs->ti->error = "sync and nosync are mutually exclusive";
		return -EINVAL;
	}

	if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) &&
	    (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ||
	     test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) {
		rs->ti->error = "sync/nosync and rebuild are mutually exclusive";
		return -EINVAL;
	}

	if (write_mostly >= rs->md.raid_disks) {
		rs->ti->error = "Can't set all raid1 devices to write_mostly";
		return -EINVAL;
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rt_is_raid10(rt)) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
		if (rs->md.new_layout < 0) {
			rs->ti->error = "Error getting raid10 format";
			return rs->md.new_layout;
		}

		rt = get_raid_type_by_ll(10, rs->md.new_layout);
		if (!rt) {
			rs->ti->error = "Failed to recognize new raid10 layout";
			return -EINVAL;
		}

		if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
		     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
		    test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
			rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
			return -EINVAL;
		}
	}

	rs->raid10_copies = raid10_copies;

	/* Assume there is no metadata until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	/* Check, if any invalid ctr arguments have been passed in for the raid level */
	return rs_check_for_valid_flags(rs);
}

/* Set raid4/5/6 cache size */
static int rs_set_raid456_stripe_cache(struct raid_set *rs)
{
	int r;
	struct r5conf *conf;
	struct mddev *mddev = &rs->md;
	uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
	uint32_t nr_stripes = rs->stripe_cache_entries;

	if (!rt_is_raid456(rs->raid_type)) {
		rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
		return -EINVAL;
	}

	if (nr_stripes < min_stripes) {
		DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
		       nr_stripes, min_stripes);
		nr_stripes = min_stripes;
	}

	conf = mddev->private;
	if (!conf) {
		rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
		return -EINVAL;
	}

	/* Try setting number of stripes in raid456 stripe cache */
	if (conf->min_nr_stripes != nr_stripes) {
		r = raid5_set_cache_size(mddev, nr_stripes);
		if (r) {
			rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
			return r;
		}

		DMINFO("%u stripe cache entries", nr_stripes);
	}

	return 0;
}

/* Return # of data stripes as kept in mddev as of @rs (i.e. as of superblock) */
static unsigned int mddev_data_stripes(struct raid_set *rs)
{
	return rs->md.raid_disks - rs->raid_type->parity_devs;
}

/* Return # of data stripes of @rs (i.e. as of ctr) */
static unsigned int rs_data_stripes(struct raid_set *rs)
{
	return rs->raid_disks - rs->raid_type->parity_devs;
}

/*
 * Retrieve rdev->sectors from any valid raid device of @rs
 * to allow userspace to pass in arbitrary "- -" device tuples.
 */
static sector_t __rdev_sectors(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		struct md_rdev *rdev = &rs->dev[i].rdev;

		if (!test_bit(Journal, &rdev->flags) &&
		    rdev->bdev && rdev->sectors)
			return rdev->sectors;
	}

	return 0;
}
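
/* Calculate the sectors per device and per array used for @rs */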
static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
{
	int delta_disks;
	unsigned int data_stripes;
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;
	sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;

	if (use_mddev) {
		delta_disks = mddev->delta_disks;
		data_stripes = mddev_data_stripes(rs);
	} else {
		delta_disks = rs->delta_disks;
		data_stripes = rs_data_stripes(rs);
	}

	/* raid1 maps the whole device per leg; no striping math needed */
	if (rt_is_raid1(rs->raid_type))
		;
	else if (rt_is_raid10(rs->raid_type)) {
		if (rs->raid10_copies < 2 ||
		    delta_disks < 0) {
			rs->ti->error = "Bogus raid10 data copies or delta disks";
			return -EINVAL;
		}

		dev_sectors *= rs->raid10_copies;
		if (sector_div(dev_sectors, data_stripes))
			goto bad;

		array_sectors = (data_stripes + delta_disks) * dev_sectors;
		if (sector_div(array_sectors, rs->raid10_copies))
			goto bad;

	} else if (sector_div(dev_sectors, data_stripes))
		goto bad;

	else
		/* Striped layouts */
		array_sectors = (data_stripes + delta_disks) * dev_sectors;

	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags))
			rdev->sectors = dev_sectors;

	mddev->array_sectors = array_sectors;
	mddev->dev_sectors = dev_sectors;

	return 0;
bad:
	rs->ti->error = "Target length not divisible by number of data devices";
	return -EINVAL;
}
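
/* Setup recovery on @rs */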
static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
	/* raid0 does not recover */
	if (rs_is_raid0(rs))
		rs->md.recovery_cp = MaxSector;
	/*
	 * A raid6 set has to be recovered either
	 * completely or for the grown part to
	 * ensure proper parity and Q-Syndrome
	 */
	else if (rs_is_raid6(rs))
		rs->md.recovery_cp = dev_sectors;
	/*
	 * Other raid sets either recover fully or
	 * stay in sync when 'nosync' was requested
	 */
	else
		rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
				     ? MaxSector : dev_sectors;
}

/* Setup recovery on @rs based on raid type, device size and 'nosync' flag */
static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
	if (!dev_sectors)
		/* New raid set or 'sync' flag provided */
		__rs_setup_recovery(rs, 0);
	else if (dev_sectors == MaxSector)
		/* Prevent recovery */
		__rs_setup_recovery(rs, MaxSector);
	else if (__rdev_sectors(rs) < dev_sectors)
		/* Grown raid set */
		__rs_setup_recovery(rs, __rdev_sectors(rs));
	else
		__rs_setup_recovery(rs, MaxSector);
}

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	smp_rmb(); /* Make sure we access most recent reshape position */
	if (!rs_is_reshaping(rs))
		rs_set_capacity(rs);
	dm_table_event(rs->ti->table);
}

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	return mddev_congested(&rs->md, bits);
}
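
/*
 * Make sure a valid takeover (level switch) is being requested on @rs
 *
 * Conversions of raid sets from one MD personality to another
 * have to conform to restrictions defined in md.c.
 */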
static int rs_check_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	unsigned int near_copies;

	if (rs->md.degraded) {
		rs->ti->error = "Can't takeover degraded raid set";
		return -EPERM;
	}

	if (rs_is_reshaping(rs)) {
		rs->ti->error = "Can't takeover reshaping raid set";
		return -EPERM;
	}

	switch (mddev->level) {
	case 0:
		/* raid0 -> raid1/5 with one disk */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid0 -> raid10 */
		if (mddev->new_level == 10 &&
		    !(rs->raid_disks % mddev->raid_disks))
			return 0;

		/* raid0 with multiple disks -> raid4/5/6 */
		if (__within_range(mddev->new_level, 4, 6) &&
		    mddev->new_layout == ALGORITHM_PARITY_N &&
		    mddev->raid_disks > 1)
			return 0;

		break;

	case 10:
		/* Can't takeover raid10_offset! */
		if (__is_raid10_offset(mddev->layout))
			break;

		near_copies = __raid10_near_copies(mddev->layout);

		/* raid10* -> raid0 */
		if (mddev->new_level == 0) {
			/* Can takeover raid10_near with raid disks divisable by data copies! */
			if (near_copies > 1 &&
			    !(mddev->raid_disks % near_copies)) {
				mddev->raid_disks /= near_copies;
				mddev->delta_disks = mddev->raid_disks;
				return 0;
			}

			/* Can takeover raid10_far */
			if (near_copies == 1 &&
			    __raid10_far_copies(mddev->layout) > 1)
				return 0;

			break;
		}

		/* raid10_{near,far} -> raid1 */
		if (mddev->new_level == 1 &&
		    max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
			return 0;

		/* raid10_{near,far} with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2)
			return 0;
		break;

	case 1:
		/* raid1 with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2) {
			mddev->degraded = 1;
			return 0;
		}

		/* raid1 -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid1 -> raid10 */
		if (mddev->new_level == 10)
			return 0;
		break;

	case 4:
		/* raid4 -> raid0 */
		if (mddev->new_level == 0)
			return 0;

		/* raid4 with 2 disks -> raid1/5 */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid4 -> raid5/6 with parity N */
		if (__within_range(mddev->new_level, 5, 6) &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;
		break;

	case 5:
		/* raid5 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with 2 disks -> raid1/4/10 */
		if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid5_* -> raid6_*_6 with Q-Syndrome N (e.g. raid5_ra -> raid6_ra_6) */
		if (mddev->new_level == 6 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
			return 0;
		break;

	case 6:
		/* raid6 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6_*_n with Q-Syndrome N -> raid5_* */
		if (mddev->new_level == 5 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
			return 0;

	default:
		break;
	}

	rs->ti->error = "takeover not possible";
	return -EINVAL;
}

/* True if @rs requested to be taken over */
static bool rs_takeover_requested(struct raid_set *rs)
{
	return rs->md.new_level != rs->md.level;
}

/* True if @rs is requested to reshape by ctr */
static bool rs_reshape_requested(struct raid_set *rs)
{
	bool change;
	struct mddev *mddev = &rs->md;

	if (rs_takeover_requested(rs))
		return false;

	if (!mddev->level)
		return false;

	change = mddev->new_layout != mddev->layout ||
		 mddev->new_chunk_sectors != mddev->chunk_sectors ||
		 rs->delta_disks;

	/* Historical case to support raid1 reshape without delta disks */
	if (mddev->level == 1) {
		if (rs->delta_disks)
			return !!rs->delta_disks;

		return !change &&
		       mddev->raid_disks != rs->raid_disks;
	}

	if (mddev->level == 10)
		return change &&
		       !__is_raid10_far(mddev->new_layout) &&
		       rs->delta_disks >= 0;

	return change;
}

/* Features */
#define FEATURE_FLAG_SUPPORTS_V190	0x1 /* Supports extended superblock */

/* State flags for sb->flags */
#define SB_FLAG_RESHAPE_ACTIVE		0x1
#define SB_FLAG_RESHAPE_BACKWARDS	0x2
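
/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */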
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 compat_features;	/* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */

	__le32 num_devices;	/* Number of devices in this raid set. */
	__le32 array_position;	/* The position of this drive in the raid set */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Pre 1.9.0 part of bit field of devices to */
				/* indicate failures (see extension below) */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial raid set
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * raid characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	/********************************************************************
	 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
	 *
	 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
	 */
	__le32 flags;		/* Flags defining array states for reshaping */

	/*
	 * This offset tracks the progress of a raid
	 * set reshape in order to be able to restart it
	 */
	__le64 reshape_position;

	/*
	 * These define the properties of the array in case of an interrupted reshape
	 */
	__le32 new_level;
	__le32 new_layout;
	__le32 new_stripe_sectors;
	__le32 delta_disks;

	__le64 array_sectors;	/* Array size in sectors */

	/*
	 * Sector offsets to data on devices (reshaping).
	 * Needed to support out of place reshaping, hence
	 * not writing different data offsets in real-time.
	 */
	__le64 data_offset;
	__le64 new_data_offset;

	__le64 sectors;		/* Used device size in sectors */

	/*
	 * Additional bit field of devices indicating failures to support
	 * up to 256 devices with the 1.9.0 on-disk metadata format
	 */
	__le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];

	__le32 incompat_features;	/* Used to indicate any incompatible features */

	/* Always set rest up to logical block size to 0 when writing */
} __packed;
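
/*
 * Check for reshape constraints on raid set @rs:
 *
 * - reshape function non-existent
 * - degraded set
 * - ongoing recovery
 * - ongoing reshape
 *
 * Returns 0 if none or -EPERM if given constraint
 * and error message reference in @rs->ti->error
 */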
static int rs_check_reshape(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	if (!mddev->pers || !mddev->pers->check_reshape)
		rs->ti->error = "Reshape not supported";
	else if (mddev->degraded)
		rs->ti->error = "Can't reshape degraded raid set";
	else if (rs_is_recovering(rs))
		rs->ti->error = "Convert request on recovering raid set prohibited";
	else if (rs_is_reshaping(rs))
		rs->ti->error = "raid set already reshaping!";
	else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs)))
		rs->ti->error = "Reshaping only supported for raid1/4/5/6/10";
	else
		return 0;

	return -EPERM;
}
static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded && !force_reload)
		return 0;

	rdev->sb_loaded = 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		set_bit(Faulty, &rdev->flags);
		return -EIO;
	}

	rdev->sb_loaded = 1;

	return 0;
}

static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	failed_devices[0] = le64_to_cpu(sb->failed_devices);
	memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));

	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
		int i = ARRAY_SIZE(sb->extended_failed_devices);

		while (i--)
			failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
	}
}

static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	int i = ARRAY_SIZE(sb->extended_failed_devices);

	sb->failed_devices = cpu_to_le64(failed_devices[0]);
	while (i--)
		sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
}
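
/*
 * Synchronize the superblock of @rdev with the in-core mddev/raid set
 * state.  The superblock page is only updated in memory here; md writes
 * it to the metadata device later.
 */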
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	bool update_failed_devices = false;
	unsigned int i;
	uint64_t failed_devices[DISKS_ARRAY_ELEMS];
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	/* No metadata device, no superblock */
	if (!rdev->meta_bdev)
		return;

	BUG_ON(!rdev->sb_page);

	sb = page_address(rdev->sb_page);

	sb_retrieve_failed_devices(sb, failed_devices);

	for (i = 0; i < rs->raid_disks; i++)
		if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
			update_failed_devices = true;
			set_bit(i, (void *) failed_devices);
		}

	if (update_failed_devices)
		sb_update_failed_devices(sb, failed_devices);

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);

	/********************************************************************
	 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
	 *
	 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
	 */
	sb->new_level = cpu_to_le32(mddev->new_level);
	sb->new_layout = cpu_to_le32(mddev->new_layout);
	sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);

	sb->delta_disks = cpu_to_le32(mddev->delta_disks);

	smp_rmb(); /* Make sure we access most recent reshape position */
	sb->reshape_position = cpu_to_le64(mddev->reshape_position);
	if (le64_to_cpu(sb->reshape_position) != MaxSector) {
		/* Flag ongoing reshape */
		sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);

		if (mddev->delta_disks < 0 || mddev->reshape_backwards)
			sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
	} else {
		/* Clear reshape flags */
		sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
	}

	sb->array_sectors = cpu_to_le64(mddev->array_sectors);
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
	sb->sectors = cpu_to_le64(rdev->sectors);
	sb->incompat_features = cpu_to_le32(0);

	/* Zero out the rest of the payload after the size of the superblock */
	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
}
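
/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */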
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int r;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	rdev->sb_start = 0;
	rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
	if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
		DMERR("superblock size of a logical block is no longer valid");
		return -EINVAL;
	}

	r = read_disk_sb(rdev, rdev->sb_size, false);
	if (r)
		return r;

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);
		sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

		/* Force writing of superblocks to disk */
		set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}
2188
2189static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2190{
2191 int role;
2192 unsigned int d;
2193 struct mddev *mddev = &rs->md;
2194 uint64_t events_sb;
2195 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
2196 struct dm_raid_superblock *sb;
2197 uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0;
2198 struct md_rdev *r;
2199 struct dm_raid_superblock *sb2;
2200
2201 sb = page_address(rdev->sb_page);
2202 events_sb = le64_to_cpu(sb->events);
2203
2204
2205
2206
2207 mddev->events = events_sb ? : 1;
2208
2209 mddev->reshape_position = MaxSector;
2210
2211 mddev->raid_disks = le32_to_cpu(sb->num_devices);
2212 mddev->level = le32_to_cpu(sb->level);
2213 mddev->layout = le32_to_cpu(sb->layout);
2214 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
2215
2216
2217
2218
2219
2220 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
2221
2222 mddev->new_level = le32_to_cpu(sb->new_level);
2223 mddev->new_layout = le32_to_cpu(sb->new_layout);
2224 mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
2225 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
2226 mddev->array_sectors = le64_to_cpu(sb->array_sectors);
2227
2228
2229 if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
2230 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
2231 DMERR("Reshape requested but raid set is still reshaping");
2232 return -EINVAL;
2233 }
2234
2235 if (mddev->delta_disks < 0 ||
2236 (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
2237 mddev->reshape_backwards = 1;
2238 else
2239 mddev->reshape_backwards = 0;
2240
2241 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
2242 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
2243 }
2244
2245 } else {
2246
2247
2248
2249 struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
2250 struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
2251
2252 if (rs_takeover_requested(rs)) {
2253 if (rt_cur && rt_new)
2254 DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
2255 rt_cur->name, rt_new->name);
2256 else
2257 DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
2258 return -EINVAL;
2259 } else if (rs_reshape_requested(rs)) {
2260 DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
2261 if (mddev->layout != mddev->new_layout) {
2262 if (rt_cur && rt_new)
2263 DMERR(" current layout %s vs new layout %s",
2264 rt_cur->name, rt_new->name);
2265 else
2266 DMERR(" current layout 0x%X vs new layout 0x%X",
2267 le32_to_cpu(sb->layout), mddev->new_layout);
2268 }
2269 if (mddev->chunk_sectors != mddev->new_chunk_sectors)
2270 DMERR(" current stripe sectors %u vs new stripe sectors %u",
2271 mddev->chunk_sectors, mddev->new_chunk_sectors);
2272 if (rs->delta_disks)
2273 DMERR(" current %u disks vs new %u disks",
2274 mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
2275 if (rs_is_raid10(rs)) {
2276 DMERR(" Old layout: %s w/ %u copies",
2277 raid10_md_layout_to_format(mddev->layout),
2278 raid10_md_layout_to_copies(mddev->layout));
2279 DMERR(" New layout: %s w/ %u copies",
2280 raid10_md_layout_to_format(mddev->new_layout),
2281 raid10_md_layout_to_copies(mddev->new_layout));
2282 }
2283 return -EINVAL;
2284 }
2285
2286 DMINFO("Discovered old metadata format; upgrading to extended metadata format");
2287 }
2288
2289 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
2290 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307 d = 0;
2308 rdev_for_each(r, mddev) {
2309 if (test_bit(Journal, &rdev->flags))
2310 continue;
2311
2312 if (test_bit(FirstUse, &r->flags))
2313 new_devs++;
2314
2315 if (!test_bit(In_sync, &r->flags)) {
2316 DMINFO("Device %d specified for rebuild; clearing superblock",
2317 r->raid_disk);
2318 rebuilds++;
2319
2320 if (test_bit(FirstUse, &r->flags))
2321 rebuild_and_new++;
2322 }
2323
2324 d++;
2325 }
2326
2327 if (new_devs == rs->raid_disks || !rebuilds) {
2328
2329 if (new_devs == 1 && !rs->delta_disks)
2330 ;
2331 if (new_devs == rs->raid_disks) {
2332 DMINFO("Superblocks created for new raid set");
2333 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2334 } else if (new_devs != rebuilds &&
2335 new_devs != rs->delta_disks) {
2336 DMERR("New device injected into existing raid set without "
2337 "'delta_disks' or 'rebuild' parameter specified");
2338 return -EINVAL;
2339 }
2340 } else if (new_devs && new_devs != rebuilds) {
2341 DMERR("%u 'rebuild' devices cannot be injected into"
2342 " a raid set with %u other first-time devices",
2343 rebuilds, new_devs);
2344 return -EINVAL;
2345 } else if (rebuilds) {
2346 if (rebuild_and_new && rebuilds != rebuild_and_new) {
2347 DMERR("new device%s provided without 'rebuild'",
2348 new_devs > 1 ? "s" : "");
2349 return -EINVAL;
2350 } else if (rs_is_recovering(rs)) {
2351 DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
2352 (unsigned long long) mddev->recovery_cp);
2353 return -EINVAL;
2354 } else if (rs_is_reshaping(rs)) {
2355 DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
2356 (unsigned long long) mddev->reshape_position);
2357 return -EINVAL;
2358 }
2359 }
2360
2361
2362
2363
2364
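	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblocks as failed.
	 */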
2365 sb_retrieve_failed_devices(sb, failed_devices);
2366 rdev_for_each(r, mddev) {
		if (test_bit(Journal, &r->flags) ||
		    !r->sb_page)
2369 continue;
2370 sb2 = page_address(r->sb_page);
2371 sb2->failed_devices = 0;
2372 memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));
2373
2374
2375
2376
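		/* Check for any device re-ordering on non-FirstUse devices */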
2377 if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
2378 role = le32_to_cpu(sb2->array_position);
2379 if (role < 0)
2380 continue;
2381
2382 if (role != r->raid_disk) {
2383 if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
2384 if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
2385 rs->raid_disks % rs->raid10_copies) {
2386 rs->ti->error =
2387 "Cannot change raid10 near set to odd # of devices!";
2388 return -EINVAL;
2389 }
2390
2391 sb2->array_position = cpu_to_le32(r->raid_disk);
2392
2393 } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
2394 !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
2395 !rt_is_raid1(rs->raid_type)) {
2396 rs->ti->error = "Cannot change device positions in raid set";
2397 return -EINVAL;
2398 }
2399
2400 DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
2401 }
2402
2403
2404
2405
2406
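			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */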
2407 if (test_bit(role, (void *) failed_devices))
2408 set_bit(Faulty, &r->flags);
2409 }
2410 }
2411
2412 return 0;
2413}
2414
2415static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2416{
2417 struct mddev *mddev = &rs->md;
2418 struct dm_raid_superblock *sb;
2419
2420 if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0)
2421 return 0;
2422
2423 sb = page_address(rdev->sb_page);
2424
2425
2426
2427
2428
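	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */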
2429 if (!mddev->events && super_init_validation(rs, rdev))
2430 return -EINVAL;
2431
2432 if (le32_to_cpu(sb->compat_features) &&
2433 le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
2434 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
2435 return -EINVAL;
2436 }
2437
2438 if (sb->incompat_features) {
2439 rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
2440 return -EINVAL;
2441 }
2442
2443
2444 mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
2445 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
2446
2447 if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
2448
2449
2450
2451
2452
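		/*
		 * Retrieve the rdev size stored in the superblock to be
		 * prepared for shrink; only present with v1.9.0+ extended
		 * metadata (FEATURE_FLAG_SUPPORTS_V190).
		 */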
2453 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
2454 rdev->sectors = le64_to_cpu(sb->sectors);
2455
2456 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
2457 if (rdev->recovery_offset == MaxSector)
2458 set_bit(In_sync, &rdev->flags);
2459
2460
2461
2462
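		/*
		 * If no reshape is in progress, we are recovering single
		 * disk(s) and have to set them out-of-sync.
		 */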
2463 else if (!rs_is_reshaping(rs))
2464 clear_bit(In_sync, &rdev->flags);
2465 }
2466
2467
2468
2469
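	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */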
2470 if (test_and_clear_bit(Faulty, &rdev->flags)) {
2471 rdev->recovery_offset = 0;
2472 clear_bit(In_sync, &rdev->flags);
2473 rdev->saved_raid_disk = rdev->raid_disk;
2474 }
2475
2476
2477 rdev->data_offset = le64_to_cpu(sb->data_offset);
2478 rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
2479
2480 return 0;
2481}
2482
2483
2484
2485
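/*
 * Analyse superblocks and select the freshest.
 */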
2486static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
2487{
2488 int r;
2489 struct md_rdev *rdev, *freshest;
2490 struct mddev *mddev = &rs->md;
2491
2492 freshest = NULL;
2493 rdev_for_each(rdev, mddev) {
2494 if (test_bit(Journal, &rdev->flags))
2495 continue;
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
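		/*
		 * Skipping super_load due to CTR_FLAG_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new.  This is the intended effect
		 * of the "sync" directive.
		 */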
2506 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
2507 continue;
2508
2509 if (!rdev->meta_bdev)
2510 continue;
2511
2512 r = super_load(rdev, freshest);
2513
2514 switch (r) {
2515 case 1:
2516 freshest = rdev;
2517 break;
2518 case 0:
2519 break;
2520 default:
2521
2522
2523
2524
2525
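			/*
			 * The superblock failed to load.  Keep any raid0
			 * data/metadata device pairs as-is, or the MD raid0
			 * personality will fail to start the array; for other
			 * levels, detach the device from its slot below but
			 * keep it on the list so raid_status() can still emit
			 * the device tuple and a later resume may revive it.
			 */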
2526 if (rs_is_raid0(rs))
2527 continue;
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537 rdev->raid_disk = rdev->saved_raid_disk = -1;
2538 break;
2539 }
2540 }
2541
2542 if (!freshest)
2543 return 0;
2544
2545
2546
2547
2548
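	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */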
2549 rs->ti->error = "Unable to assemble array: Invalid superblocks";
2550 if (super_validate(rs, freshest))
2551 return -EINVAL;
2552
2553 if (validate_raid_redundancy(rs)) {
2554 rs->ti->error = "Insufficient redundancy to activate array";
2555 return -EINVAL;
2556 }
2557
2558 rdev_for_each(rdev, mddev)
2559 if (!test_bit(Journal, &rdev->flags) &&
2560 rdev != freshest &&
2561 super_validate(rs, rdev))
2562 return -EINVAL;
2563 return 0;
2564}
2565
2566
2567
2568
2569
2570
2571
2572
2573
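/*
 * Adjust data_offset and new_data_offset on all disk members of @rs
 * for out-of-place reshaping if requested by the constructor.
 *
 * We need free space at the beginning of each raid disk for forward
 * and at the end for backward reshapes, which userspace has to provide
 * via the data_offset key of the table line.
 */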
2574static int rs_adjust_data_offsets(struct raid_set *rs)
2575{
2576 sector_t data_offset = 0, new_data_offset = 0;
2577 struct md_rdev *rdev;
2578
2579
2580 if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
2581 if (!rs_is_reshapable(rs))
2582 goto out;
2583
2584 return 0;
2585 }
2586
2587
2588 rdev = &rs->dev[0].rdev;
2589
2590 if (rs->delta_disks < 0) {
2591
2592
2593
2594
2595
2596
2597
2598
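		/*
		 * Removing disks (reshaping backwards):
		 *
		 * - before reshape: data is at offset 0 and free space
		 *   is at the end of each component LV
		 * - after reshape: data is at offset rs->data_offset != 0
		 *   on each component LV
		 */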
2599 data_offset = 0;
2600 new_data_offset = rs->data_offset;
2601
2602 } else if (rs->delta_disks > 0) {
2603
2604
2605
2606
2607
2608
2609
2610
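		/*
		 * Adding disks (reshaping forwards):
		 *
		 * - before reshape: data is at offset rs->data_offset != 0
		 *   and free space is at the beginning of each component LV
		 * - after reshape: data is at offset 0 on each component LV
		 */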
2611 data_offset = rs->data_offset;
2612 new_data_offset = 0;
2613
2614 } else {
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
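		/*
		 * Changing raid layout or chunk size -> toggle offsets:
		 * data ends up at offset 0 if it was at offset != 0, or at
		 * offset != 0 if it was at offset 0, on each component LV,
		 * reusing the free reshape space provided by userspace;
		 * superblocks have to be updated to record the new offsets.
		 */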
2633 data_offset = rs->data_offset ? rdev->data_offset : 0;
2634 new_data_offset = data_offset ? 0 : rs->data_offset;
2635 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2636 }
2637
2638
2639
2640
2641 if (rs->data_offset &&
2642 to_sector(i_size_read(rdev->bdev->bd_inode)) - rdev->sectors < MIN_FREE_RESHAPE_SPACE) {
2643 rs->ti->error = data_offset ? "No space for forward reshape" :
2644 "No space for backward reshape";
2645 return -ENOSPC;
2646 }
2647out:
2648
2649 rdev_for_each(rdev, &rs->md) {
2650 if (!test_bit(Journal, &rdev->flags)) {
2651 rdev->data_offset = data_offset;
2652 rdev->new_data_offset = new_data_offset;
2653 }
2654 }
2655
2656 return 0;
2657}
2658
2659
2660static void __reorder_raid_disk_indexes(struct raid_set *rs)
2661{
2662 int i = 0;
2663 struct md_rdev *rdev;
2664
2665 rdev_for_each(rdev, &rs->md) {
2666 if (!test_bit(Journal, &rdev->flags)) {
2667 rdev->raid_disk = i++;
2668 rdev->saved_raid_disk = rdev->new_raid_disk = -1;
2669 }
2670 }
2671}
2672
2673
2674
2675
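/*
 * Setup @rs for takeover by a different raid level.
 */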
2676static int rs_setup_takeover(struct raid_set *rs)
2677{
2678 struct mddev *mddev = &rs->md;
2679 struct md_rdev *rdev;
2680 unsigned int d = mddev->raid_disks = rs->raid_disks;
2681 sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
2682
2683 if (rt_is_raid10(rs->raid_type)) {
2684 if (mddev->level == 0) {
2685
2686 __reorder_raid_disk_indexes(rs);
2687
2688
2689 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
2690 rs->raid10_copies);
2691 } else if (mddev->level == 1)
2692
2693 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2694 rs->raid_disks);
2695 else
2696 return -EINVAL;
2697
2698 }
2699
2700 clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2701 mddev->recovery_cp = MaxSector;
2702
2703 while (d--) {
2704 rdev = &rs->dev[d].rdev;
2705
2706 if (test_bit(d, (void *) rs->rebuild_disks)) {
2707 clear_bit(In_sync, &rdev->flags);
2708 clear_bit(Faulty, &rdev->flags);
2709 mddev->recovery_cp = rdev->recovery_offset = 0;
2710
2711 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2712 }
2713
2714 rdev->new_data_offset = new_data_offset;
2715 }
2716
2717 return 0;
2718}
2719
2720
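/* Prepare @rs for reshape */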
2721static int rs_prepare_reshape(struct raid_set *rs)
2722{
2723 bool reshape;
2724 struct mddev *mddev = &rs->md;
2725
2726 if (rs_is_raid10(rs)) {
2727 if (rs->raid_disks != mddev->raid_disks &&
2728 __is_raid10_near(mddev->layout) &&
2729 rs->raid10_copies &&
2730 rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
2731
2732
2733
2734
2735
2736
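			/*
			 * Changing the number of raid10 near copies is not an
			 * MD reshape proper: the disks are reordered and the
			 * layout recalculated below instead, which requires
			 * the raid disk count to be a multiple of the number
			 * of data copies.
			 */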
2737 if (rs->raid_disks % rs->raid10_copies) {
2738 rs->ti->error = "Can't reshape raid10 mirror groups";
2739 return -EINVAL;
2740 }
2741
2742
2743 __reorder_raid_disk_indexes(rs);
2744 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2745 rs->raid10_copies);
2746 mddev->new_layout = mddev->layout;
2747 reshape = false;
2748 } else
2749 reshape = true;
2750
2751 } else if (rs_is_raid456(rs))
2752 reshape = true;
2753
2754 else if (rs_is_raid1(rs)) {
2755 if (rs->delta_disks) {
2756
2757 mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
2758 reshape = true;
2759 } else {
2760
2761 mddev->raid_disks = rs->raid_disks;
2762 reshape = false;
2763 }
2764 } else {
2765 rs->ti->error = "Called with bogus raid type";
2766 return -EINVAL;
2767 }
2768
2769 if (reshape) {
2770 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
2771 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2772 } else if (mddev->raid_disks < rs->raid_disks)
2773
2774 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2775
2776 return 0;
2777}
2778
2779
2780
2781
2782
2783
2784
2785
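/*
 * Reshape:
 * - change raid layout
 * - change chunk size
 * - add disks
 * - remove disks
 */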
2786static int rs_setup_reshape(struct raid_set *rs)
2787{
2788 int r = 0;
2789 unsigned int cur_raid_devs, d;
2790 struct mddev *mddev = &rs->md;
2791 struct md_rdev *rdev;
2792
2793 mddev->delta_disks = rs->delta_disks;
2794 cur_raid_devs = mddev->raid_disks;
2795
2796
2797 if (mddev->delta_disks &&
2798 mddev->layout != mddev->new_layout) {
2799 DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
2800 mddev->new_layout = mddev->layout;
2801 }
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
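	/*
	 * Adjust array size:
	 *
	 * - adding disks: the array grows after the disk-adding reshape,
	 *   which happens in the event handler; reshape runs forward, so
	 *   space has to be available at the beginning of each disk
	 *
	 * - removing disks: the array has to shrink before starting the
	 *   reshape, which happens here; reshape runs backward, so space
	 *   has to be available at the end of each disk
	 *
	 * - data_offset and new_data_offset are adjusted for said
	 *   out-of-place reshaping based on userspace passing in the
	 *   "data_offset <sectors>" key/value pair via the constructor
	 */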
2826 if (rs->delta_disks > 0) {
2827
2828 for (d = cur_raid_devs; d < rs->raid_disks; d++) {
2829 rdev = &rs->dev[d].rdev;
2830 clear_bit(In_sync, &rdev->flags);
2831
2832
2833
2834
2835
2836 rdev->saved_raid_disk = -1;
2837 rdev->raid_disk = d;
2838
2839 rdev->sectors = mddev->dev_sectors;
2840 rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
2841 }
2842
2843 mddev->reshape_backwards = 0;
2844
2845
2846 } else if (rs->delta_disks < 0) {
2847 r = rs_set_dev_and_array_sectors(rs, true);
2848 mddev->reshape_backwards = 1;
2849
2850
2851 } else {
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
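		/*
		 * Keeping the disk count while changing layout and/or chunk
		 * size: toggle reshape_backwards depending on where the free
		 * reshape space sits - free space upfront reshapes forward,
		 * free space at the end reshapes backward.
		 */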
2873 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
2874 }
2875
2876 return r;
2877}
2878
2879
2880
2881
2882
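/*
 * Enable/disable discard support on the raid set depending on
 * raid level and the discard properties of the underlying members.
 */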
2883static void configure_discard_support(struct raid_set *rs)
2884{
2885 int i;
2886 bool raid456;
2887 struct dm_target *ti = rs->ti;
2888
2889
2890 ti->discards_supported = false;
2891
2892
2893
2894
2895 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
2896
2897 for (i = 0; i < rs->raid_disks; i++) {
2898 struct request_queue *q;
2899
2900 if (!rs->dev[i].rdev.bdev)
2901 continue;
2902
2903 q = bdev_get_queue(rs->dev[i].rdev.bdev);
2904 if (!q || !blk_queue_discard(q))
2905 return;
2906
2907 if (raid456) {
2908 if (!devices_handle_discard_safely) {
2909 DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
2910 DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
2911 return;
2912 }
2913 }
2914 }
2915
2916
2917 ti->discards_supported = true;
2918
2919
2920
2921
2922
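	/*
	 * raid1 and raid10 personalities require bio splitting,
	 * raid0/4/5/6 process large discard bios properly.
	 */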
2923 ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
2924 ti->num_discard_bios = 1;
2925}
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
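/*
 * Construct a raid0/1/10/4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>{0,} \
 *	<#raid_devs> [<meta_dev1> <dev1>]{1,}
 *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 *
 * Userspace is free to initialize the metadata devices, hence the
 * superblocks, to enforce recreation based on the passed in table
 * parameters.
 */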
2940static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2941{
2942 int r;
2943 bool resize;
2944 struct raid_type *rt;
2945 unsigned int num_raid_params, num_raid_devs;
2946 sector_t calculated_dev_sectors, rdev_sectors;
2947 struct raid_set *rs = NULL;
2948 const char *arg;
2949 struct rs_layout rs_layout;
2950 struct dm_arg_set as = { argc, argv }, as_nrd;
2951 struct dm_arg _args[] = {
2952 { 0, as.argc, "Cannot understand number of raid parameters" },
2953 { 1, 254, "Cannot understand number of raid devices parameters" }
2954 };
2955
2956
2957 arg = dm_shift_arg(&as);
2958 if (!arg) {
2959 ti->error = "No arguments";
2960 return -EINVAL;
2961 }
2962
2963 rt = get_raid_type(arg);
2964 if (!rt) {
2965 ti->error = "Unrecognised raid_type";
2966 return -EINVAL;
2967 }
2968
2969
2970 if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
2971 return -EINVAL;
2972
2973
2974 as_nrd = as;
2975 dm_consume_args(&as_nrd, num_raid_params);
2976 _args[1].max = (as_nrd.argc - 1) / 2;
2977 if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
2978 return -EINVAL;
2979
2980 if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
2981 ti->error = "Invalid number of supplied raid devices";
2982 return -EINVAL;
2983 }
2984
2985 rs = raid_set_alloc(ti, rt, num_raid_devs);
2986 if (IS_ERR(rs))
2987 return PTR_ERR(rs);
2988
2989 r = parse_raid_params(rs, &as, num_raid_params);
2990 if (r)
2991 goto bad;
2992
2993 r = parse_dev_params(rs, &as);
2994 if (r)
2995 goto bad;
2996
2997 rs->md.sync_super = super_sync;
2998
2999
3000
3001
3002
3003
3004
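	/*
	 * Calculate ctr requested array and device sizes to allow
	 * for superblock analysis needing device sizes defined.
	 *
	 * Any existing superblock will overwrite the array and device sizes.
	 */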
3005 r = rs_set_dev_and_array_sectors(rs, false);
3006 if (r)
3007 goto bad;
3008
3009 calculated_dev_sectors = rs->md.dev_sectors;
3010
3011
3012
3013
3014
3015
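	/*
	 * Backup any new raid set level, layout, ... requested, to be able
	 * to compare against the superblock members for conversion decisions.
	 */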
3016 rs_config_backup(rs, &rs_layout);
3017
3018 r = analyse_superblocks(ti, rs);
3019 if (r)
3020 goto bad;
3021
3022 rdev_sectors = __rdev_sectors(rs);
3023 if (!rdev_sectors) {
3024 ti->error = "Invalid rdev size";
3025 r = -EINVAL;
3026 goto bad;
3027 }
3028
3029 resize = calculated_dev_sectors != rdev_sectors;
3030
3031 INIT_WORK(&rs->md.event_work, do_table_event);
3032 ti->private = rs;
3033 ti->num_flush_bios = 1;
3034
3035
3036 rs_config_restore(rs, &rs_layout);
3037
3038
3039
3040
3041
3042
3043
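	/*
	 * Now that any superblock metadata is available, decide in sequence
	 * whether the raid set is new, recovering, reshaping, to be taken
	 * over, to be reshaped, or existing and unchanged.
	 */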
3044 if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
3045
3046 if (rs_is_raid6(rs) &&
3047 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
3048 ti->error = "'nosync' not allowed for new raid6 set";
3049 r = -EINVAL;
3050 goto bad;
3051 }
3052 rs_setup_recovery(rs, 0);
3053 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3054 rs_set_new(rs);
3055 } else if (rs_is_recovering(rs)) {
3056
3057 ;
3058 } else if (rs_is_reshaping(rs)) {
3059
3060 if (resize) {
3061 ti->error = "Can't resize a reshaping raid set";
3062 r = -EPERM;
3063 goto bad;
3064 }
3065
3066 } else if (rs_takeover_requested(rs)) {
3067 if (rs_is_reshaping(rs)) {
3068 ti->error = "Can't takeover a reshaping raid set";
3069 r = -EPERM;
3070 goto bad;
3071 }
3072
3073
3074 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
3075 ti->error = "Can't takeover a journaled raid4/5/6 set";
3076 r = -EPERM;
3077 goto bad;
3078 }
3079
3080
3081
3082
3083
3084
3085
3086
3087
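		/*
		 * If a takeover is needed, userspace sets any additional
		 * devices to rebuild, so the request can be validated here.
		 *
		 * If acceptable, set the level to the new requested one,
		 * prohibit requesting recovery, allow the raid set to run
		 * and store superblocks during resume.
		 */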
3088 r = rs_check_takeover(rs);
3089 if (r)
3090 goto bad;
3091
3092 r = rs_setup_takeover(rs);
3093 if (r)
3094 goto bad;
3095
3096 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3097
3098 rs_setup_recovery(rs, MaxSector);
3099 rs_set_new(rs);
3100 } else if (rs_reshape_requested(rs)) {
3101
3102
3103
3104
3105
3106
3107 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
3108 ti->error = "Can't reshape a journaled raid4/5/6 set";
3109 r = -EPERM;
3110 goto bad;
3111 }
3112
3113
3114
3115
3116
3117
3118
3119
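		/*
		 * We can only prepare for a reshape here, because the raid
		 * set needs to run to provide the respective reshape check
		 * functions via its MD personality instance.  The actual
		 * reshape check therefore happens after md_run() succeeded.
		 */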
3120 r = rs_prepare_reshape(rs);
3121 if (r)
			goto bad;
3123
3124
3125 rs_setup_recovery(rs, MaxSector);
3126 rs_set_cur(rs);
3127 } else {
3128
3129 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3130 rs_setup_recovery(rs, MaxSector);
3131 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3132 } else
3133 rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ?
3134 0 : (resize ? calculated_dev_sectors : MaxSector));
3135 rs_set_cur(rs);
3136 }
3137
3138
3139 r = rs_adjust_data_offsets(rs);
3140 if (r)
3141 goto bad;
3142
3143
3144 rs->md.ro = 1;
3145 rs->md.in_sync = 1;
3146 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
3147
3148
3149 mddev_lock_nointr(&rs->md);
3150 r = md_run(&rs->md);
3151 rs->md.in_sync = 0;
3152
3153 if (r) {
3154 ti->error = "Failed to run raid array";
3155 mddev_unlock(&rs->md);
3156 goto bad;
3157 }
3158
3159 rs->callbacks.congested_fn = raid_is_congested;
3160 dm_table_add_target_callbacks(ti->table, &rs->callbacks);
3161
3162
3163 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
3164 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
3165 if (r) {
3166 ti->error = "Failed to set raid4/5/6 journal mode";
3167 mddev_unlock(&rs->md);
3168 goto bad_journal_mode_set;
3169 }
3170 }
3171
3172 mddev_suspend(&rs->md);
3173 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3174
3175
3176 if (rs_is_raid456(rs)) {
3177 r = rs_set_raid456_stripe_cache(rs);
3178 if (r)
3179 goto bad_stripe_cache;
3180 }
3181
3182
3183 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
3184 r = rs_check_reshape(rs);
3185 if (r)
3186 goto bad_check_reshape;
3187
3188
3189 rs_config_restore(rs, &rs_layout);
3190
3191 if (rs->md.pers->start_reshape) {
3192 r = rs->md.pers->check_reshape(&rs->md);
3193 if (r) {
3194 ti->error = "Reshape check failed";
3195 goto bad_check_reshape;
3196 }
3197 }
3198 }
3199
3200
3201 configure_discard_support(rs);
3202
3203 mddev_unlock(&rs->md);
3204 return 0;
3205
3206bad_journal_mode_set:
3207bad_stripe_cache:
3208bad_check_reshape:
3209 md_stop(&rs->md);
3210bad:
3211 raid_set_free(rs);
3212
3213 return r;
3214}
3215
3216static void raid_dtr(struct dm_target *ti)
3217{
3218 struct raid_set *rs = ti->private;
3219
3220 list_del_init(&rs->callbacks.list);
3221 md_stop(&rs->md);
3222 raid_set_free(rs);
3223}
3224
3225static int raid_map(struct dm_target *ti, struct bio *bio)
3226{
3227 struct raid_set *rs = ti->private;
3228 struct mddev *mddev = &rs->md;
3229
3230
3231
3232
3233
3234
3235
3236
3237
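	/*
	 * If we're reshaping to add disk(s), ti->len and
	 * mddev->array_sectors will differ during the process
	 * (ti->len > mddev->array_sectors), so we have to requeue
	 * bios with addresses > mddev->array_sectors here or
	 * there will occur accesses past EOD of the component
	 * data images, thus erroring the raid set.
	 */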
3238 if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
3239 return DM_MAPIO_REQUEUE;
3240
3241 mddev->pers->make_request(mddev, bio);
3242
3243 return DM_MAPIO_SUBMITTED;
3244}
3245
3246
3247static const char *decipher_sync_action(struct mddev *mddev)
3248{
3249 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3250 return "frozen";
3251
3252 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3253 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3254 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3255 return "reshape";
3256
3257 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3258 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3259 return "resync";
3260 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3261 return "check";
3262 return "repair";
3263 }
3264
3265 if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3266 return "recover";
3267 }
3268
3269 return "idle";
3270}
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
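/*
 * Return status string for @rdev
 *
 * Status characters:
 *
 *  'D' = Dead/Failed raid set component or raid4/5/6 journal device
 *  'a' = Alive but not in-sync raid set component _or_ alive raid4/5/6 'write_back' journal device
 *  'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
 *  '-' = Non-existing device (i.e. userspace passed '- -' into the ctr)
 */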
3282static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev, bool array_in_sync)
3283{
3284 if (!rdev->bdev)
3285 return "-";
3286 else if (test_bit(Faulty, &rdev->flags))
3287 return "D";
3288 else if (test_bit(Journal, &rdev->flags))
3289 return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
3290 else if (!array_in_sync || !test_bit(In_sync, &rdev->flags))
3291 return "a";
3292 else
3293 return "A";
3294}
3295
3296
3297static sector_t rs_get_progress(struct raid_set *rs,
3298 sector_t resync_max_sectors, bool *array_in_sync)
3299{
3300 sector_t r, recovery_cp, curr_resync_completed;
3301 struct mddev *mddev = &rs->md;
3302
3303 curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
3304 recovery_cp = mddev->recovery_cp;
3305 *array_in_sync = false;
3306
3307 if (rs_is_raid0(rs)) {
3308 r = resync_max_sectors;
3309 *array_in_sync = true;
3310
3311 } else {
3312 r = mddev->reshape_position;
3313
3314
3315 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
3316 r != MaxSector) {
3317 if (r == MaxSector) {
3318 *array_in_sync = true;
3319 r = resync_max_sectors;
3320 } else {
3321
3322 if (mddev->reshape_backwards)
3323 r = mddev->array_sectors - r;
3324
3325
3326 sector_div(r, mddev_data_stripes(rs));
3327 }
3328
3329
3330 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3331 r = curr_resync_completed;
3332 else
3333 r = recovery_cp;
3334
3335 if (r == MaxSector) {
3336
3337
3338
3339 *array_in_sync = true;
3340 r = resync_max_sectors;
3341 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
3342
3343
3344
3345
3346
3347 *array_in_sync = true;
3348 } else {
3349 struct md_rdev *rdev;
3350
3351
3352
3353
3354
3355
3356
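			/*
			 * The raid set may be doing an initial sync, or it may
			 * be rebuilding individual components.  If all the
			 * devices are In_sync, then it is the raid set that is
			 * being initialized.
			 */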
3357 rdev_for_each(rdev, mddev)
3358 if (!test_bit(Journal, &rdev->flags) &&
3359 !test_bit(In_sync, &rdev->flags))
3360 *array_in_sync = true;
3361#if 0
3362 r = 0;
3363#endif
3364 }
3365 }
3366
3367 return r;
3368}
3369
3370
3371static const char *__get_dev_name(struct dm_dev *dev)
3372{
3373 return dev ? dev->name : "-";
3374}
3375
3376static void raid_status(struct dm_target *ti, status_type_t type,
3377 unsigned int status_flags, char *result, unsigned int maxlen)
3378{
3379 struct raid_set *rs = ti->private;
3380 struct mddev *mddev = &rs->md;
3381 struct r5conf *conf = mddev->private;
3382 int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
3383 bool array_in_sync;
3384 unsigned int raid_param_cnt = 1;
3385 unsigned int sz = 0;
3386 unsigned int rebuild_disks;
3387 unsigned int write_mostly_params = 0;
3388 sector_t progress, resync_max_sectors, resync_mismatches;
3389 const char *sync_action;
3390 struct raid_type *rt;
3391
3392 switch (type) {
3393 case STATUSTYPE_INFO:
3394
3395 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
3396 if (!rt)
3397 return;
3398
3399 DMEMIT("%s %d ", rt->name, mddev->raid_disks);
3400
3401
3402 smp_rmb();
3403
3404 resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
3405 mddev->resync_max_sectors : mddev->dev_sectors;
3406 progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
3407 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
3408 atomic64_read(&mddev->resync_mismatches) : 0;
3409 sync_action = decipher_sync_action(&rs->md);
3410
3411
3412 for (i = 0; i < rs->raid_disks; i++)
3413 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev, array_in_sync));
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
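		/*
		 * In-sync/reshape ratio:
		 *  The in-sync ratio shows the progress of initializing the
		 *  raid set or rebuilding a subset of its devices; the user
		 *  can distinguish the two via the status characters above.
		 *  The reshape ratio shows the progress of changing the raid
		 *  layout or the number of disks of a raid set.
		 */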
3427 DMEMIT(" %llu/%llu", (unsigned long long) progress,
3428 (unsigned long long) resync_max_sectors);
3429
3430
3431
3432
3433
3434
3435
3436
3437 DMEMIT(" %s", sync_action);
3438
3439
3440
3441
3442
3443
3444
3445
3446 DMEMIT(" %llu", (unsigned long long) resync_mismatches);
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
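		/*
		 * data_offset (needed for out-of-place reshaping):
		 *  This field shows the data offset into the data image LV
		 *  where the first stripe's data starts.  data_offset is kept
		 *  equal on all raid disks of the set, so reporting it from
		 *  the first raid disk is sufficient.
		 */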
3458 DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
3459
3460
3461
3462
3463 DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
3464 __raid_dev_status(rs, &rs->journal_dev.rdev, 0) : "-");
3465 break;
3466
3467 case STATUSTYPE_TABLE:
3468
3469
3470
3471 for (i = 0; i < rs->raid_disks; i++)
3472 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
3473 write_mostly_params += 2;
3474 rebuild_disks = memweight(rs->rebuild_disks, DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks));
3475 raid_param_cnt += rebuild_disks * 2 +
3476 write_mostly_params +
3477 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
3478 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2 +
3479 (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ? 2 : 0) +
3480 (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags) ? 2 : 0);
3481
3482
3483
3484 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
3485 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
3486 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
3487 if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
3488 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
3489 if (rebuild_disks)
3490 for (i = 0; i < rs->raid_disks; i++)
3491 if (test_bit(rs->dev[i].rdev.raid_disk, (void *) rs->rebuild_disks))
3492 DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
3493 rs->dev[i].rdev.raid_disk);
3494 if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
3495 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
3496 mddev->bitmap_info.daemon_sleep);
3497 if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
3498 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
3499 mddev->sync_speed_min);
3500 if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
3501 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
3502 mddev->sync_speed_max);
3503 if (write_mostly_params)
3504 for (i = 0; i < rs->raid_disks; i++)
3505 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
3506 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
3507 rs->dev[i].rdev.raid_disk);
3508 if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
3509 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
3510 mddev->bitmap_info.max_write_behind);
3511 if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
3512 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
3513 max_nr_stripes);
3514 if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
3515 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
3516 (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
3517 if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
3518 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
3519 raid10_md_layout_to_copies(mddev->layout));
3520 if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
3521 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
3522 raid10_md_layout_to_format(mddev->layout));
3523 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
3524 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
3525 max(rs->delta_disks, mddev->delta_disks));
3526 if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
3527 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
3528 (unsigned long long) rs->data_offset);
3529 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags))
3530 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV),
3531 __get_dev_name(rs->journal_dev.dev));
3532 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags))
3533 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE),
3534 md_journal_mode_to_dm_raid(rs->journal_dev.mode));
3535 DMEMIT(" %d", rs->raid_disks);
3536 for (i = 0; i < rs->raid_disks; i++)
3537 DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev),
3538 __get_dev_name(rs->dev[i].data_dev));
3539 }
3540}
3541
3542static int raid_message(struct dm_target *ti, unsigned int argc, char **argv)
3543{
3544 struct raid_set *rs = ti->private;
3545 struct mddev *mddev = &rs->md;
3546
3547 if (!mddev->pers || !mddev->pers->sync_request)
3548 return -EINVAL;
3549
3550 if (!strcasecmp(argv[0], "frozen"))
3551 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3552 else
3553 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3554
3555 if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
3556 if (mddev->sync_thread) {
3557 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3558 md_reap_sync_thread(mddev);
3559 }
3560 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3561 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3562 return -EBUSY;
3563 else if (!strcasecmp(argv[0], "resync"))
3564 ;
3565 else if (!strcasecmp(argv[0], "recover"))
3566 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3567 else {
3568 if (!strcasecmp(argv[0], "check")) {
3569 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3570 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3571 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3572 } else if (!strcasecmp(argv[0], "repair")) {
3573 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3574 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3575 } else
3576 return -EINVAL;
3577 }
3578 if (mddev->ro == 2) {
3579
3580
3581
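		/*
		 * A write to sync_action is enough to justify
		 * canceling read-auto mode.
		 */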
3582 mddev->ro = 0;
3583 if (!mddev->suspended && mddev->sync_thread)
3584 md_wakeup_thread(mddev->sync_thread);
3585 }
3586 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3587 if (!mddev->suspended && mddev->thread)
3588 md_wakeup_thread(mddev->thread);
3589
3590 return 0;
3591}
3592
3593static int raid_iterate_devices(struct dm_target *ti,
3594 iterate_devices_callout_fn fn, void *data)
3595{
3596 struct raid_set *rs = ti->private;
3597 unsigned int i;
3598 int r = 0;
3599
3600 for (i = 0; !r && i < rs->md.raid_disks; i++)
3601 if (rs->dev[i].data_dev)
3602 r = fn(ti,
3603 rs->dev[i].data_dev,
3604 0,
3605 rs->md.dev_sectors,
3606 data);
3607
3608 return r;
3609}
3610
3611static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
3612{
3613 struct raid_set *rs = ti->private;
3614 unsigned int chunk_size = to_bytes(rs->md.chunk_sectors);
3615
3616 blk_limits_io_min(limits, chunk_size);
3617 blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
3618}
3619
3620static void raid_presuspend(struct dm_target *ti)
3621{
3622 struct raid_set *rs = ti->private;
3623
3624 md_stop_writes(&rs->md);
3625}
3626
3627static void raid_postsuspend(struct dm_target *ti)
3628{
3629 struct raid_set *rs = ti->private;
3630
3631 if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3632 mddev_suspend(&rs->md);
3633
3634 rs->md.ro = 1;
3635}
3636
3637static void attempt_restore_of_faulty_devices(struct raid_set *rs)
3638{
3639 int i;
3640 uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
3641 unsigned long flags;
3642 bool cleared = false;
3643 struct dm_raid_superblock *sb;
3644 struct mddev *mddev = &rs->md;
3645 struct md_rdev *r;
3646
3647
3648 if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
3649 return;
3650
3651 memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
3652
3653 for (i = 0; i < mddev->raid_disks; i++) {
3654 r = &rs->dev[i].rdev;
3655
3656 if (test_bit(Journal, &r->flags))
3657 continue;
3658
3659 if (test_bit(Faulty, &r->flags) &&
3660 r->meta_bdev && !read_disk_sb(r, r->sb_size, true)) {
3661 DMINFO("Faulty %s device #%d has readable super block."
3662 " Attempting to revive it.",
3663 rs->raid_type->name, i);
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
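			/*
			 * The Faulty bit may be set, but sometimes the array
			 * can be suspended before the personalities can
			 * respond by removing the device from the array
			 * (i.e. calling 'hot_remove_disk').  If they haven't
			 * yet removed the failed device, its 'raid_disk'
			 * number will be '>= 0', meaning we must 'slot'
			 * (re-add) it back in.
			 */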
3674 flags = r->flags;
3675 clear_bit(In_sync, &r->flags);
3676 if (r->raid_disk >= 0) {
3677 if (mddev->pers->hot_remove_disk(mddev, r)) {
3678
3679 r->flags = flags;
3680 continue;
3681 }
3682 } else
3683 r->raid_disk = r->saved_raid_disk = i;
3684
3685 clear_bit(Faulty, &r->flags);
3686 clear_bit(WriteErrorSeen, &r->flags);
3687
3688 if (mddev->pers->hot_add_disk(mddev, r)) {
3689
3690 r->raid_disk = r->saved_raid_disk = -1;
3691 r->flags = flags;
3692 } else {
3693 clear_bit(In_sync, &r->flags);
3694 r->recovery_offset = 0;
3695 set_bit(i, (void *) cleared_failed_devices);
3696 cleared = true;
3697 }
3698 }
3699 }
3700
3701
3702 if (cleared) {
3703 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
3704
3705 rdev_for_each(r, &rs->md) {
3706 if (test_bit(Journal, &r->flags))
3707 continue;
3708
3709 sb = page_address(r->sb_page);
3710 sb_retrieve_failed_devices(sb, failed_devices);
3711
3712 for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
3713 failed_devices[i] &= ~cleared_failed_devices[i];
3714
3715 sb_update_failed_devices(sb, failed_devices);
3716 }
3717 }
3718}
3719
3720static int __load_dirty_region_bitmap(struct raid_set *rs)
3721{
3722 int r = 0;
3723
3724
3725 if (!rs_is_raid0(rs) &&
3726 !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
3727 r = bitmap_load(&rs->md);
3728 if (r)
3729 DMERR("Failed to load bitmap");
3730 }
3731
3732 return r;
3733}
3734
3735
3736static void rs_update_sbs(struct raid_set *rs)
3737{
3738 struct mddev *mddev = &rs->md;
3739 int ro = mddev->ro;
3740
3741 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3742 mddev->ro = 0;
3743 md_update_sb(mddev, 1);
3744 mddev->ro = ro;
3745}
3746
3747
3748
3749
3750
3751
3752
3753
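/*
 * Reshape changes the raid algorithm of @rs to a new one within its
 * personality (e.g. raid6_zr -> raid6_nc), changes the stripe size,
 * or adds/removes disks from the raid set, thus growing or shrinking it.
 *
 * Call mddev_lock_nointr() before!
 */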
3754static int rs_start_reshape(struct raid_set *rs)
3755{
3756 int r;
3757 struct mddev *mddev = &rs->md;
3758 struct md_personality *pers = mddev->pers;
3759
3760 r = rs_setup_reshape(rs);
3761 if (r)
3762 return r;
3763
3764
3765 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3766 mddev_resume(mddev);
3767
3768
3769
3770
3771
3772
3773 r = pers->check_reshape(mddev);
3774 if (r) {
3775 rs->ti->error = "pers->check_reshape() failed";
3776 return r;
3777 }
3778
3779
3780
3781
3782
3783 if (pers->start_reshape) {
3784 r = pers->start_reshape(mddev);
3785 if (r) {
3786 rs->ti->error = "pers->start_reshape() failed";
3787 return r;
3788 }
3789 }
3790
3791
3792 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3793 mddev_suspend(mddev);
3794
3795
3796
3797
3798
3799
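	/*
	 * Now that the reshape is set up, update the superblocks to
	 * reflect it, so that a table reload will access proper
	 * superblock content in the constructor.
	 */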
3800 rs_update_sbs(rs);
3801
3802 return 0;
3803}
3804
3805static int raid_preresume(struct dm_target *ti)
3806{
3807 int r;
3808 struct raid_set *rs = ti->private;
3809 struct mddev *mddev = &rs->md;
3810
3811
3812 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
3813 return 0;
3814
3815
3816
3817
3818
3819
3820
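	/*
	 * The superblocks need to be updated on disk if the array is new
	 * or new devices got added (thus zeroed out by userspace), or
	 * __load_dirty_region_bitmap will overwrite them in core with
	 * old data or fail.
	 */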
3821 if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
3822 rs_update_sbs(rs);
3823
3824
3825 r = __load_dirty_region_bitmap(rs);
3826 if (r)
3827 return r;
3828
3829
3830 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
3831 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
3832 r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
3833 to_bytes(rs->requested_bitmap_chunk_sectors), 0);
3834 if (r)
3835 DMERR("Failed to resize bitmap");
3836 }
3837
3838
3839
3840 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3841 if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
3842 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3843 mddev->resync_min = mddev->recovery_cp;
3844 }
3845
3846 rs_set_capacity(rs);
3847
3848
3849 if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
3850
3851 mddev_lock_nointr(mddev);
3852 r = rs_start_reshape(rs);
3853 mddev_unlock(mddev);
3854 if (r)
3855 DMWARN("Failed to check/start reshape, continuing without change");
3856 r = 0;
3857 }
3858
3859 return r;
3860}
3861
3862static void raid_resume(struct dm_target *ti)
3863{
3864 struct raid_set *rs = ti->private;
3865 struct mddev *mddev = &rs->md;
3866
3867 if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
3868
3869
3870
3871
3872
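		/*
		 * This is a secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */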
3873 attempt_restore_of_faulty_devices(rs);
3874 }
3875
3876 mddev->ro = 0;
3877 mddev->in_sync = 0;
3878
3879
3880
3881
3882
3883
3884
3885
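	/*
	 * Keep the raid set frozen if reshape/rebuild flags are set.
	 * The raid set is unfrozen once the next table load/resume,
	 * which clears those flags, occurs.  This ensures that the
	 * constructor for the inactive table retrieves an up-to-date
	 * reshape_position.
	 */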
3886 if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
3887 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3888
3889 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3890 mddev_resume(mddev);
3891}
3892
3893static struct target_type raid_target = {
3894 .name = "raid",
3895 .version = {1, 12, 1},
3896 .module = THIS_MODULE,
3897 .ctr = raid_ctr,
3898 .dtr = raid_dtr,
3899 .map = raid_map,
3900 .status = raid_status,
3901 .message = raid_message,
3902 .iterate_devices = raid_iterate_devices,
3903 .io_hints = raid_io_hints,
3904 .presuspend = raid_presuspend,
3905 .postsuspend = raid_postsuspend,
3906 .preresume = raid_preresume,
3907 .resume = raid_resume,
3908};
3909
3910static int __init dm_raid_init(void)
3911{
3912 DMINFO("Loading target version %u.%u.%u",
3913 raid_target.version[0],
3914 raid_target.version[1],
3915 raid_target.version[2]);
3916 return dm_register_target(&raid_target);
3917}
3918
3919static void __exit dm_raid_exit(void)
3920{
3921 dm_unregister_target(&raid_target);
3922}
3923
3924module_init(dm_raid_init);
3925module_exit(dm_raid_exit);
3926
3927module_param(devices_handle_discard_safely, bool, 0644);
3928MODULE_PARM_DESC(devices_handle_discard_safely,
3929 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
3930
3931MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
3932MODULE_ALIAS("dm-raid0");
3933MODULE_ALIAS("dm-raid1");
3934MODULE_ALIAS("dm-raid10");
3935MODULE_ALIAS("dm-raid4");
3936MODULE_ALIAS("dm-raid5");
3937MODULE_ALIAS("dm-raid6");
3938MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
3939MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
3940MODULE_LICENSE("GPL");
3941