/*
 * dm-raid: device-mapper target wrapping the MD RAID personalities
 * (raid0, raid1, raid10 and raid4/5/6).
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES 253 /* md-raid kernel limit */

/* Minimum sectors of free reshape space per raid device */
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)

/* Minimum journal device space: 4 MiB expressed in sectors */
#define MIN_RAID456_JOURNAL_SPACE (4*2048)

static bool devices_handle_discard_safely = false;

/*
 * Private rdev flag claimed by this target; must not collide with
 * the md core flag bits defined in md.h.
 */
#define FirstUse 10 /* rdev flag */
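
/*
 * How this target fits together: the constructor parses the dm table
 * line into a struct raid_set (see raid_set_alloc() below), wires each
 * metadata/data device pair into an md_rdev, and then hands the
 * embedded struct mddev to the md core, which runs the actual RAID
 * personality.
 */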
struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity.  The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

/*
 * Bits for establishing rs->ctr_flags
 *
 * 1 = no flag value
 * 2 = flag with value
 */
#define __CTR_FLAG_SYNC 0 /* 1 */
#define __CTR_FLAG_NOSYNC 1 /* 1 */
#define __CTR_FLAG_REBUILD 2 /* 2 */
#define __CTR_FLAG_DAEMON_SLEEP 3 /* 2 */
#define __CTR_FLAG_MIN_RECOVERY_RATE 4 /* 2 */
#define __CTR_FLAG_MAX_RECOVERY_RATE 5 /* 2 */
#define __CTR_FLAG_MAX_WRITE_BEHIND 6 /* 2 */
#define __CTR_FLAG_WRITE_MOSTLY 7 /* 2 */
#define __CTR_FLAG_STRIPE_CACHE 8 /* 2 */
#define __CTR_FLAG_REGION_SIZE 9 /* 2 */
#define __CTR_FLAG_RAID10_COPIES 10 /* 2 */
#define __CTR_FLAG_RAID10_FORMAT 11 /* 2 */
/* Reshape support */
#define __CTR_FLAG_DELTA_DISKS 12 /* 2 */
#define __CTR_FLAG_DATA_OFFSET 13 /* 2 */
#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 1 */

/* raid4/5/6 journal device (write hole closing log); raid4/5/6 only */
#define __CTR_FLAG_JOURNAL_DEV 15 /* 2 */

/* raid4/5/6 journal mode; only valid together with 'journal_dev' */
#define __CTR_FLAG_JOURNAL_MODE 16 /* 2 */

/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC (1 << __CTR_FLAG_SYNC)
#define CTR_FLAG_NOSYNC (1 << __CTR_FLAG_NOSYNC)
#define CTR_FLAG_REBUILD (1 << __CTR_FLAG_REBUILD)
#define CTR_FLAG_DAEMON_SLEEP (1 << __CTR_FLAG_DAEMON_SLEEP)
#define CTR_FLAG_MIN_RECOVERY_RATE (1 << __CTR_FLAG_MIN_RECOVERY_RATE)
#define CTR_FLAG_MAX_RECOVERY_RATE (1 << __CTR_FLAG_MAX_RECOVERY_RATE)
#define CTR_FLAG_MAX_WRITE_BEHIND (1 << __CTR_FLAG_MAX_WRITE_BEHIND)
#define CTR_FLAG_WRITE_MOSTLY (1 << __CTR_FLAG_WRITE_MOSTLY)
#define CTR_FLAG_STRIPE_CACHE (1 << __CTR_FLAG_STRIPE_CACHE)
#define CTR_FLAG_REGION_SIZE (1 << __CTR_FLAG_REGION_SIZE)
#define CTR_FLAG_RAID10_COPIES (1 << __CTR_FLAG_RAID10_COPIES)
#define CTR_FLAG_RAID10_FORMAT (1 << __CTR_FLAG_RAID10_FORMAT)
#define CTR_FLAG_DELTA_DISKS (1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET (1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
#define CTR_FLAG_JOURNAL_DEV (1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE (1 << __CTR_FLAG_JOURNAL_MODE)

/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 * per raid level.
 */

/* Define all any sync flags */
#define CTR_FLAGS_ANY_SYNC (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS (CTR_FLAGS_ANY_SYNC | \
				  CTR_FLAG_RAID10_USE_NEAR_SETS)

/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
				  CTR_FLAG_WRITE_MOSTLY | \
				  CTR_FLAG_DAEMON_SLEEP | \
				  CTR_FLAG_MIN_RECOVERY_RATE | \
				  CTR_FLAG_MAX_RECOVERY_RATE | \
				  CTR_FLAG_MAX_WRITE_BEHIND | \
				  CTR_FLAG_STRIPE_CACHE | \
				  CTR_FLAG_REGION_SIZE | \
				  CTR_FLAG_RAID10_COPIES | \
				  CTR_FLAG_RAID10_FORMAT | \
				  CTR_FLAG_DELTA_DISKS | \
				  CTR_FLAG_DATA_OFFSET)

/* Valid options with arguments per raid level */

/* "raid0" only accepts data_offset */
#define RAID0_VALID_FLAGS (CTR_FLAG_DATA_OFFSET)

/* "raid1" does not accept stripe cache or any raid10 options */
#define RAID1_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
			   CTR_FLAG_REBUILD | \
			   CTR_FLAG_WRITE_MOSTLY | \
			   CTR_FLAG_DAEMON_SLEEP | \
			   CTR_FLAG_MIN_RECOVERY_RATE | \
			   CTR_FLAG_MAX_RECOVERY_RATE | \
			   CTR_FLAG_MAX_WRITE_BEHIND | \
			   CTR_FLAG_REGION_SIZE | \
			   CTR_FLAG_DELTA_DISKS | \
			   CTR_FLAG_DATA_OFFSET)

/* "raid10" accepts the raid10 layout options but no raid1 specific ones */
#define RAID10_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
			    CTR_FLAG_REBUILD | \
			    CTR_FLAG_DAEMON_SLEEP | \
			    CTR_FLAG_MIN_RECOVERY_RATE | \
			    CTR_FLAG_MAX_RECOVERY_RATE | \
			    CTR_FLAG_REGION_SIZE | \
			    CTR_FLAG_RAID10_COPIES | \
			    CTR_FLAG_RAID10_FORMAT | \
			    CTR_FLAG_DELTA_DISKS | \
			    CTR_FLAG_DATA_OFFSET | \
			    CTR_FLAG_RAID10_USE_NEAR_SETS)

/*
 * "raid4/5/6" do not accept any raid1 or raid10 specific options
 *
 * "raid6" does not accept "nosync", because it is not guaranteed
 * that both parity and q-syndrome are being written properly with
 * any writes
 */
#define RAID45_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
			    CTR_FLAG_REBUILD | \
			    CTR_FLAG_DAEMON_SLEEP | \
			    CTR_FLAG_MIN_RECOVERY_RATE | \
			    CTR_FLAG_MAX_RECOVERY_RATE | \
			    CTR_FLAG_STRIPE_CACHE | \
			    CTR_FLAG_REGION_SIZE | \
			    CTR_FLAG_DELTA_DISKS | \
			    CTR_FLAG_DATA_OFFSET | \
			    CTR_FLAG_JOURNAL_DEV | \
			    CTR_FLAG_JOURNAL_MODE)

#define RAID6_VALID_FLAGS (CTR_FLAG_SYNC | \
			   CTR_FLAG_REBUILD | \
			   CTR_FLAG_DAEMON_SLEEP | \
			   CTR_FLAG_MIN_RECOVERY_RATE | \
			   CTR_FLAG_MAX_RECOVERY_RATE | \
			   CTR_FLAG_STRIPE_CACHE | \
			   CTR_FLAG_REGION_SIZE | \
			   CTR_FLAG_DELTA_DISKS | \
			   CTR_FLAG_DATA_OFFSET | \
			   CTR_FLAG_JOURNAL_DEV | \
			   CTR_FLAG_JOURNAL_MODE)

/*
 * Flags for rs->runtime_flags field
 * (RT_FLAG prefix meaning "runtime flag")
 *
 * These are all internal and used to define runtime state,
 * e.g. to prevent another resume from preresume processing
 * the raid set all over again.
 */
#define RT_FLAG_RS_PRERESUMED 0
#define RT_FLAG_RS_RESUMED 1
#define RT_FLAG_RS_BITMAP_LOADED 2
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
#define RT_FLAG_RS_SUSPENDED 5
#define RT_FLAG_RS_IN_SYNC 6
#define RT_FLAG_RS_RESYNCING 7

/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)

/*
 * raid set level, layout and chunk sectors backup/restore
 */
struct rs_layout {
	int new_level;
	int new_layout;
	int new_chunk_sectors;
};

struct raid_set {
	struct dm_target *ti;

	uint32_t stripe_cache_entries;
	unsigned long ctr_flags;
	unsigned long runtime_flags;

	uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

	int raid_disks;
	int delta_disks;
	int data_offset;
	int raid10_copies;
	int requested_bitmap_chunk_sectors;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	/* Optional raid4/5/6 journal device */
	struct journal_dev {
		struct dm_dev *dev;
		struct md_rdev rdev;
		int mode;
	} journal_dev;

	struct raid_dev dev[];
};
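
/*
 * rs_config_backup()/rs_config_restore() below bracket transient
 * changes to mddev->new_{level,layout,chunk_sectors}, e.g. while
 * probing whether a requested takeover or reshape is valid, so the
 * original request can be re-established afterwards.
 */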

static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	l->new_level = mddev->new_level;
	l->new_layout = mddev->new_layout;
	l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = l->new_level;
	mddev->new_layout = l->new_layout;
	mddev->new_chunk_sectors = l->new_chunk_sectors;
}

/* raid10 algorithm and format variants */
#define ALGORITHM_RAID10_DEFAULT 0
#define ALGORITHM_RAID10_NEAR 1
#define ALGORITHM_RAID10_OFFSET 2
#define ALGORITHM_RAID10_FAR 3

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned int parity_devs;	/* # of parity devices. */
	const unsigned int minimal_devs;/* minimal # of devices in set. */
	const unsigned int level;	/* RAID level. */
	const unsigned int algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */},
	{"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */},
	{"raid10_far", "raid10 far (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_FAR},
	{"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
	{"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
	{"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
	{"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
	{"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
	{"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
	{"raid5_la", "raid5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "raid5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid6_zr", "raid6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "raid6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "raid6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE},
	{"raid6_n_6", "raid6 (dedicated parity/Q n/6)", 2, 4, 6, ALGORITHM_PARITY_N_6},
	{"raid6_ls_6", "raid6 (left symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_SYMMETRIC_6},
	{"raid6_rs_6", "raid6 (right symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_SYMMETRIC_6},
	{"raid6_la_6", "raid6 (left asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_ASYMMETRIC_6},
	{"raid6_ra_6", "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_ASYMMETRIC_6}
};

/* True, if @v is in inclusive range [@min, @max] */
static bool __within_range(long v, long min, long max)
{
	return v >= min && v <= max;
}

/* All table line arguments are defined here */
static struct arg_name_flag {
	const unsigned long flag;
	const char *name;
} __arg_name_flags[] = {
	{ CTR_FLAG_SYNC, "sync"},
	{ CTR_FLAG_NOSYNC, "nosync"},
	{ CTR_FLAG_REBUILD, "rebuild"},
	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
	{ CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
	{ CTR_FLAG_REGION_SIZE, "region_size"},
	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
	{ CTR_FLAG_DATA_OFFSET, "data_offset"},
	{ CTR_FLAG_DELTA_DISKS, "delta_disks"},
	{ CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
	{ CTR_FLAG_JOURNAL_DEV, "journal_dev" },
	{ CTR_FLAG_JOURNAL_MODE, "journal_mode" },
};

/* Return argument name string for given @flag */
static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
{
	if (hweight32(flag) == 1) {
		struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);

		while (anf-- > __arg_name_flags)
			if (flag & anf->flag)
				return anf->name;

	} else
		DMERR("%s called with more than one flag!", __func__);

	return NULL;
}

/* Correlation of dm-raid table line journal mode parameters and MD journal modes */
static struct {
	const int mode;
	const char *param;
} _raid456_journal_mode[] = {
	{ R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
	{ R5C_JOURNAL_MODE_WRITE_BACK, "writeback" }
};

/* Return MD raid4/5/6 journal mode for dm @mode string */
static int dm_raid_journal_mode_to_md(const char *mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (!strcasecmp(mode, _raid456_journal_mode[m].param))
			return _raid456_journal_mode[m].mode;

	return -EINVAL;
}

/* Return dm-raid raid4/5/6 journal mode string for MD @mode */
static const char *md_journal_mode_to_dm_raid(const int mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (mode == _raid456_journal_mode[m].mode)
			return _raid456_journal_mode[m].param;

	return "unknown";
}

/*
 * Bool helpers to test for various raid levels of a raid set.
 * It's the level as reported by the superblock rather than the
 * requested raid_type passed to the constructor.
 */
/* Return true, if raid set in @rs is raid0 */
static bool rs_is_raid0(struct raid_set *rs)
{
	return !rs->md.level;
}

/* Return true, if raid set in @rs is raid1 */
static bool rs_is_raid1(struct raid_set *rs)
{
	return rs->md.level == 1;
}

/* Return true, if raid set in @rs is raid10 */
static bool rs_is_raid10(struct raid_set *rs)
{
	return rs->md.level == 10;
}

/* Return true, if raid set in @rs is level 6 */
static bool rs_is_raid6(struct raid_set *rs)
{
	return rs->md.level == 6;
}

/* Return true, if raid set in @rs is level 4, 5 or 6 */
static bool rs_is_raid456(struct raid_set *rs)
{
	return __within_range(rs->md.level, 4, 6);
}

/* Return true, if raid set in @rs is reshapable */
static bool __is_raid10_far(int layout);
static bool rs_is_reshapable(struct raid_set *rs)
{
	return rs_is_raid456(rs) ||
	       (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
}

/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
	return rs->md.recovery_cp < rs->md.dev_sectors;
}

/* Return true, if raid set in @rs is reshaping */
static bool rs_is_reshaping(struct raid_set *rs)
{
	return rs->md.reshape_position != MaxSector;
}
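
/*
 * Note the split: the rs_is_*() helpers above test the *current* MD
 * level recorded in rs->md (what the array actually is right now),
 * while the rt_is_*() helpers below test the raid_type requested on
 * the table line.  The two differ while a takeover is pending.
 */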

/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
	return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
	return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
	return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4/5 */
static bool rt_is_raid45(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
	return rt->level == 6;
}

/* Return true, if raid type in @rt is raid4/5/6 */
static bool rt_is_raid456(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 6);
}
/* END: raid level bools */

/* Return valid ctr flags for the raid level of @rs */
static unsigned long __valid_flags(struct raid_set *rs)
{
	if (rt_is_raid0(rs->raid_type))
		return RAID0_VALID_FLAGS;
	else if (rt_is_raid1(rs->raid_type))
		return RAID1_VALID_FLAGS;
	else if (rt_is_raid10(rs->raid_type))
		return RAID10_VALID_FLAGS;
	else if (rt_is_raid45(rs->raid_type))
		return RAID45_VALID_FLAGS;
	else if (rt_is_raid6(rs->raid_type))
		return RAID6_VALID_FLAGS;

	return 0;
}

/*
 * Check for valid flags set on @rs
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_valid_flags(struct raid_set *rs)
{
	if (rs->ctr_flags & ~__valid_flags(rs)) {
		rs->ti->error = "Invalid flags combination";
		return -EINVAL;
	}

	return 0;
}
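
/*
 * Example: a table line requesting "raid1" together with
 * "stripe_cache 1024" fails here, because CTR_FLAG_STRIPE_CACHE is
 * not part of RAID1_VALID_FLAGS; the constructor then returns
 * -EINVAL with "Invalid flags combination".
 */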

/* MD raid10 bit definitions and helpers */
#define RAID10_OFFSET (1 << 16) /* stripes with data copies are interleaved with original stripes */
#define RAID10_BROKEN_USE_FAR_SETS (1 << 17)
#define RAID10_USE_FAR_SETS (1 << 18)
#define RAID10_FAR_COPIES_SHIFT 8 /* raid10 # far copies shift (2nd byte of layout) */

/* Return md raid10 near copies for @layout */
static unsigned int __raid10_near_copies(int layout)
{
	return layout & 0xFF;
}

/* Return md raid10 far copies for @layout */
static unsigned int __raid10_far_copies(int layout)
{
	return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

/* Return true if md raid10 offset for @layout */
static bool __is_raid10_offset(int layout)
{
	return !!(layout & RAID10_OFFSET);
}

/* Return true if md raid10 near for @layout */
static bool __is_raid10_near(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
}

/* Return true if md raid10 far for @layout */
static bool __is_raid10_far(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
}

/* Return md raid10 layout string for @layout */
static const char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 stands for "offset"
	 * (i.e. adjacent stripes hold copies)
	 *
	 * Refer to MD's raid10.c for details
	 */
	if (__is_raid10_offset(layout))
		return "offset";

	if (__raid10_near_copies(layout) > 1)
		return "near";

	if (__raid10_far_copies(layout) > 1)
		return "far";

	return "unknown";
}

/* Return md raid10 algorithm for @name */
static int raid10_name_to_format(const char *name)
{
	if (!strcasecmp(name, "near"))
		return ALGORITHM_RAID10_NEAR;
	else if (!strcasecmp(name, "offset"))
		return ALGORITHM_RAID10_OFFSET;
	else if (!strcasecmp(name, "far"))
		return ALGORITHM_RAID10_FAR;

	return -EINVAL;
}

/* Return md raid10 copies for @layout */
static unsigned int raid10_md_layout_to_copies(int layout)
{
	return max(__raid10_near_copies(layout), __raid10_far_copies(layout));
}

/* Return md raid10 layout word for @algorithm and @copies */
static int raid10_format_to_md_layout(struct raid_set *rs,
				      unsigned int algorithm,
				      unsigned int copies)
{
	unsigned int n = 1, f = 1, r = 0;

	/*
	 * MD resilience flaw:
	 *
	 * enabling use_far_sets for far/offset formats causes copies
	 * to be colocated on the same devs together with their origins!
	 *
	 * -> the 'raid10_use_near_sets' ctr flag allows opting out below
	 */
	if (algorithm == ALGORITHM_RAID10_DEFAULT ||
	    algorithm == ALGORITHM_RAID10_NEAR)
		n = copies;

	else if (algorithm == ALGORITHM_RAID10_OFFSET) {
		f = copies;
		r = RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else if (algorithm == ALGORITHM_RAID10_FAR) {
		f = copies;
		r = !RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else
		return -EINVAL;

	return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}
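
/*
 * Worked example of the resulting layout word: "raid10_near" with 2
 * copies yields n = 2, f = 1, r = 0, i.e. (1 << 8) | 2 = 0x102.
 * "raid10_offset" with 2 copies (and without 'raid10_use_near_sets')
 * yields RAID10_OFFSET | RAID10_USE_FAR_SETS | (2 << 8) | 1 = 0x50201.
 */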

/* Check whether @rtp's raid10 algorithm matches @layout */
static bool __got_raid10(struct raid_type *rtp, const int layout)
{
	if (rtp->level == 10) {
		switch (rtp->algorithm) {
		case ALGORITHM_RAID10_DEFAULT:
		case ALGORITHM_RAID10_NEAR:
			return __is_raid10_near(layout);
		case ALGORITHM_RAID10_OFFSET:
			return __is_raid10_offset(layout);
		case ALGORITHM_RAID10_FAR:
			return __is_raid10_far(layout);
		default:
			break;
		}
	}

	return false;
}

/* Return raid_type for @name */
static struct raid_type *get_raid_type(const char *name)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types)
		if (!strcasecmp(rtp->name, name))
			return rtp;

	return NULL;
}

/* Return raid_type derived from @level and @layout */
static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types) {
		/* RAID10 special checks based on @layout flags/properties */
		if (rtp->level == level &&
		    (__got_raid10(rtp, layout) || rtp->algorithm == layout))
			return rtp;
	}

	return NULL;
}

/* Adjust rdev sectors */
static void rs_set_rdev_sectors(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;

	/*
	 * raid10 sets rdev->sectors to the device size, which
	 * is unintended in case of out-of-place reshaping
	 */
	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags))
			rdev->sectors = mddev->dev_sectors;
}

/*
 * Change bdev capacity of @rs in case of a disk add/remove reshape
 */
static void rs_set_capacity(struct raid_set *rs)
{
	struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

	set_capacity(gendisk, rs->md.array_sectors);
	revalidate_disk(gendisk);
}

/*
 * Set the mddev properties in @rs to the current
 * ones (i.e. cancel any requested change)
 */
static void rs_set_cur(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
}

/*
 * Set the mddev properties in @rs to the new
 * ones requested by the ctr
 */
static void rs_set_new(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->raid_disks = rs->raid_disks;
	mddev->delta_disks = 0;
}

static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
				       unsigned int raid_devs)
{
	unsigned int i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(struct_size(rs, dev, raid_devs), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->raid_disks = raid_devs;
	rs->delta_disks = 0;

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->stripe_cache_entries = 256;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = MaxSector;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}

/* Free all @rs resources (raid/journal devices and superblock pages) */
static void raid_set_free(struct raid_set *rs)
{
	int i;

	if (rs->journal_dev.dev) {
		md_rdev_clear(&rs->journal_dev.rdev);
		dm_put_device(rs->ti, rs->journal_dev.dev);
	}

	for (i = 0; i < rs->raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words.  If there is a failure,
 * the caller must use raid_set_free() to unwind the operations.
 */
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int r = 0;
	const char *arg;

	/* Put off the number of raid devices argument to get to dev pairs */
	arg = dm_shift_arg(as);
	if (!arg)
		return -EINVAL;

	for (i = 0; i < rs->raid_disks; i++) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets initially;
		 * out of place reshaping sets them later.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.new_data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "-")) {
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->dev[i].meta_dev);
			if (r) {
				rs->ti->error = "RAID metadata device lookup failure";
				return r;
			}

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page) {
				rs->ti->error = "Failed to allocate superblock page";
				return -ENOMEM;
			}
		}

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			if (rs->dev[i].meta_dev) {
				rs->ti->error = "No data device supplied with metadata device";
				return -EINVAL;
			}

			continue;
		}

		r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
				  &rs->dev[i].data_dev);
		if (r) {
			rs->ti->error = "RAID device lookup failure";
			return r;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (rs->journal_dev.dev)
		list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}
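
/*
 * Example device tuples as accepted above (paths are illustrative):
 *
 *   "- -"                   slot without devices, e.g. a failed leg;
 *                           rejected if that slot was designated for
 *                           rebuild, since a rebuild needs a device
 *   "- /dev/sdb1"           data device without a metadata device
 *   "/dev/sda1 /dev/sdb1"   metadata device followed by its data device
 *
 * A metadata device without a data device ("/dev/sda1 -") is rejected.
 */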

/*
 * validate_region_size
 * @rs
 * @region_size:  region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (rs_is_raid0(rs))
		return 0;

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = to_bytes(region_size);

	return 0;
}

/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned int i, rebuild_cnt = 0;
	unsigned int rebuilds_per_group = 0, copies;
	unsigned int group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->md.level) {
	case 0:
		break;
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.new_layout);
		if (copies < 2) {
			DMERR("Bogus raid10 data copies < 2!");
			return -EINVAL;
		}

		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled.  In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g. 5 devices, 2 near copies; data blocks lay out as:
		 *    dev1 dev2 dev3 dev4 dev5
		 *     A    A    B    B    C
		 *     C    D    D    E    E
		 */
		if (__is_raid10_near(rs->md.new_layout)) {
			for (i = 0; i < rs->md.raid_disks; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				if ((!rs->dev[i].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.  (Note that "far" and "offset" formats only
		 * support two copies right now.  These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.  This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}
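
/*
 * Worked example for the far/offset check above: 5 devices with 2
 * copies give group_size = 5 / 2 = 2 and
 * last_group_start = (5 / 2 - 1) * 2 = 2, so the device sets examined
 * are {0,1} and the larger trailing set {2,3,4}; two failures inside
 * either set exceed the two copies and take the too_many exit.
 */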

/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *    [delta_disks <+/-#disks>]		Number of disks to add to/remove from
 *					the raid set for a reshape
 *    [data_offset <sectors>]		Offset to the start of data on each
 *					device, to allow out-of-place reshaping
 *    [journal_dev <dev>]		raid4/5/6 journaling device
 *					(i.e. write hole closing log)
 *    [journal_mode <mode>]		raid4/5/6 journal mode
 *					("writethrough" or "writeback")
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm.  (Default: near)
 *    [raid10_use_near_sets]		Use near sets instead of far sets
 */
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
			     unsigned int num_raid_params)
{
	int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
	unsigned int raid10_copies = 2;
	unsigned int i, write_mostly = 0;
	unsigned int region_size = 0;
	sector_t max_io_len;
	const char *arg, *key;
	struct raid_dev *rd;
	struct raid_type *rt = rs->raid_type;

	arg = dm_shift_arg(as);
	num_raid_params--; /* Account for chunk_size argument */

	if (kstrtoint(arg, 10, &value) < 0) {
		rs->ti->error = "Bad numerical argument given for chunk_size";
		return -EINVAL;
	}

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if (rt_is_raid1(rt)) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset' to ensure that all non-rebuild devices
	 * are treated as in-sync.
	 *
	 * An argument of 'rebuild' (handled below) will clear the bit
	 * and zero the recovery_offset for the devices specified,
	 * causing them to be rebuilt.
	 *
	 * Superblock content, if present, may override these settings
	 * later during validation.
	 */
	for (i = 0; i < rs->raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		key = dm_shift_arg(as);
		if (!key) {
			rs->ti->error = "Not enough raid parameters given";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
			if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'nosync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
			if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'sync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed";
				return -EINVAL;
			}
			continue;
		}

		arg = dm_shift_arg(as);
		i++; /* Account for the key/value pair */
		if (!arg) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		/*
		 * Parameters that take a string value are checked here.
		 */
		/* "raid10_format {near|offset|far}" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_format' argument pair allowed";
				return -EINVAL;
			}
			if (!rt_is_raid10(rt)) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			raid10_format = raid10_name_to_format(arg);
			if (raid10_format < 0) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return raid10_format;
			}
			continue;
		}

		/* "journal_dev <dev>" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV))) {
			int r;
			struct md_rdev *jdev;

			if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 set journaling device allowed";
				return -EINVAL;
			}
			if (!rt_is_raid456(rt)) {
				rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->journal_dev.dev);
			if (r) {
				rs->ti->error = "raid4/5/6 journal device lookup failure";
				return r;
			}
			jdev = &rs->journal_dev.rdev;
			md_rdev_init(jdev);
			jdev->mddev = &rs->md;
			jdev->bdev = rs->journal_dev.dev->bdev;
			jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
			if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
				rs->ti->error = "No space for raid4/5/6 journal";
				return -ENOSPC;
			}
			rs->journal_dev.mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
			set_bit(Journal, &jdev->flags);
			continue;
		}

		/* "journal_mode <mode>" ("journal_dev" mandatory!) */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE))) {
			int r;

			if (!test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'";
				return -EINVAL;
			}
			if (test_and_set_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed";
				return -EINVAL;
			}
			r = dm_raid_journal_mode_to_md(arg);
			if (r < 0) {
				rs->ti->error = "Invalid 'journal_mode' argument";
				return r;
			}
			rs->journal_dev.mode = r;
			continue;
		}

		/*
		 * Parameters with number values from here on.
		 */
		if (kstrtoint(arg, 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
			/*
			 * "rebuild" is being passed in by userspace to provide
			 * indexes of replaced devices and to set up additional
			 * devices on raid level takeover.
			 */
			if (!__within_range(value, 0, rs->raid_disks - 1)) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}

			if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
				rs->ti->error = "rebuild for this index already given";
				return -EINVAL;
			}

			rd = rs->dev + value;
			clear_bit(In_sync, &rd->rdev.flags);
			clear_bit(Faulty, &rd->rdev.flags);
			rd->rdev.recovery_offset = 0;
			set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}

			if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
				rs->ti->error = "Invalid write_mostly index given";
				return -EINVAL;
			}

			write_mostly++;
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
			set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}

			if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_write_behind argument pair allowed";
				return -EINVAL;
			}

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			if (value < 0 || value / 2 > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}

			rs->md.bitmap_info.max_write_behind = value / 2;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
			if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
				rs->ti->error = "Only one daemon_sleep argument pair allowed";
				return -EINVAL;
			}
			if (value < 0) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
			/* Userspace passes new data_offset after having extended the data image LV */
			if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
				rs->ti->error = "Only one data_offset argument pair allowed";
				return -EINVAL;
			}
			/* Ensure sensible data offset */
			if (value < 0 ||
			    (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
				rs->ti->error = "Bogus data_offset value";
				return -EINVAL;
			}
			rs->data_offset = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
			/* Define the +/-# of disks to add to/remove from the given raid set */
			if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
				rs->ti->error = "Only one delta_disks argument pair allowed";
				return -EINVAL;
			}
			/* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */
			if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
				rs->ti->error = "Too many delta_disks requested";
				return -EINVAL;
			}

			rs->delta_disks = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
			if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
				rs->ti->error = "Only one stripe_cache argument pair allowed";
				return -EINVAL;
			}

			if (!rt_is_raid456(rt)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "Bogus stripe cache entries value";
				return -EINVAL;
			}
			rs->stripe_cache_entries = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one min_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
			if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
				rs->ti->error = "Only one region_size argument pair allowed";
				return -EINVAL;
			}

			region_size = value;
			rs->requested_bitmap_chunk_sectors = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid10_copies argument pair allowed";
				return -EINVAL;
			}

			if (!__within_range(value, 2, rs->md.raid_disks)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}

			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameter";
			return -EINVAL;
		}
	}

	if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
	    test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
		rs->ti->error = "sync and nosync are mutually exclusive";
		return -EINVAL;
	}

	if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) &&
	    (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ||
	     test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) {
		rs->ti->error = "sync/nosync and rebuild are mutually exclusive";
		return -EINVAL;
	}

	if (write_mostly >= rs->md.raid_disks) {
		rs->ti->error = "Can't set all raid1 devices to write_mostly";
		return -EINVAL;
	}

	if (rs->md.sync_speed_max &&
	    rs->md.sync_speed_min > rs->md.sync_speed_max) {
		rs->ti->error = "Bogus recovery rates";
		return -EINVAL;
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rt_is_raid10(rt)) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
		if (rs->md.new_layout < 0) {
			rs->ti->error = "Error getting raid10 format";
			return rs->md.new_layout;
		}

		rt = get_raid_type_by_ll(10, rs->md.new_layout);
		if (!rt) {
			rs->ti->error = "Failed to recognize new raid10 layout";
			return -EINVAL;
		}

		if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
		     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
		    test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
			rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
			return -EINVAL;
		}
	}

	rs->raid10_copies = raid10_copies;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	/* Check, if any invalid ctr arguments have been passed in for the raid level */
	return rs_check_for_valid_flags(rs);
}
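
/*
 * Example table line exercising the parser above (device paths and the
 * target length are illustrative only):
 *
 *   dmsetup create r5 --table "0 417792 raid raid5_ls 3 64 region_size 1024 \
 *       3 - /dev/sda1 - /dev/sdb1 - /dev/sdc1"
 *
 * i.e. a 3-leg raid5_ls set with 64-sector chunks, a 1024-sector bitmap
 * region, three raid parameters and no dedicated metadata devices.
 */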

/* Set raid4/5/6 cache size */
static int rs_set_raid456_stripe_cache(struct raid_set *rs)
{
	int r;
	struct r5conf *conf;
	struct mddev *mddev = &rs->md;
	uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
	uint32_t nr_stripes = rs->stripe_cache_entries;

	if (!rt_is_raid456(rs->raid_type)) {
		rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
		return -EINVAL;
	}

	if (nr_stripes < min_stripes) {
		DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
		       nr_stripes, min_stripes);
		nr_stripes = min_stripes;
	}

	conf = mddev->private;
	if (!conf) {
		rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
		return -EINVAL;
	}

	/* Try setting number of stripes in raid456 stripe cache */
	if (conf->min_nr_stripes != nr_stripes) {
		r = raid5_set_cache_size(mddev, nr_stripes);
		if (r) {
			rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
			return r;
		}

		DMINFO("%u stripe cache entries", nr_stripes);
	}

	return 0;
}

/* Number of data stripes based on mddev->raid_disks */
static unsigned int mddev_data_stripes(struct raid_set *rs)
{
	return rs->md.raid_disks - rs->raid_type->parity_devs;
}

/* Number of data stripes based on rs->raid_disks */
static unsigned int rs_data_stripes(struct raid_set *rs)
{
	return rs->raid_disks - rs->raid_type->parity_devs;
}

/*
 * Retrieve rdev->sectors from any valid raid device of @rs
 * to allow userspace to pass in arbitrary "- -" device tuples.
 */
static sector_t __rdev_sectors(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		struct md_rdev *rdev = &rs->dev[i].rdev;

		if (!test_bit(Journal, &rdev->flags) &&
		    rdev->bdev && rdev->sectors)
			return rdev->sectors;
	}

	return 0;
}
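
/*
 * Returning the first populated slot is sufficient because
 * rs_set_dev_and_array_sectors() below assigns the same dev_sectors
 * value to every non-journal rdev in the set.
 */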

/* Check that calculated dev_sectors fits all component devices */
static int _check_data_dev_sectors(struct raid_set *rs)
{
	sector_t ds = ~0;
	struct md_rdev *rdev;

	rdev_for_each(rdev, &rs->md)
		if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
			ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
			if (ds < rs->md.dev_sectors) {
				rs->ti->error = "Component device(s) too small";
				return -EINVAL;
			}
		}

	return 0;
}

/* Calculate the sectors per device and per array used for @rs */
static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
{
	int delta_disks;
	unsigned int data_stripes;
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;
	sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;

	if (use_mddev) {
		delta_disks = mddev->delta_disks;
		data_stripes = mddev_data_stripes(rs);
	} else {
		delta_disks = rs->delta_disks;
		data_stripes = rs_data_stripes(rs);
	}

	/* raid1 uses the full device size per mirror leg */
	if (rt_is_raid1(rs->raid_type))
		;
	else if (rt_is_raid10(rs->raid_type)) {
		if (rs->raid10_copies < 2 ||
		    delta_disks < 0) {
			rs->ti->error = "Bogus raid10 data copies or delta disks";
			return -EINVAL;
		}

		dev_sectors *= rs->raid10_copies;
		if (sector_div(dev_sectors, data_stripes))
			goto bad;

		array_sectors = (data_stripes + delta_disks) * dev_sectors;
		if (sector_div(array_sectors, rs->raid10_copies))
			goto bad;

	} else if (sector_div(dev_sectors, data_stripes))
		goto bad;

	else
		/* Striped layouts */
		array_sectors = (data_stripes + delta_disks) * dev_sectors;

	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags))
			rdev->sectors = dev_sectors;

	mddev->array_sectors = array_sectors;
	mddev->dev_sectors = dev_sectors;

	return _check_data_dev_sectors(rs);
bad:
	rs->ti->error = "Target length not divisible by number of data devices";
	return -EINVAL;
}

/* Set recovery checkpoint for @rs depending on raid level */
static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
	/* raid0 does not recover */
	if (rs_is_raid0(rs))
		rs->md.recovery_cp = MaxSector;
	/*
	 * A raid6 set has to be recovered either
	 * completely or for the grown part to
	 * ensure proper parity and Q-Syndrome
	 */
	else if (rs_is_raid6(rs))
		rs->md.recovery_cp = dev_sectors;
	/*
	 * Other raid levels run in sync
	 * or recover based on the 'nosync' ctr flag
	 */
	else
		rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
				     ? MaxSector : dev_sectors;
}

/* Setup recovery on @rs */
static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
	if (!dev_sectors)
		/* New raid set or 'sync' flag provided */
		__rs_setup_recovery(rs, 0);
	else if (dev_sectors == MaxSector)
		/* Prevent recovery */
		__rs_setup_recovery(rs, MaxSector);
	else if (__rdev_sectors(rs) < dev_sectors)
		/* Grown raid set */
		__rs_setup_recovery(rs, __rdev_sectors(rs));
	else
		__rs_setup_recovery(rs, MaxSector);
}

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	smp_rmb(); /* Make sure we access most recent reshape position */
	if (!rs_is_reshaping(rs)) {
		if (rs_is_raid10(rs))
			rs_set_rdev_sectors(rs);
		rs_set_capacity(rs);
	}
	dm_table_event(rs->ti->table);
}

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	return mddev_congested(&rs->md, bits);
}

/*
 * Make sure a valid takeover (level switch) is being requested on @rs
 *
 * Conversions of raid sets from one MD personality to another
 * have to conform to restrictions defined in md.c.
 */
static int rs_check_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	unsigned int near_copies;

	if (rs->md.degraded) {
		rs->ti->error = "Can't takeover degraded raid set";
		return -EPERM;
	}

	if (rs_is_reshaping(rs)) {
		rs->ti->error = "Can't takeover reshaping raid set";
		return -EPERM;
	}

	switch (mddev->level) {
	case 0:
		/* raid0 -> raid1/5 with one disk */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid0 -> raid10 */
		if (mddev->new_level == 10 &&
		    !(rs->raid_disks % mddev->raid_disks))
			return 0;

		/* raid0 with multiple disks -> raid4/5/6 */
		if (__within_range(mddev->new_level, 4, 6) &&
		    mddev->new_layout == ALGORITHM_PARITY_N &&
		    mddev->raid_disks > 1)
			return 0;

		break;

	case 10:
		/* Can't takeover raid10_offset! */
		if (__is_raid10_offset(mddev->layout))
			break;

		near_copies = __raid10_near_copies(mddev->layout);

		/* raid10* -> raid0 */
		if (mddev->new_level == 0) {
			/* Can takeover raid10_near with raid disks divisible by data copies! */
			if (near_copies > 1 &&
			    !(mddev->raid_disks % near_copies)) {
				mddev->raid_disks /= near_copies;
				mddev->delta_disks = mddev->raid_disks;
				return 0;
			}

			/* Can takeover raid10_far */
			if (near_copies == 1 &&
			    __raid10_far_copies(mddev->layout) > 1)
				return 0;

			break;
		}

		/* raid10_{near,far} -> raid1 */
		if (mddev->new_level == 1 &&
		    max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
			return 0;

		/* raid10_{near,far} with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2)
			return 0;
		break;

	case 1:
		/* raid1 with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2) {
			mddev->degraded = 1;
			return 0;
		}

		/* raid1 -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid1 -> raid10 */
		if (mddev->new_level == 10)
			return 0;
		break;

	case 4:
		/* raid4 -> raid0 */
		if (mddev->new_level == 0)
			return 0;

		/* raid4 -> raid1/5 with 2 disks */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid4 -> raid5/6 with parity N */
		if (__within_range(mddev->new_level, 5, 6) &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;
		break;

	case 5:
		/* raid5 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with 2 disks -> raid1/4/10 */
		if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid5_* -> raid6_*_6 with Q-Syndrome N (e.g. raid5_ra -> raid6_ra_6) */
		if (mddev->new_level == 6 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
			return 0;
		break;

	case 6:
		/* raid6 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6_*_n with Q-Syndrome N -> raid5_* */
		if (mddev->new_level == 5 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
			return 0;

	default:
		break;
	}

	rs->ti->error = "takeover not possible";
	return -EINVAL;
}

/* True if @rs requested to be taken over */
static bool rs_takeover_requested(struct raid_set *rs)
{
	return rs->md.new_level != rs->md.level;
}

/* True if @rs is requested to reshape by ctr */
static bool rs_reshape_requested(struct raid_set *rs)
{
	bool change;
	struct mddev *mddev = &rs->md;

	if (rs_takeover_requested(rs))
		return false;

	if (rs_is_raid0(rs))
		return false;

	change = mddev->new_layout != mddev->layout ||
		 mddev->new_chunk_sectors != mddev->chunk_sectors ||
		 rs->delta_disks;

	/* Historical case to support raid1 reshape without mddev->raid_disks change */
	if (rs_is_raid1(rs)) {
		if (rs->delta_disks)
			return !!rs->delta_disks;

		return !change &&
		       mddev->raid_disks != rs->raid_disks;
	}

	if (rs_is_raid10(rs))
		return change &&
		       !__is_raid10_far(mddev->new_layout) &&
		       rs->delta_disks >= 0;

	return change;
}
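
/*
 * Example of the distinction: reloading a raid5_ls table as raid6_ls_6
 * changes the level, so it is a takeover; reloading raid5_ls as
 * raid5_rs keeps level 5 and only changes the layout, so it is a
 * reshape.
 */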

/* Features */
#define FEATURE_FLAG_SUPPORTS_V190 0x1 /* Supports extended superblock */

/* State flags for sb->flags */
#define SB_FLAG_RESHAPE_ACTIVE 0x1
#define SB_FLAG_RESHAPE_BACKWARDS 0x2

/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 compat_features;	/* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */

	__le32 num_devices;	/* Number of devices in this raid set */
	__le32 array_position;	/* The position of this drive in the raid set */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Pre 1.9.0 part of bit field of devices to */
				/* indicate failures (see extension below) */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial raid set
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * raid characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	/*
	 * The members below are 1.9.0 extensions to the pristine
	 * superblock format; FEATURE_FLAG_SUPPORTS_V190 in the
	 * compat_features member indicates that they exist.
	 */

	__le32 flags;		/* Flags defining array states for reshaping */

	/*
	 * This offset tracks the progress of a raid
	 * set reshape in order to be able to restart it
	 */
	__le64 reshape_position;

	/*
	 * These define the properties of the array in case of an interrupted reshape
	 */
	__le32 new_level;
	__le32 new_layout;
	__le32 new_stripe_sectors;
	__le32 delta_disks;

	__le64 array_sectors;	/* Array size in sectors */

	/*
	 * Sector offsets to data on devices (reshaping).
	 * Needed to support out of place reshaping, hence
	 * not writing over any stripes whilst converting
	 * them from old to new layout
	 */
	__le64 data_offset;
	__le64 new_data_offset;

	__le64 sectors;		/* Used device size in sectors */

	/*
	 * Additional bit field of devices indicating failures to support
	 * up to 256 devices with the 1.9.0 on-disk metadata format
	 */
	__le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];

	__le32 incompat_features;	/* Used to indicate any incompatible features */

	/* Always set rest up to logical block size to 0 when writing (see super_sync()) */
} __packed;
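
/*
 * On a metadata device the dm-raid superblock occupies the first 4KiB;
 * super_validate() below places the MD write-intent bitmap right
 * behind it by setting mddev->bitmap_info.offset to to_sector(4096).
 */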

/*
 * Check for reshape constraints on raid set @rs:
 *
 * - reshape function non-existent
 * - degraded set
 * - ongoing recovery
 * - ongoing reshape
 *
 * Returns 0 if none or -EPERM if given constraint
 * with the error message set in rs->ti->error.
 */
static int rs_check_reshape(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	if (!mddev->pers || !mddev->pers->check_reshape)
		rs->ti->error = "Reshape not supported";
	else if (mddev->degraded)
		rs->ti->error = "Can't reshape degraded raid set";
	else if (rs_is_recovering(rs))
		rs->ti->error = "Convert request on recovering raid set prohibited";
	else if (rs_is_reshaping(rs))
		rs->ti->error = "raid set already reshaping!";
	else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs)))
		rs->ti->error = "Reshaping only supported for raid1/4/5/6/10";
	else
		return 0;

	return -EPERM;
}

static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded && !force_reload)
		return 0;

	rdev->sb_loaded = 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		set_bit(Faulty, &rdev->flags);
		return -EIO;
	}

	rdev->sb_loaded = 1;

	return 0;
}

static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	failed_devices[0] = le64_to_cpu(sb->failed_devices);
	memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));

	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
		int i = ARRAY_SIZE(sb->extended_failed_devices);

		while (i--)
			failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
	}
}

static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	int i = ARRAY_SIZE(sb->extended_failed_devices);

	sb->failed_devices = cpu_to_le64(failed_devices[0]);
	while (i--)
		sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
}
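
/*
 * The failed-device bitmap is split across two superblock members for
 * backward compatibility: bits 0-63 live in sb->failed_devices, while
 * a failure of, say, device 70 is recorded as bit 6 of
 * sb->extended_failed_devices[0], which is only read when
 * FEATURE_FLAG_SUPPORTS_V190 is set in sb->compat_features.
 */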

/*
 * Synchronize the superblock content of @rdev with the state of @mddev
 */
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	bool update_failed_devices = false;
	unsigned int i;
	uint64_t failed_devices[DISKS_ARRAY_ELEMS];
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	/* No metadata device, no superblock */
	if (!rdev->meta_bdev)
		return;

	BUG_ON(!rdev->sb_page);

	sb = page_address(rdev->sb_page);

	sb_retrieve_failed_devices(sb, failed_devices);

	for (i = 0; i < rs->raid_disks; i++)
		if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
			update_failed_devices = true;
			set_bit(i, (void *) failed_devices);
		}

	if (update_failed_devices)
		sb_update_failed_devices(sb, failed_devices);

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);

	/* 1.9.0 metadata extensions from here on */
	sb->new_level = cpu_to_le32(mddev->new_level);
	sb->new_layout = cpu_to_le32(mddev->new_layout);
	sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);

	sb->delta_disks = cpu_to_le32(mddev->delta_disks);

	smp_rmb(); /* Make sure we access most recent reshape position */
	sb->reshape_position = cpu_to_le64(mddev->reshape_position);
	if (le64_to_cpu(sb->reshape_position) != MaxSector) {
		/* Flag ongoing reshape */
		sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);

		if (mddev->delta_disks < 0 || mddev->reshape_backwards)
			sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
	} else {
		/* Clear reshape flags */
		sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
	}

	sb->array_sectors = cpu_to_le64(mddev->array_sectors);
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
	sb->sectors = cpu_to_le64(rdev->sectors);
	sb->incompat_features = cpu_to_le32(0);

	/* Zero out the rest of the payload after the size of the superblock */
	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
}

/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int r;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	r = read_disk_sb(rdev, rdev->sb_size, false);
	if (r)
		return r;

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);
		sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

		/* Force writing of superblocks to disk */
		set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}
2222
2223static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2224{
2225 int role;
2226 unsigned int d;
2227 struct mddev *mddev = &rs->md;
2228 uint64_t events_sb;
2229 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
2230 struct dm_raid_superblock *sb;
2231 uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0;
2232 struct md_rdev *r;
2233 struct dm_raid_superblock *sb2;
2234
2235 sb = page_address(rdev->sb_page);
2236 events_sb = le64_to_cpu(sb->events);
2237
2238
2239
2240
2241 mddev->events = events_sb ? : 1;
2242
2243 mddev->reshape_position = MaxSector;
2244
2245 mddev->raid_disks = le32_to_cpu(sb->num_devices);
2246 mddev->level = le32_to_cpu(sb->level);
2247 mddev->layout = le32_to_cpu(sb->layout);
2248 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
2249
2250
2251
2252
2253
2254 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
2255
2256 mddev->new_level = le32_to_cpu(sb->new_level);
2257 mddev->new_layout = le32_to_cpu(sb->new_layout);
2258 mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
2259 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
2260 mddev->array_sectors = le64_to_cpu(sb->array_sectors);
2261
2262
2263 if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
2264 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
2265 DMERR("Reshape requested but raid set is still reshaping");
2266 return -EINVAL;
2267 }
2268
2269 if (mddev->delta_disks < 0 ||
2270 (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
2271 mddev->reshape_backwards = 1;
2272 else
2273 mddev->reshape_backwards = 0;
2274
2275 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
2276 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
2277 }
2278
2279 } else {
2280
2281
2282
2283 struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
2284 struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
2285
2286 if (rs_takeover_requested(rs)) {
2287 if (rt_cur && rt_new)
2288 DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
2289 rt_cur->name, rt_new->name);
2290 else
2291 DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
2292 return -EINVAL;
2293 } else if (rs_reshape_requested(rs)) {
2294 DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
2295 if (mddev->layout != mddev->new_layout) {
2296 if (rt_cur && rt_new)
2297 DMERR(" current layout %s vs new layout %s",
2298 rt_cur->name, rt_new->name);
2299 else
2300 DMERR(" current layout 0x%X vs new layout 0x%X",
2301 le32_to_cpu(sb->layout), mddev->new_layout);
2302 }
2303 if (mddev->chunk_sectors != mddev->new_chunk_sectors)
2304 DMERR(" current stripe sectors %u vs new stripe sectors %u",
2305 mddev->chunk_sectors, mddev->new_chunk_sectors);
2306 if (rs->delta_disks)
2307 DMERR(" current %u disks vs new %u disks",
2308 mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
2309 if (rs_is_raid10(rs)) {
2310 DMERR(" Old layout: %s w/ %u copies",
2311 raid10_md_layout_to_format(mddev->layout),
2312 raid10_md_layout_to_copies(mddev->layout));
2313 DMERR(" New layout: %s w/ %u copies",
2314 raid10_md_layout_to_format(mddev->new_layout),
2315 raid10_md_layout_to_copies(mddev->new_layout));
2316 }
2317 return -EINVAL;
2318 }
2319
2320 DMINFO("Discovered old metadata format; upgrading to extended metadata format");
2321 }
2322
2323 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
2324 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341 d = 0;
2342 rdev_for_each(r, mddev) {
2343 if (test_bit(Journal, &rdev->flags))
2344 continue;
2345
2346 if (test_bit(FirstUse, &r->flags))
2347 new_devs++;
2348
2349 if (!test_bit(In_sync, &r->flags)) {
2350 DMINFO("Device %d specified for rebuild; clearing superblock",
2351 r->raid_disk);
2352 rebuilds++;
2353
2354 if (test_bit(FirstUse, &r->flags))
2355 rebuild_and_new++;
2356 }
2357
2358 d++;
2359 }

	if (new_devs == rs->raid_disks || !rebuilds) {
		/* Replace a broken device */
		if (new_devs == 1 && !rs->delta_disks)
			;
		if (new_devs == rs->raid_disks) {
			DMINFO("Superblocks created for new raid set");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs != rebuilds &&
			   new_devs != rs->delta_disks) {
			DMERR("New device injected into existing raid set without "
			      "'delta_disks' or 'rebuild' parameter specified");
			return -EINVAL;
		}
	} else if (new_devs && new_devs != rebuilds) {
		DMERR("%u 'rebuild' devices cannot be injected into"
		      " a raid set with %u other first-time devices",
		      rebuilds, new_devs);
		return -EINVAL;
	} else if (rebuilds) {
		if (rebuild_and_new && rebuilds != rebuild_and_new) {
			DMERR("new device%s provided without 'rebuild'",
			      new_devs > 1 ? "s" : "");
			return -EINVAL;
		} else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
			DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
			      (unsigned long long) mddev->recovery_cp);
			return -EINVAL;
		} else if (rs_is_reshaping(rs)) {
			DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
			      (unsigned long long) mddev->reshape_position);
			return -EINVAL;
		}
	}

	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	sb_retrieve_failed_devices(sb, failed_devices);
	rdev_for_each(r, mddev) {
		if (test_bit(Journal, &r->flags) ||
		    !r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;
		memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role < 0)
				continue;

			if (role != r->raid_disk) {
				if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
					if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
					    rs->raid_disks % rs->raid10_copies) {
						rs->ti->error =
							"Cannot change raid10 near set to odd # of devices!";
						return -EINVAL;
					}

					sb2->array_position = cpu_to_le32(r->raid_disk);

				} else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
					   !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
					   !rt_is_raid1(rs->raid_type)) {
					rs->ti->error = "Cannot change device positions in raid set";
					return -EINVAL;
				}

				DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (test_bit(role, (void *) failed_devices))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}
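
/*
 * Validate one member device against the freshest, already-validated
 * superblock of the set and bring the rdev's runtime state (recovery
 * offset, In_sync/Faulty bits, data offsets) in line with it.
 */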
static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
{
	struct mddev *mddev = &rs->md;
	struct dm_raid_superblock *sb;

	if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0)
		return 0;

	sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(rs, rdev))
		return -EINVAL;

	if (le32_to_cpu(sb->compat_features) &&
	    le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
		rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
		return -EINVAL;
	}

	if (sb->incompat_features) {
		rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
		return -EINVAL;
	}

	/* Enable bitmap creation on all raid levels != 0 and without a journal device */
	mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
	mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;

	if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
		/*
		 * Retrieve rdev size stored in superblock to be prepared for shrink.
		 * Check extended superblock members are present otherwise the size
		 * will not be set!
		 */
		if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
			rdev->sectors = le64_to_cpu(sb->sectors);

		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset == MaxSector)
			set_bit(In_sync, &rdev->flags);
		/*
		 * If no reshape in progress -> we're recovering single
		 * disk(s) and have to set the device(s) to out-of-sync
		 */
		else if (!rs_is_reshaping(rs))
			clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_and_clear_bit(Faulty, &rdev->flags)) {
		rdev->recovery_offset = 0;
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
	}

	/* Reshape support -> restore respective data offsets */
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);

	return 0;
}

/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int r;
	struct md_rdev *rdev, *freshest;
	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each(rdev, mddev) {
		if (test_bit(Journal, &rdev->flags))
			continue;

		if (!rdev->meta_bdev)
			continue;

		/* Set superblock offset/size for metadata device. */
		rdev->sb_start = 0;
		rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
		if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
			DMERR("superblock size of a logical block is no longer valid");
			return -EINVAL;
		}

		/*
		 * Skipping super_load due to CTR_FLAG_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new.  This is the intended effect
		 * of the "sync" directive.
		 *
		 * With reshaping capability added, we must ensure that
		 * the "sync" directive is disallowed during the reshape.
		 */
		if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
			continue;

		r = super_load(rdev, freshest);

		switch (r) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			/* Failed to load/validate the superblock on this device. */
			/*
			 * We have to keep any raid0 data/metadata device pairs or
			 * the MD raid0 personality will fail to start the array.
			 */
			if (rs_is_raid0(rs))
				continue;

			/*
			 * We keep the dm_devs to be able to emit the device tuple
			 * properly on the table line in raid_status() (rather than
			 * mistakenly acting as if '- -' got passed into the constructor).
			 *
			 * The rdev has to stay on the same_set list to allow for
			 * the attempt to restore faulty devices on second resume.
			 */
			rdev->raid_disk = rdev->saved_raid_disk = -1;
			break;
		}
	}

	if (!freshest)
		return 0;

	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	rs->ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(rs, freshest))
		return -EINVAL;

	if (validate_raid_redundancy(rs)) {
		rs->ti->error = "Insufficient redundancy to activate array";
		return -EINVAL;
	}

	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags) &&
		    rdev != freshest &&
		    super_validate(rs, rdev))
			return -EINVAL;
	return 0;
}
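
/*
 * Illustration of the out-of-place reshape space handled below
 * (example value only): with 'data_offset 1024' on the table line,
 * a disk-adding (forward) reshape relocates the data from sector
 * 1024 down to sector 0 of each component device:
 *
 *	before:	| 1024 free sectors | data ...          |
 *	after:	| data ...          | 1024 free sectors |
 *
 * Disk-removing (backward) reshapes use free space at the end of
 * each component device the same way.
 */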

/*
 * Adjust data_offset and new_data_offset on all disk members of @rs
 * for out of place reshaping if requested by constructor.
 *
 * We need free space at the beginning of each raid disk for forward
 * and at the end for backward reshapes which userspace has to provide
 * via the data_offset argument.
 */
static int rs_adjust_data_offsets(struct raid_set *rs)
{
	sector_t data_offset = 0, new_data_offset = 0;
	struct md_rdev *rdev;

	/* Constructor did not request data offset change */
	if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
		if (!rs_is_reshapable(rs))
			goto out;

		return 0;
	}

	/* HM FIXME: get In_Sync raid_dev? */
	rdev = &rs->dev[0].rdev;

	if (rs->delta_disks < 0) {
		/*
		 * Removing disks (reshaping backwards):
		 *
		 * - before reshape: data is at offset 0 and free space
		 *		     is at end of each component LV
		 *
		 * - after reshape: data is at offset rs->data_offset != 0 on each component LV
		 */
		data_offset = 0;
		new_data_offset = rs->data_offset;

	} else if (rs->delta_disks > 0) {
		/*
		 * Adding disks (reshaping forwards):
		 *
		 * - before reshape: data is at offset rs->data_offset != 0 and
		 *		     free space is at begin of each component LV
		 *
		 * - after reshape: data is at offset 0 on each component LV
		 */
		data_offset = rs->data_offset;
		new_data_offset = 0;

	} else {
		/*
		 * User space passes in 0 for data offset after having removed reshape space
		 *
		 * - or - (data offset != 0)
		 *
		 * Changing RAID layout or chunk size -> toggle offsets
		 *
		 * - before reshape: data is at offset rs->data_offset 0 and
		 *		     free space is at end of each component LV
		 *		     -or-
		 *		     data is at offset rs->data_offset != 0 and
		 *		     free space is at begin of each component LV
		 *
		 * - after reshape: data is at offset 0 if it was at offset != 0
		 *		    or at offset != 0 if it was at offset 0
		 *		    on each component LV
		 */
		data_offset = rs->data_offset ? rdev->data_offset : 0;
		new_data_offset = data_offset ? 0 : rs->data_offset;
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
	}

	/*
	 * Make sure we got a minimum amount of free sectors per device.
	 */
	if (rs->data_offset &&
	    to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
		rs->ti->error = data_offset ? "No space for forward reshape" :
					      "No space for backward reshape";
		return -ENOSPC;
	}
out:
	/*
	 * Raise recovery_cp in case data_offset != 0 to
	 * avoid false recovery positives in the constructor.
	 */
	if (rs->md.recovery_cp < rs->md.dev_sectors)
		rs->md.recovery_cp += rs->dev[0].rdev.data_offset;

	/* Adjust data offsets on all rdevs but skip any raid4/5/6 journal device */
	rdev_for_each(rdev, &rs->md) {
		if (!test_bit(Journal, &rdev->flags)) {
			rdev->data_offset = data_offset;
			rdev->new_data_offset = new_data_offset;
		}
	}

	return 0;
}

/* Re-number the component devices sequentially after userspace reordered them (journal device excluded) */
static void __reorder_raid_disk_indexes(struct raid_set *rs)
{
	int i = 0;
	struct md_rdev *rdev;

	rdev_for_each(rdev, &rs->md) {
		if (!test_bit(Journal, &rdev->flags)) {
			rdev->raid_disk = i++;
			rdev->saved_raid_disk = rdev->new_raid_disk = -1;
		}
	}
}

/*
 * Setup @rs for takeover by a different raid level.
 */
static int rs_setup_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;
	unsigned int d = mddev->raid_disks = rs->raid_disks;
	sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;

	if (rt_is_raid10(rs->raid_type)) {
		if (rs_is_raid0(rs)) {
			/* Userspace reordered disks -> adjust raid_disk indexes */
			__reorder_raid_disk_indexes(rs);

			/* raid0 -> raid10_far layout */
			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
								   rs->raid10_copies);
		} else if (rs_is_raid1(rs))
			/* raid1 -> raid10_near layout */
			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
								   rs->raid_disks);
		else
			return -EINVAL;

	}

	clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
	mddev->recovery_cp = MaxSector;

	while (d--) {
		rdev = &rs->dev[d].rdev;

		if (test_bit(d, (void *) rs->rebuild_disks)) {
			clear_bit(In_sync, &rdev->flags);
			clear_bit(Faulty, &rdev->flags);
			mddev->recovery_cp = rdev->recovery_offset = 0;
			/* Bitmap has to be created when we do an "up" takeover */
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		}

		rdev->new_data_offset = new_data_offset;
	}

	return 0;
}
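
/*
 * Takeover example (illustrative; hypothetical dm device, sizes and
 * component devices): a 2-legged raid1 set can be taken over by
 * raid10 by reloading the table with the new level on the same
 * devices:
 *
 *	dmsetup suspend r
 *	dmsetup reload r --table "0 2097152 raid raid10 1 64 2 - /dev/sda - /dev/sdb"
 *	dmsetup resume r
 *
 * rs_setup_takeover() above then maps the old set onto the equivalent
 * md layout (here raid1 -> raid10_near w/ 2 copies).
 */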

/* Prepare @rs for reshape */
static int rs_prepare_reshape(struct raid_set *rs)
{
	bool reshape;
	struct mddev *mddev = &rs->md;

	if (rs_is_raid10(rs)) {
		if (rs->raid_disks != mddev->raid_disks &&
		    __is_raid10_near(mddev->layout) &&
		    rs->raid10_copies &&
		    rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
			/*
			 * Changing the number of data copies of a raid10_near
			 * set regroups its mirror sets, which only works if
			 * the number of disks is divisible by the number of
			 * requested copies.
			 */
			if (rs->raid_disks % rs->raid10_copies) {
				rs->ti->error = "Can't reshape raid10 mirror groups";
				return -EINVAL;
			}

			/* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */
			__reorder_raid_disk_indexes(rs);
			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
								   rs->raid10_copies);
			mddev->new_layout = mddev->layout;
			reshape = false;
		} else
			reshape = true;

	} else if (rs_is_raid456(rs))
		reshape = true;

	else if (rs_is_raid1(rs)) {
		if (rs->delta_disks) {
			/* Process raid1 via delta_disks */
			mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
			reshape = true;
		} else {
			/* Process raid1 without delta_disks */
			mddev->raid_disks = rs->raid_disks;
			reshape = false;
		}
	} else {
		rs->ti->error = "Called with bogus raid type";
		return -EINVAL;
	}

	if (reshape) {
		set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
	} else if (mddev->raid_disks < rs->raid_disks)
		/* Create new superblocks and bitmaps, if any new disks */
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);

	return 0;
}

/* Get reshape sectors from data_offsets or raid set */
static sector_t _get_reshape_sectors(struct raid_set *rs)
{
	struct md_rdev *rdev;
	sector_t reshape_sectors = 0;

	rdev_for_each(rdev, &rs->md)
		if (!test_bit(Journal, &rdev->flags)) {
			reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
					rdev->data_offset - rdev->new_data_offset :
					rdev->new_data_offset - rdev->data_offset;
			break;
		}

	return max(reshape_sectors, (sector_t) rs->data_offset);
}

/*
 * Reshape:
 * - change raid layout
 * - change chunk size
 * - add disks
 * - remove disks
 */
static int rs_setup_reshape(struct raid_set *rs)
{
	int r = 0;
	unsigned int cur_raid_devs, d;
	sector_t reshape_sectors = _get_reshape_sectors(rs);
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;

	mddev->delta_disks = rs->delta_disks;
	cur_raid_devs = mddev->raid_disks;

	/* Ignore impossible layout change whilst adding/removing disks */
	if (mddev->delta_disks &&
	    mddev->layout != mddev->new_layout) {
		DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
		mddev->new_layout = mddev->layout;
	}

	/*
	 * Adjust array size:
	 *
	 * - in case of adding disk(s), array size has
	 *   to grow after the disk adding reshape,
	 *   which'll happen in the event handler;
	 *   reshape will happen forward, so space has to
	 *   be available at the beginning of each disk
	 *
	 * - in case of removing disk(s), array size
	 *   has to shrink before starting the reshape,
	 *   which'll happen here;
	 *   reshape will happen backward, so space has to
	 *   be available at the end of each disk
	 *
	 * - data_offset and new_data_offset are
	 *   adjusted for aforementioned out of place
	 *   changes in rs_adjust_data_offsets already
	 */
	if (rs->delta_disks > 0) {
		/* Prepare disks added to the set for check in raid4/5/6/10 {check|start}_reshape */
		for (d = cur_raid_devs; d < rs->raid_disks; d++) {
			rdev = &rs->dev[d].rdev;
			clear_bit(In_sync, &rdev->flags);

			/*
			 * save_raid_disk needs to be -1, or recovery_offset will be set to 0
			 * by md, which'd store that erroneously in the superblock on reshape
			 */
			rdev->saved_raid_disk = -1;
			rdev->raid_disk = d;

			rdev->sectors = mddev->dev_sectors;
			rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
		}

		mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */

	/* Remove disk(s) */
	} else if (rs->delta_disks < 0) {
		r = rs_set_dev_and_array_sectors(rs, true);
		mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */

	/* Change layout and/or chunk size */
	} else {
		/*
		 * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size
		 * keeping the number of disks:
		 *
		 * toggle reshape_backwards depending on data_offset:
		 *
		 * - free space upfront -> reshape forward
		 *
		 * - free space at the end -> reshape backward
		 *
		 * This utilizes the existing out of place reshape space,
		 * avoiding the need to shrink the array size.
		 */
		mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
	}

	/*
	 * For forward reshapes, grow the device sizes by the
	 * out of place reshape space so that the personality
	 * is able to make use of it.
	 */
	if (!mddev->reshape_backwards)
		rdev_for_each(rdev, &rs->md)
			if (!test_bit(Journal, &rdev->flags))
				rdev->sectors += reshape_sectors;

	return r;
}
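
/*
 * Reshape example (illustrative; hypothetical names and sizes):
 * growing a 3-disk raid5 set to 4 disks is requested by reloading the
 * table with one more device pair plus 'delta_disks 1', providing the
 * out of place space via 'data_offset':
 *
 *	dmsetup reload r --table "0 4194304 raid raid5_ls 5 128 delta_disks 1 data_offset 1024 4 \
 *		/dev/sda1 /dev/sda2 /dev/sdb1 /dev/sdb2 /dev/sdc1 /dev/sdc2 /dev/sdd1 /dev/sdd2"
 *
 * rs_setup_reshape() then marks the added disk out-of-sync and selects
 * a forward reshape because the free space sits up front.
 */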

/*
 * Enable/disable discard support on raid set depending on
 * raid level and discard properties of underlying raid members.
 */
static void configure_discard_support(struct raid_set *rs)
{
	int i;
	bool raid456;
	struct dm_target *ti = rs->ti;

	/*
	 * XXX: raid level 4,5,6 require zeroing for safety.
	 */
	raid456 = rs_is_raid456(rs);

	for (i = 0; i < rs->raid_disks; i++) {
		struct request_queue *q;

		if (!rs->dev[i].rdev.bdev)
			continue;

		q = bdev_get_queue(rs->dev[i].rdev.bdev);
		if (!q || !blk_queue_discard(q))
			return;

		if (raid456) {
			if (!devices_handle_discard_safely) {
				DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
				DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
				return;
			}
		}
	}

	/*
	 * RAID1 and RAID10 personalities require bio splitting,
	 * RAID0/4/5/6 don't and process large discard bios properly.
	 */
	ti->split_discard_bios = !!(rs_is_raid1(rs) || rs_is_raid10(rs));
	ti->num_discard_bios = 1;
}

/*
 * Construct a RAID0/1/10/4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>{0,}	\
 *	<#raid_devs> [<meta_dev1> <dev1>]{1,}
 *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 *
 * Userspace is free to initialize the metadata devices (i.e. zero
 * their superblocks) to request a fresh raid set.
 */
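/*
 * Example table line (illustrative; hypothetical devices and sizes):
 *
 *	0 976562176 raid raid6_zr 3 128 region_size 8192 5 \
 *	/dev/sde1 /dev/sde2 /dev/sdf1 /dev/sdf2 /dev/sdg1 /dev/sdg2 \
 *	/dev/sdh1 /dev/sdh2 /dev/sdi1 /dev/sdi2
 *
 * i.e. a raid6 "zero restart" set over 5 metadata/data device pairs
 * with a 128 sector chunk size and an 8192 sector region size.
 */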
static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	bool resize = false;
	struct raid_type *rt;
	unsigned int num_raid_params, num_raid_devs;
	sector_t calculated_dev_sectors, rdev_sectors, reshape_sectors;
	struct raid_set *rs = NULL;
	const char *arg;
	struct rs_layout rs_layout;
	struct dm_arg_set as = { argc, argv }, as_nrd;
	struct dm_arg _args[] = {
		{ 0, as.argc, "Cannot understand number of raid parameters" },
		{ 1, 254, "Cannot understand number of raid devices parameters" }
	};

	arg = dm_shift_arg(&as);
	if (!arg) {
		ti->error = "No arguments";
		return -EINVAL;
	}

	rt = get_raid_type(arg);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}

	/* Must have <#raid_params> */
	if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
		return -EINVAL;

	/* number of raid device tupples <meta_dev data_dev> */
	as_nrd = as;
	dm_consume_args(&as_nrd, num_raid_params);
	_args[1].max = (as_nrd.argc - 1) / 2;
	if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
		return -EINVAL;

	if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
		ti->error = "Invalid number of supplied raid devices";
		return -EINVAL;
	}

	rs = raid_set_alloc(ti, rt, num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	r = parse_raid_params(rs, &as, num_raid_params);
	if (r)
		goto bad;

	r = parse_dev_params(rs, &as);
	if (r)
		goto bad;

	rs->md.sync_super = super_sync;

	/*
	 * Calculate ctr requested array and device sizes to allow
	 * for superblock analysis needing device sizes defined.
	 *
	 * Any existing superblock will overwrite the array and device sizes
	 */
	r = rs_set_dev_and_array_sectors(rs, false);
	if (r)
		goto bad;

	calculated_dev_sectors = rs->md.dev_sectors;

	/*
	 * Backup any new raid set level, layout, ... requested
	 * to be able to compare to superblock members for
	 * conversion decisions.
	 */
	rs_config_backup(rs, &rs_layout);

	r = analyse_superblocks(ti, rs);
	if (r)
		goto bad;

	rdev_sectors = __rdev_sectors(rs);
	if (!rdev_sectors) {
		ti->error = "Invalid rdev size";
		r = -EINVAL;
		goto bad;
	}

	/* Take existing out of place reshape space into account when deciding on a resize */
	reshape_sectors = _get_reshape_sectors(rs);
	if (calculated_dev_sectors != rdev_sectors)
		resize = calculated_dev_sectors != (reshape_sectors ? rdev_sectors - reshape_sectors : rdev_sectors);

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_bios = 1;

	/* Restore any requested new layout for conversion decision */
	rs_config_restore(rs, &rs_layout);

	/*
	 * Now that we have any superblock metadata available,
	 * check for new, recovering, reshaping, to be taken over,
	 * to be reshaped or an existing, unchanged raid set to
	 * run in sequence.
	 */
	if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
		/* A new raid6 set has to be recovered to ensure proper parity and Q-syndrome */
		if (rs_is_raid6(rs) &&
		    test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
			ti->error = "'nosync' not allowed for new raid6 set";
			r = -EINVAL;
			goto bad;
		}
		rs_setup_recovery(rs, 0);
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
		rs_set_new(rs);
	} else if (rs_is_recovering(rs)) {
		/* Rebuild particular devices */
		if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
			set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
			rs_setup_recovery(rs, MaxSector);
		}
		/* A recovering raid set may be resized */
		; /* skip setup rs */
	} else if (rs_is_reshaping(rs)) {
		/* Have to reject size change request during reshape */
		if (resize) {
			ti->error = "Can't resize a reshaping raid set";
			r = -EPERM;
			goto bad;
		}
		/* skip setup rs */
	} else if (rs_takeover_requested(rs)) {
		if (rs_is_reshaping(rs)) {
			ti->error = "Can't takeover a reshaping raid set";
			r = -EPERM;
			goto bad;
		}

		/* We can't takeover a journaled raid4/5/6 */
		if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
			ti->error = "Can't takeover a journaled raid4/5/6 set";
			r = -EPERM;
			goto bad;
		}

		/*
		 * If a takeover is needed, userspace sets any additional
		 * devices to rebuild and we can check for a valid request here.
		 *
		 * If acceptable, set the level to the new requested
		 * one, prohibit requesting recovery, allow the raid
		 * set to run and store superblocks during resume.
		 */
		r = rs_check_takeover(rs);
		if (r)
			goto bad;

		r = rs_setup_takeover(rs);
		if (r)
			goto bad;

		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
		/* Takeover ain't recovery, so disable recovery */
		rs_setup_recovery(rs, MaxSector);
		rs_set_new(rs);
	} else if (rs_reshape_requested(rs)) {
		/*
		 * No need to check for 'ongoing' takeover here, because takeover
		 * is an instant operation as opposed to an ongoing reshape.
		 */

		/* We can't reshape a journaled raid4/5/6 */
		if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
			ti->error = "Can't reshape a journaled raid4/5/6 set";
			r = -EPERM;
			goto bad;
		}

		/* Out-of-place space has to be available to allow for a reshape unless raid1! */
		if (reshape_sectors || rs_is_raid1(rs)) {
			/*
			 * We can only prepare for a reshape here, because the
			 * raid set needs to run to provide the respective reshape
			 * check functions via its MD personality instance.
			 *
			 * So do the reshape check after md_run() succeeded.
			 */
			r = rs_prepare_reshape(rs);
			if (r)
				goto bad;

			/* Reshaping ain't recovery, so disable recovery */
			rs_setup_recovery(rs, MaxSector);
		}
		rs_set_cur(rs);
	} else {
		/* May not set recovery when a device rebuild is requested */
		if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
			rs_setup_recovery(rs, MaxSector);
			set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
		} else
			rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ?
					      0 : (resize ? calculated_dev_sectors : MaxSector));
		rs_set_cur(rs);
	}

	/* If constructor requested it, change data and new_data offsets */
	r = rs_adjust_data_offsets(rs);
	if (r)
		goto bad;

	/* Start raid set read-only and assumed clean to change in raid_resume() */
	rs->md.ro = 1;
	rs->md.in_sync = 1;

	/* Keep array frozen */
	set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);

	/* Has to be held on running the array */
	mddev_lock_nointr(&rs->md);
	r = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */
	if (r) {
		ti->error = "Failed to run raid array";
		mddev_unlock(&rs->md);
		goto bad;
	}

	r = md_start(&rs->md);

	if (r) {
		ti->error = "Failed to start raid array";
		mddev_unlock(&rs->md);
		goto bad_md_start;
	}

	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
	if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
		r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
		if (r) {
			ti->error = "Failed to set raid4/5/6 journal mode";
			mddev_unlock(&rs->md);
			goto bad_journal_mode_set;
		}
	}

	mddev_suspend(&rs->md);
	set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);

	/* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
	if (rs_is_raid456(rs)) {
		r = rs_set_raid456_stripe_cache(rs);
		if (r)
			goto bad_stripe_cache;
	}

	/* Now do an early reshape check */
	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		r = rs_check_reshape(rs);
		if (r)
			goto bad_check_reshape;

		/* Restore new, ctr requested layout to perform check */
		rs_config_restore(rs, &rs_layout);

		if (rs->md.pers->start_reshape) {
			r = rs->md.pers->check_reshape(&rs->md);
			if (r) {
				ti->error = "Reshape check failed";
				goto bad_check_reshape;
			}
		}
	}

	/* Disable/enable discard support on raid set. */
	configure_discard_support(rs);

	mddev_unlock(&rs->md);
	return 0;

bad_md_start:
bad_journal_mode_set:
bad_stripe_cache:
bad_check_reshape:
	md_stop(&rs->md);
bad:
	raid_set_free(rs);

	return r;
}

static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	list_del_init(&rs->callbacks.list);
	md_stop(&rs->md);
	raid_set_free(rs);
}

static int raid_map(struct dm_target *ti, struct bio *bio)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/*
	 * If we're reshaping to add disk(s), ti->len and
	 * mddev->array_sectors will differ during the process
	 * (ti->len > mddev->array_sectors), so we have to requeue
	 * bios with addresses > mddev->array_sectors here or
	 * there will occur accesses past EOD of the component
	 * data images thus erroring the raid set.
	 */
	if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
		return DM_MAPIO_REQUEUE;

	md_handle_request(mddev, bio);

	return DM_MAPIO_SUBMITTED;
}

/* Return sync state string for @state */
enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
static const char *sync_str(enum sync_state state)
{
	/* Has to be in above sync_state order! */
	static const char *sync_strs[] = {
		"frozen",
		"reshape",
		"resync",
		"check",
		"repair",
		"recover",
		"idle"
	};

	return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
};

/* Return enum sync_state for @mddev derived from @recovery flags */
static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
{
	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
		return st_frozen;

	/* The MD sync thread can be done with io or be interrupted but still be running */
	if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
	    (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
	     (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
			return st_reshape;

		if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
				return st_resync;
			if (test_bit(MD_RECOVERY_CHECK, &recovery))
				return st_check;
			return st_repair;
		}

		if (test_bit(MD_RECOVERY_RECOVER, &recovery))
			return st_recover;

		if (mddev->reshape_position != MaxSector)
			return st_reshape;
	}

	return st_idle;
}

/*
 * Return status string for @rdev
 *
 * Status characters:
 *
 *  'D' = Dead/Failed raid set component or raid4/5/6 journal device
 *  'a' = Alive but not in-sync raid set component _or_ alive raid4/5/6 'write_back' journal device
 *  'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
 *  '-' = Non-existing device (i.e. uspace passed '- -' into the ctr)
 */
static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
{
	if (!rdev->bdev)
		return "-";
	else if (test_bit(Faulty, &rdev->flags))
		return "D";
	else if (test_bit(Journal, &rdev->flags))
		return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
	else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) ||
		 (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) &&
		  !test_bit(In_sync, &rdev->flags)))
		return "a";
	else
		return "A";
}

/* Helper to return resync/reshape progress for @rs and runtime flags for raid set in sync / resynching */
static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
				sector_t resync_max_sectors)
{
	sector_t r;
	enum sync_state state;
	struct mddev *mddev = &rs->md;

	clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
	clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);

	if (rs_is_raid0(rs)) {
		r = resync_max_sectors;
		set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);

	} else {
		state = decipher_sync_action(mddev, recovery);

		if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
			r = mddev->recovery_cp;
		else
			r = mddev->curr_resync_completed;

		if (state == st_idle && r >= resync_max_sectors) {
			/*
			 * Sync complete.
			 */
			/* In case we have finished recovering, the array is in sync. */
			if (test_bit(MD_RECOVERY_RECOVER, &recovery))
				set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);

		} else if (state == st_recover)
			/*
			 * In case we are recovering, the array is not in sync
			 * and health chars should show the recovering legs.
			 */
			;
		else if (state == st_resync)
			/*
			 * If "resync" is occurring, the raid set
			 * is or may be out of sync hence the health
			 * characters shall be 'a'.
			 */
			set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
		else if (state == st_reshape)
			/*
			 * If "reshape" is occurring, the raid set
			 * is or may be out of sync hence the health
			 * characters shall be 'a'.
			 */
			set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);

		else if (state == st_check || state == st_repair)
			/*
			 * If "check" or "repair" is occurring, the raid set has
			 * undergone an initial sync and the health characters
			 * should not be 'a' anymore.
			 */
			set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);

		else {
			struct md_rdev *rdev;

			/*
			 * We are idle and recovery is needed, prevent 'A' chars race
			 * caused by components still set to in-sync by constructor.
			 */
			if (test_bit(MD_RECOVERY_NEEDED, &recovery))
				set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);

			/*
			 * The raid set may be doing an initial sync, or it may
			 * be rebuilding individual components.  If all the
			 * devices are In_sync, then it is the raid set that is
			 * being initialized.
			 */
			set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
			rdev_for_each(rdev, mddev)
				if (!test_bit(Journal, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags)) {
					clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
					break;
				}
		}
	}

	return min(r, resync_max_sectors);
}

/* Helper to return @dev name or "-" if !@dev */
static const char *__get_dev_name(struct dm_dev *dev)
{
	return dev ? dev->name : "-";
}

static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;
	struct r5conf *conf = mddev->private;
	int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
	unsigned long recovery;
	unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned int sz = 0;
	unsigned int rebuild_disks;
	unsigned int write_mostly_params = 0;
	sector_t progress, resync_max_sectors, resync_mismatches;
	const char *sync_action;
	struct raid_type *rt;

	switch (type) {
	case STATUSTYPE_INFO:
		/* *Should* always succeed */
		rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
		if (!rt)
			return;

		DMEMIT("%s %d ", rt->name, mddev->raid_disks);

		/* Access most recent mddev properties for status output */
		smp_rmb();
		recovery = rs->md.recovery;
		/* Get sensible max sectors even if raid set not yet started */
		resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
				      mddev->resync_max_sectors : mddev->dev_sectors;
		progress = rs_get_progress(rs, recovery, resync_max_sectors);
		resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
				    atomic64_read(&mddev->resync_mismatches) : 0;
		sync_action = sync_str(decipher_sync_action(&rs->md, recovery));

		/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
		for (i = 0; i < rs->raid_disks; i++)
			DMEMIT("%s", __raid_dev_status(rs, &rs->dev[i].rdev));

		/*
		 * In-sync/Reshape ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the raid set
		 *   - Rebuilding a subset of devices of the raid set
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 *
		 *  The reshape ratio shows the progress of
		 *  changing the raid layout or the number of
		 *  disks of a raid set
		 */
		DMEMIT(" %llu/%llu", (unsigned long long) progress,
		       (unsigned long long) resync_max_sectors);

		/*
		 * v1.5.0+:
		 *
		 * Sync action:
		 *   See Documentation/device-mapper/dm-raid.txt for
		 *   information on each of these states.
		 */
		DMEMIT(" %s", sync_action);

		/*
		 * v1.5.0+:
		 *
		 * resync_mismatches/mismatch_cnt
		 *   This field shows the number of discrepancies found when
		 *   performing a "check" of the raid set.
		 */
		DMEMIT(" %llu", (unsigned long long) resync_mismatches);

		/*
		 * v1.9.0+:
		 *
		 * data_offset (needed for out of space reshaping)
		 *   This field shows the data offset into the data
		 *   image LV where the first stripes data starts.
		 *
		 * We keep data_offset equal on all raid disks of the set,
		 * so retrieving it from the first raid disk is sufficient.
		 */
		DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);

		/*
		 * v1.10.0+:
		 */
		DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
			      __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
		break;

	case STATUSTYPE_TABLE:
		/* Report the table line string you would use to construct this raid set */

		/* Calculate raid parameter count */
		for (i = 0; i < rs->raid_disks; i++)
			if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				write_mostly_params += 2;
		rebuild_disks = memweight(rs->rebuild_disks, DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks));
		raid_param_cnt += rebuild_disks * 2 +
				  write_mostly_params +
				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2 +
				  (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ? 2 : 0) +
				  (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags) ? 2 : 0);

		/* Emit table line */
		/* This has to be in the documented order for userspace! */
		DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
		if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
		if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
		if (rebuild_disks)
			for (i = 0; i < rs->raid_disks; i++)
				if (test_bit(rs->dev[i].rdev.raid_disk, (void *) rs->rebuild_disks))
					DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
					       rs->dev[i].rdev.raid_disk);
		if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
			       mddev->bitmap_info.daemon_sleep);
		if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
			       mddev->sync_speed_min);
		if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
			       mddev->sync_speed_max);
		if (write_mostly_params)
			for (i = 0; i < rs->raid_disks; i++)
				if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
					DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
					       rs->dev[i].rdev.raid_disk);
		if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
			       mddev->bitmap_info.max_write_behind);
		if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
			       max_nr_stripes);
		if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
			       (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
		if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
			       raid10_md_layout_to_copies(mddev->layout));
		if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
			       raid10_md_layout_to_format(mddev->layout));
		if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
			       max(rs->delta_disks, mddev->delta_disks));
		if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
			       (unsigned long long) rs->data_offset);
		if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags))
			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV),
			       __get_dev_name(rs->journal_dev.dev));
		if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags))
			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE),
			       md_journal_mode_to_dm_raid(rs->journal_dev.mode));
		DMEMIT(" %d", rs->raid_disks);
		for (i = 0; i < rs->raid_disks; i++)
			DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev),
			       __get_dev_name(rs->dev[i].data_dev));
	}
}
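
/*
 * Illustrative STATUSTYPE_INFO output for an in-sync 3-disk raid5 set
 * (field values are made up):
 *
 *	raid5_ls 3 AAA 976562176/976562176 idle 0 0 -
 *
 * i.e. <raid_type> <#devices> <health_chars> <sync_ratio>
 *	<sync_action> <mismatch_cnt> <data_offset> <journal_char>
 */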

static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result, unsigned maxlen)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (!strcasecmp(argv[0], "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		; /* MD_RECOVERY_NEEDED set below */
	else if (!strcasecmp(argv[0], "recover"))
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	else {
		if (!strcasecmp(argv[0], "check")) {
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!strcasecmp(argv[0], "repair")) {
			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else
			return -EINVAL;
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		if (!mddev->suspended && mddev->sync_thread)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended && mddev->thread)
		md_wakeup_thread(mddev->thread);

	return 0;
}
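
/*
 * Example (illustrative; hypothetical dm device name): kick off a
 * scrub of raid set "r" and later read the mismatch count reported
 * by raid_status():
 *
 *	dmsetup message r 0 check
 *	dmsetup status r
 */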

static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned int i;
	int r = 0;

	for (i = 0; !r && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			r = fn(ti,
			       rs->dev[i].data_dev,
			       0, /* No offset on data devs */
			       rs->md.dev_sectors,
			       data);

	return r;
}

static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned int chunk_size = to_bytes(rs->md.chunk_sectors);

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
}

static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
		/* Writes have to be stopped before suspending to avoid deadlocks. */
		if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
			md_stop_writes(&rs->md);

		mddev_lock_nointr(&rs->md);
		mddev_suspend(&rs->md);
		mddev_unlock(&rs->md);
	}
}

static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
	unsigned long flags;
	bool cleared = false;
	struct dm_raid_superblock *sb;
	struct mddev *mddev = &rs->md;
	struct md_rdev *r;

	/* RAID personalities have to provide hot add/remove methods or we need to bail out. */
	if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
		return;

	memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));

	for (i = 0; i < mddev->raid_disks; i++) {
		r = &rs->dev[i].rdev;
		/* HM FIXME: enhance journal device recovery processing */
		if (test_bit(Journal, &r->flags))
			continue;

		if (test_bit(Faulty, &r->flags) &&
		    r->meta_bdev && !read_disk_sb(r, r->sb_size, true)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       "  Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk').  If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call this function
			 * ourselves.
			 */
			flags = r->flags;
			clear_bit(In_sync, &r->flags); /* Mandatory for hot remove. */
			if (r->raid_disk >= 0) {
				if (mddev->pers->hot_remove_disk(mddev, r)) {
					/* Failed to revive this device, try next */
					r->flags = flags;
					continue;
				}
			} else
				r->raid_disk = r->saved_raid_disk = i;

			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);

			if (mddev->pers->hot_add_disk(mddev, r)) {
				/* Failed to revive this device, try next */
				r->raid_disk = r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				clear_bit(In_sync, &r->flags);
				r->recovery_offset = 0;
				set_bit(i, (void *) cleared_failed_devices);
				cleared = true;
			}
		}
	}

	/* If any failed devices could be cleared, update all sbs' failed_devices bits */
	if (cleared) {
		uint64_t failed_devices[DISKS_ARRAY_ELEMS];

		rdev_for_each(r, &rs->md) {
			if (test_bit(Journal, &r->flags))
				continue;

			sb = page_address(r->sb_page);
			sb_retrieve_failed_devices(sb, failed_devices);

			for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
				failed_devices[i] &= ~cleared_failed_devices[i];

			sb_update_failed_devices(sb, failed_devices);
		}
	}
}

static int __load_dirty_region_bitmap(struct raid_set *rs)
{
	int r = 0;

	/* Try loading the bitmap unless "raid0", which does not have one */
	if (!rs_is_raid0(rs) &&
	    !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
		r = md_bitmap_load(&rs->md);
		if (r)
			DMERR("Failed to load bitmap");
	}

	return r;
}

/* Enforce updating all superblocks */
static void rs_update_sbs(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	int ro = mddev->ro;

	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	mddev->ro = 0;
	md_update_sb(mddev, 1);
	mddev->ro = ro;
}

/*
 * Reshape changes raid algorithm of @rs to new one within personality
 * (e.g. raid6_zr -> raid6_nc), changes stripe size, adds/removes
 * disks from a raid set thus growing/shrinking it or resizes the set
 *
 * Call mddev_lock_nointr() before!
 */
static int rs_start_reshape(struct raid_set *rs)
{
	int r;
	struct mddev *mddev = &rs->md;
	struct md_personality *pers = mddev->pers;

	/* Don't allow the sync thread to work until the table gets reloaded. */
	set_bit(MD_RECOVERY_WAIT, &mddev->recovery);

	r = rs_setup_reshape(rs);
	if (r)
		return r;

	/*
	 * Check any reshape constraints enforced by the personality
	 * (may as well already kick the reshape off).
	 */
	r = pers->check_reshape(mddev);
	if (r) {
		rs->ti->error = "pers->check_reshape() failed";
		return r;
	}

	/*
	 * Personality may not provide start reshape method in which
	 * case check_reshape above has already covered everything.
	 */
	if (pers->start_reshape) {
		r = pers->start_reshape(mddev);
		if (r) {
			rs->ti->error = "pers->start_reshape() failed";
			return r;
		}
	}

	/*
	 * Now reshape got set up, update superblocks to
	 * reflect the fact so that a table reload will
	 * access proper superblock content in the ctr.
	 */
	rs_update_sbs(rs);

	return 0;
}

static int raid_preresume(struct dm_target *ti)
{
	int r;
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/* This is a resume after a suspend of the set -> it's already started. */
	if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
		return 0;

	/*
	 * The superblocks need to be updated on disk if the
	 * array is new or new devices got added (thus zeroed
	 * out by userspace) or __load_dirty_region_bitmap
	 * will overwrite them in core with old data or fail.
	 */
	if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
		rs_update_sbs(rs);

	/* Load the bitmap from disk unless raid0 */
	r = __load_dirty_region_bitmap(rs);
	if (r)
		return r;

	/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
	if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
	    mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
		r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors,
				     to_bytes(rs->requested_bitmap_chunk_sectors), 0);
		if (r)
			DMERR("Failed to resize bitmap");
	}

	/* Check for any resize/reshape on @rs and adjust/initiate */
	/* Be prepared for mddev_resume() in raid_resume() */
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		mddev->resync_min = mddev->recovery_cp;
	}

	/* Check for any reshape request unless new raid set */
	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		/* Initiate a reshape. */
		rs_set_rdev_sectors(rs);
		mddev_lock_nointr(mddev);
		r = rs_start_reshape(rs);
		mddev_unlock(mddev);
		if (r)
			DMWARN("Failed to check/start reshape, continuing without change");
		r = 0;
	}

	return r;
}

static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
		/*
		 * A secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
		attempt_restore_of_faulty_devices(rs);
	}

	if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
		/* Only reduce raid set size before running a disk removing reshape. */
		if (mddev->delta_disks < 0)
			rs_set_capacity(rs);

		mddev_lock_nointr(mddev);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		mddev->ro = 0;
		mddev->in_sync = 0;
		mddev_resume(mddev);
		mddev_unlock(mddev);
	}
}

static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 14, 0},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.postsuspend = raid_postsuspend,
	.preresume = raid_preresume,
	.resume = raid_resume,
};

static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);

module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");

MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
MODULE_ALIAS("dm-raid0");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");