/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * RAID-1 management functions.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/interval_tree_generic.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...)	\
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
		     START, LAST, static inline, raid1_rb);

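/*
 * Per-rdev interval tree used to serialize overlapping writes when write
 * serialization is enabled: a write's sector range is inserted before the
 * I/O is issued and removed when it completes, so a later write that
 * overlaps an in-flight one must wait (see wait_for_serialization() and
 * remove_serial() below).
 */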
59static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
60 struct serial_info *si, int idx)
61{
62 unsigned long flags;
63 int ret = 0;
64 sector_t lo = r1_bio->sector;
65 sector_t hi = lo + r1_bio->sectors;
66 struct serial_in_rdev *serial = &rdev->serial[idx];
67
68 spin_lock_irqsave(&serial->serial_lock, flags);
69
70 if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
71 ret = -EBUSY;
72 else {
73 si->start = lo;
74 si->last = hi;
75 raid1_rb_insert(si, &serial->serial_rb);
76 }
77 spin_unlock_irqrestore(&serial->serial_lock, flags);
78
79 return ret;
80}
81
82static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
83{
84 struct mddev *mddev = rdev->mddev;
85 struct serial_info *si;
86 int idx = sector_to_idx(r1_bio->sector);
87 struct serial_in_rdev *serial = &rdev->serial[idx];
88
89 if (WARN_ON(!mddev->serial_info_pool))
90 return;
91 si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
92 wait_event(serial->serial_io_wait,
93 check_and_add_serial(rdev, r1_bio, si, idx) == 0);
94}
95
96static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
97{
98 struct serial_info *si;
99 unsigned long flags;
100 int found = 0;
101 struct mddev *mddev = rdev->mddev;
102 int idx = sector_to_idx(lo);
103 struct serial_in_rdev *serial = &rdev->serial[idx];
104
105 spin_lock_irqsave(&serial->serial_lock, flags);
106 for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
107 si; si = raid1_rb_iter_next(si, lo, hi)) {
108 if (si->start == lo && si->last == hi) {
109 raid1_rb_remove(si, &serial->serial_rb);
110 mempool_free(si, mddev->serial_info_pool);
111 found = 1;
112 break;
113 }
114 }
115 if (!found)
116 WARN(1, "The write IO is not recorded for serialization\n");
117 spin_unlock_irqrestore(&serial->serial_lock, flags);
118 wake_up(&serial->serial_io_wait);
119}

/*
 * for a resync bio, the r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
125static inline struct r1bio *get_resync_r1bio(struct bio *bio)
126{
127 return get_resync_pages(bio)->raid_bio;
128}
129
130static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
131{
132 struct pool_info *pi = data;
133 int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
136 return kzalloc(size, gfp_flags);
137}
138
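/*
 * Limits for the resync path: at most RESYNC_DEPTH barriers may be raised
 * per barrier bucket (see raise_barrier()), and RESYNC_WINDOW /
 * RESYNC_WINDOW_SECTORS describe the corresponding amount of data
 * (RESYNC_BLOCK_SIZE itself is defined in raid1-10.c).  Clustered arrays
 * use the 16x larger CLUSTER_RESYNC_WINDOW when telling other nodes which
 * region is currently being resynced.
 */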
139#define RESYNC_DEPTH 32
140#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
141#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
142#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
143#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
144#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
145
146static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
147{
148 struct pool_info *pi = data;
149 struct r1bio *r1_bio;
150 struct bio *bio;
151 int need_pages;
152 int j;
153 struct resync_pages *rps;
154
155 r1_bio = r1bio_pool_alloc(gfp_flags, pi);
156 if (!r1_bio)
157 return NULL;
158
159 rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
160 gfp_flags);
161 if (!rps)
162 goto out_free_r1bio;
	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
167 for (j = pi->raid_disks ; j-- ; ) {
168 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
169 if (!bio)
170 goto out_free_bio;
171 r1_bio->bios[j] = bio;
172 }
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
179 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
180 need_pages = pi->raid_disks;
181 else
182 need_pages = 1;
183 for (j = 0; j < pi->raid_disks; j++) {
184 struct resync_pages *rp = &rps[j];
185
186 bio = r1_bio->bios[j];
187
188 if (j < need_pages) {
189 if (resync_alloc_pages(rp, gfp_flags))
190 goto out_free_pages;
191 } else {
192 memcpy(rp, &rps[0], sizeof(*rp));
193 resync_get_all_pages(rp);
194 }
195
196 rp->raid_bio = r1_bio;
197 bio->bi_private = rp;
198 }
199
200 r1_bio->master_bio = NULL;
201
202 return r1_bio;
203
204out_free_pages:
205 while (--j >= 0)
206 resync_free_pages(&rps[j]);
207
208out_free_bio:
209 while (++j < pi->raid_disks)
210 bio_put(r1_bio->bios[j]);
211 kfree(rps);
212
213out_free_r1bio:
214 rbio_pool_free(r1_bio, data);
215 return NULL;
216}
217
218static void r1buf_pool_free(void *__r1_bio, void *data)
219{
220 struct pool_info *pi = data;
221 int i;
222 struct r1bio *r1bio = __r1_bio;
223 struct resync_pages *rp = NULL;
224
225 for (i = pi->raid_disks; i--; ) {
226 rp = get_resync_pages(r1bio->bios[i]);
227 resync_free_pages(rp);
228 bio_put(r1bio->bios[i]);
229 }

	/* resync pages array stored in the 1st bio's .bi_private */
232 kfree(rp);
233
234 rbio_pool_free(r1bio, data);
235}
236
237static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
238{
239 int i;
240
241 for (i = 0; i < conf->raid_disks * 2; i++) {
242 struct bio **bio = r1_bio->bios + i;
243 if (!BIO_SPECIAL(*bio))
244 bio_put(*bio);
245 *bio = NULL;
246 }
247}
248
249static void free_r1bio(struct r1bio *r1_bio)
250{
251 struct r1conf *conf = r1_bio->mddev->private;
252
253 put_all_bios(conf, r1_bio);
254 mempool_free(r1_bio, &conf->r1bio_pool);
255}
256
257static void put_buf(struct r1bio *r1_bio)
258{
259 struct r1conf *conf = r1_bio->mddev->private;
260 sector_t sect = r1_bio->sector;
261 int i;
262
263 for (i = 0; i < conf->raid_disks * 2; i++) {
264 struct bio *bio = r1_bio->bios[i];
265 if (bio->bi_end_io)
266 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
267 }
268
269 mempool_free(r1_bio, &conf->r1buf_pool);
270
271 lower_barrier(conf, sect);
272}
273
274static void reschedule_retry(struct r1bio *r1_bio)
275{
276 unsigned long flags;
277 struct mddev *mddev = r1_bio->mddev;
278 struct r1conf *conf = mddev->private;
279 int idx;
280
281 idx = sector_to_idx(r1_bio->sector);
282 spin_lock_irqsave(&conf->device_lock, flags);
283 list_add(&r1_bio->retry_list, &conf->retry_list);
284 atomic_inc(&conf->nr_queued[idx]);
285 spin_unlock_irqrestore(&conf->device_lock, flags);
286
287 wake_up(&conf->wait_barrier);
288 md_wakeup_thread(mddev->thread);
289}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
296static void call_bio_endio(struct r1bio *r1_bio)
297{
298 struct bio *bio = r1_bio->master_bio;
299
300 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
301 bio->bi_status = BLK_STS_IOERR;
302
303 bio_endio(bio);
304}
305
306static void raid_end_bio_io(struct r1bio *r1_bio)
307{
308 struct bio *bio = r1_bio->master_bio;
309 struct r1conf *conf = r1_bio->mddev->private;

	/* if nobody has done the final endio yet, do it now */
312 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
313 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
314 (bio_data_dir(bio) == WRITE) ? "write" : "read",
315 (unsigned long long) bio->bi_iter.bi_sector,
316 (unsigned long long) bio_end_sector(bio) - 1);
317
318 call_bio_endio(r1_bio);
319 }
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.  All I/Os, even write-behind writes, are done.
	 */
324 allow_barrier(conf, r1_bio->sector);
325
326 free_r1bio(r1_bio);
327}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
332static inline void update_head_pos(int disk, struct r1bio *r1_bio)
333{
334 struct r1conf *conf = r1_bio->mddev->private;
335
336 conf->mirrors[disk].head_position =
337 r1_bio->sector + (r1_bio->sectors);
338}

/*
 * Find the disk number which triggered the given bio.
 */
343static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
344{
345 int mirror;
346 struct r1conf *conf = r1_bio->mddev->private;
347 int raid_disks = conf->raid_disks;
348
349 for (mirror = 0; mirror < raid_disks * 2; mirror++)
350 if (r1_bio->bios[mirror] == bio)
351 break;
352
353 BUG_ON(mirror == raid_disks * 2);
354 update_head_pos(mirror, r1_bio);
355
356 return mirror;
357}
358
359static void raid1_end_read_request(struct bio *bio)
360{
361 int uptodate = !bio->bi_status;
362 struct r1bio *r1_bio = bio->bi_private;
363 struct r1conf *conf = r1_bio->mddev->private;
364 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * This branch is our 'one mirror IO has finished' event handler:
	 */
369 update_head_pos(r1_bio->read_disk, r1_bio);
370
371 if (uptodate)
372 set_bit(R1BIO_Uptodate, &r1_bio->state);
373 else if (test_bit(FailFast, &rdev->flags) &&
374 test_bit(R1BIO_FailFast, &r1_bio->state))
375
376
377 ;
378 else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry".
		 */
383 unsigned long flags;
384 spin_lock_irqsave(&conf->device_lock, flags);
385 if (r1_bio->mddev->degraded == conf->raid_disks ||
386 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
387 test_bit(In_sync, &rdev->flags)))
388 uptodate = 1;
389 spin_unlock_irqrestore(&conf->device_lock, flags);
390 }
391
392 if (uptodate) {
393 raid_end_bio_io(r1_bio);
394 rdev_dec_pending(rdev, conf->mddev);
395 } else {
		/*
		 * oops, read error - keep the reference on the rdev and
		 * let raid1d retry the read from another mirror.
		 */
399 char b[BDEVNAME_SIZE];
400 pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
401 mdname(conf->mddev),
402 bdevname(rdev->bdev, b),
403 (unsigned long long)r1_bio->sector);
404 set_bit(R1BIO_ReadError, &r1_bio->state);
405 reschedule_retry(r1_bio);
406
407 }
408}
409
410static void close_write(struct r1bio *r1_bio)
411{
412
413 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
414 bio_free_pages(r1_bio->behind_master_bio);
415 bio_put(r1_bio->behind_master_bio);
416 r1_bio->behind_master_bio = NULL;
417 }
418
419 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
420 r1_bio->sectors,
421 !test_bit(R1BIO_Degraded, &r1_bio->state),
422 test_bit(R1BIO_BehindIO, &r1_bio->state));
423 md_write_end(r1_bio->mddev);
424}
425
426static void r1_bio_write_done(struct r1bio *r1_bio)
427{
428 if (!atomic_dec_and_test(&r1_bio->remaining))
429 return;
430
431 if (test_bit(R1BIO_WriteError, &r1_bio->state))
432 reschedule_retry(r1_bio);
433 else {
434 close_write(r1_bio);
435 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
436 reschedule_retry(r1_bio);
437 else
438 raid_end_bio_io(r1_bio);
439 }
440}
441
442static void raid1_end_write_request(struct bio *bio)
443{
444 struct r1bio *r1_bio = bio->bi_private;
445 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
446 struct r1conf *conf = r1_bio->mddev->private;
447 struct bio *to_put = NULL;
448 int mirror = find_bio_disk(r1_bio, bio);
449 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
450 bool discard_error;
451 sector_t lo = r1_bio->sector;
452 sector_t hi = r1_bio->sector + r1_bio->sectors;
453
454 discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
459 if (bio->bi_status && !discard_error) {
460 set_bit(WriteErrorSeen, &rdev->flags);
461 if (!test_and_set_bit(WantReplacement, &rdev->flags))
462 set_bit(MD_RECOVERY_NEEDED, &
463 conf->mddev->recovery);
464
465 if (test_bit(FailFast, &rdev->flags) &&
466 (bio->bi_opf & MD_FAILFAST) &&
467
468 !test_bit(WriteMostly, &rdev->flags)) {
469 md_error(r1_bio->mddev, rdev);
470 }

		/*
		 * When the device is faulty, it is not necessary to
		 * handle the write error.
		 * For failfast, this is the case of
		 * "dd if=/dev/zero of=/dev/md0".
		 */
478 if (!test_bit(Faulty, &rdev->flags))
479 set_bit(R1BIO_WriteError, &r1_bio->state);
480 else {
481
482 r1_bio->bios[mirror] = NULL;
483 to_put = bio;
484 }
485 } else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side.  So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
496 sector_t first_bad;
497 int bad_sectors;
498
499 r1_bio->bios[mirror] = NULL;
500 to_put = bio;

		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty: such a device cannot be relied on
		 * to read the data back later, so it must not be what
		 * makes the master bio report success.
		 */
509 if (test_bit(In_sync, &rdev->flags) &&
510 !test_bit(Faulty, &rdev->flags))
511 set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
514 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
515 &first_bad, &bad_sectors) && !discard_error) {
516 r1_bio->bios[mirror] = IO_MADE_GOOD;
517 set_bit(R1BIO_MadeGood, &r1_bio->state);
518 }
519 }
520
521 if (behind) {
522 if (test_bit(CollisionCheck, &rdev->flags))
523 remove_serial(rdev, lo, hi);
524 if (test_bit(WriteMostly, &rdev->flags))
525 atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-write-mostly
		 * disks.  Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait.
		 */
534 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
535 test_bit(R1BIO_Uptodate, &r1_bio->state)) {
536
537 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
538 struct bio *mbio = r1_bio->master_bio;
539 pr_debug("raid1: behind end write sectors"
540 " %llu-%llu\n",
541 (unsigned long long) mbio->bi_iter.bi_sector,
542 (unsigned long long) bio_end_sector(mbio) - 1);
543 call_bio_endio(r1_bio);
544 }
545 }
546 } else if (rdev->mddev->serialize_policy)
547 remove_serial(rdev, lo, hi);
548 if (r1_bio->bios[mirror] == NULL)
549 rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
555 r1_bio_write_done(r1_bio);
556
557 if (to_put)
558 bio_put(to_put);
559}
560
561static sector_t align_to_barrier_unit_end(sector_t start_sector,
562 sector_t sectors)
563{
564 sector_t len;
565
566 WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to the end of the
	 * barrier unit which start_sector belongs to.
	 */
571 len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
572 start_sector;
573
574 if (len > sectors)
575 len = sectors;
576
577 return len;
578}

/*
 * This routine returns the disk from which the requested read should
 * be done.  There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts; both the normal and the resync IO
 * completion handlers update this position correctly.  If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device, based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
594static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
595{
596 const sector_t this_sector = r1_bio->sector;
597 int sectors;
598 int best_good_sectors;
599 int best_disk, best_dist_disk, best_pending_disk;
600 int has_nonrot_disk;
601 int disk;
602 sector_t best_dist;
603 unsigned int min_pending;
604 struct md_rdev *rdev;
605 int choose_first;
606 int choose_next_idle;
607
608 rcu_read_lock();
	/*
	 * Check if we can balance.  We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
614 retry:
615 sectors = r1_bio->sectors;
616 best_disk = -1;
617 best_dist_disk = -1;
618 best_dist = MaxSector;
619 best_pending_disk = -1;
620 min_pending = UINT_MAX;
621 best_good_sectors = 0;
622 has_nonrot_disk = 0;
623 choose_next_idle = 0;
624 clear_bit(R1BIO_FailFast, &r1_bio->state);
625
626 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
627 (mddev_is_clustered(conf->mddev) &&
628 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
629 this_sector + sectors)))
630 choose_first = 1;
631 else
632 choose_first = 0;
633
634 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
635 sector_t dist;
636 sector_t first_bad;
637 int bad_sectors;
638 unsigned int pending;
639 bool nonrot;
640
641 rdev = rcu_dereference(conf->mirrors[disk].rdev);
642 if (r1_bio->bios[disk] == IO_BLOCKED
643 || rdev == NULL
644 || test_bit(Faulty, &rdev->flags))
645 continue;
646 if (!test_bit(In_sync, &rdev->flags) &&
647 rdev->recovery_offset < this_sector + sectors)
648 continue;
649 if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first available disk */
652 if (best_dist_disk < 0) {
653 if (is_badblock(rdev, this_sector, sectors,
654 &first_bad, &bad_sectors)) {
655 if (first_bad <= this_sector)
656
657 continue;
658 best_good_sectors = first_bad - this_sector;
659 } else
660 best_good_sectors = sectors;
661 best_dist_disk = disk;
662 best_pending_disk = disk;
663 }
664 continue;
665 }
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
669 if (is_badblock(rdev, this_sector, sectors,
670 &first_bad, &bad_sectors)) {
671 if (best_dist < MaxSector)
672
673 continue;
674 if (first_bad <= this_sector) {
				/* cannot read here.  If this is the 'primary'
				 * device, then we must not read beyond
				 * bad blocks from another device.
				 */
679 bad_sectors -= (this_sector - first_bad);
680 if (choose_first && sectors > bad_sectors)
681 sectors = bad_sectors;
682 if (best_good_sectors > sectors)
683 best_good_sectors = sectors;
684
685 } else {
686 sector_t good_sectors = first_bad - this_sector;
687 if (good_sectors > best_good_sectors) {
688 best_good_sectors = good_sectors;
689 best_disk = disk;
690 }
691 if (choose_first)
692 break;
693 }
694 continue;
695 } else {
696 if ((sectors > best_good_sectors) && (best_disk >= 0))
697 best_disk = -1;
698 best_good_sectors = sectors;
699 }
700
701 if (best_disk >= 0)
702
703 set_bit(R1BIO_FailFast, &r1_bio->state);
704
705 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
706 has_nonrot_disk |= nonrot;
707 pending = atomic_read(&rdev->nr_pending);
708 dist = abs(this_sector - conf->mirrors[disk].head_position);
709 if (choose_first) {
710 best_disk = disk;
711 break;
712 }
713
714 if (conf->mirrors[disk].next_seq_sect == this_sector
715 || dist == 0) {
716 int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
717 struct raid1_info *mirror = &conf->mirrors[disk];
718
719 best_disk = disk;
			/*
			 * If buffered sequential IO size exceeds the optimal
			 * iosize, check whether there is an idle disk.  If
			 * so, choose the idle disk.  read_balance() could
			 * already have chosen an idle disk before noticing
			 * this is sequential IO on this disk; that doesn't
			 * matter, because that disk will go idle and will be
			 * used once this disk's IO size exceeds the optimal
			 * iosize.  This way the first disk's IO size is at
			 * least the optimal iosize; the second disk's might
			 * be small, which is not a big deal since when the
			 * second disk starts IO, the first is likely still
			 * busy.
			 */
733 if (nonrot && opt_iosize > 0 &&
734 mirror->seq_start != MaxSector &&
735 mirror->next_seq_sect > opt_iosize &&
736 mirror->next_seq_sect - opt_iosize >=
737 mirror->seq_start) {
738 choose_next_idle = 1;
739 continue;
740 }
741 break;
742 }
743
744 if (choose_next_idle)
745 continue;
746
747 if (min_pending > pending) {
748 min_pending = pending;
749 best_pending_disk = disk;
750 }
751
752 if (dist < best_dist) {
753 best_dist = dist;
754 best_dist_disk = disk;
755 }
756 }

	/*
	 * If all disks are rotational, choose the closest disk.  If any
	 * disk is non-rotational, choose the disk with fewer pending
	 * requests even if it is rotational, which may or may not be
	 * optimal for arrays with mixed rotational/non-rotational disks,
	 * depending on the workload.
	 */
764 if (best_disk == -1) {
765 if (has_nonrot_disk || min_pending == 0)
766 best_disk = best_pending_disk;
767 else
768 best_disk = best_dist_disk;
769 }
770
771 if (best_disk >= 0) {
772 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
773 if (!rdev)
774 goto retry;
775 atomic_inc(&rdev->nr_pending);
776 sectors = best_good_sectors;
777
778 if (conf->mirrors[best_disk].next_seq_sect != this_sector)
779 conf->mirrors[best_disk].seq_start = this_sector;
780
781 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
782 }
783 rcu_read_unlock();
784 *max_sectors = sectors;
785
786 return best_disk;
787}
788
789static void flush_bio_list(struct r1conf *conf, struct bio *bio)
790{
791
792 md_bitmap_unplug(conf->mddev->bitmap);
793 wake_up(&conf->wait_barrier);
794
795 while (bio) {
796 struct bio *next = bio->bi_next;
797 struct md_rdev *rdev = (void *)bio->bi_disk;
798 bio->bi_next = NULL;
799 bio_set_dev(bio, rdev->bdev);
800 if (test_bit(Faulty, &rdev->flags)) {
801 bio_io_error(bio);
802 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
803 !blk_queue_discard(bio->bi_disk->queue)))
804
805 bio_endio(bio);
806 else
807 submit_bio_noacct(bio);
808 bio = next;
809 cond_resched();
810 }
811}
812
813static void flush_pending_writes(struct r1conf *conf)
814{
815
816
817
818 spin_lock_irq(&conf->device_lock);
819
820 if (conf->pending_bio_list.head) {
821 struct blk_plug plug;
822 struct bio *bio;
823
824 bio = bio_list_get(&conf->pending_bio_list);
825 conf->pending_count = 0;
826 spin_unlock_irq(&conf->device_lock);
827
828
829
830
831
832
833
834
835
836
837 __set_current_state(TASK_RUNNING);
838 blk_start_plug(&plug);
839 flush_bio_list(conf, bio);
840 blk_finish_plug(&plug);
841 } else
842 spin_unlock_irq(&conf->device_lock);
843}

/*
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO,
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls wait_barrier().  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier() when it has finished its IO.
 * Background IO must call raise_barrier().  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier() when the particular background IO completes.
 *
 * If resync/recovery is interrupted, returns -EINTR;
 * otherwise, returns 0.
 */
869static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
870{
871 int idx = sector_to_idx(sector_nr);
872
873 spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
876 wait_event_lock_irq(conf->wait_barrier,
877 !atomic_read(&conf->nr_waiting[idx]),
878 conf->resync_lock);

	/* block any new IO from starting */
881 atomic_inc(&conf->barrier[idx]);

	/*
	 * In raise_barrier() we first increase conf->barrier[idx], then
	 * check conf->nr_pending[idx].  In _wait_barrier() we first
	 * increase conf->nr_pending[idx], then check conf->barrier[idx].
	 * A memory barrier here makes sure conf->nr_pending[idx] won't
	 * be fetched before conf->barrier[idx] is increased; otherwise
	 * there would be a race between raise_barrier() and _wait_barrier().
	 */
890 smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    exists in the corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning the
	 *    maximum resync count allowed on the current bucket is reached.
	 */
899 wait_event_lock_irq(conf->wait_barrier,
900 (!conf->array_frozen &&
901 !atomic_read(&conf->nr_pending[idx]) &&
902 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
903 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
904 conf->resync_lock);
905
906 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
907 atomic_dec(&conf->barrier[idx]);
908 spin_unlock_irq(&conf->resync_lock);
909 wake_up(&conf->wait_barrier);
910 return -EINTR;
911 }
912
913 atomic_inc(&conf->nr_sync_pending);
914 spin_unlock_irq(&conf->resync_lock);
915
916 return 0;
917}
918
919static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
920{
921 int idx = sector_to_idx(sector_nr);
922
923 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
924
925 atomic_dec(&conf->barrier[idx]);
926 atomic_dec(&conf->nr_sync_pending);
927 wake_up(&conf->wait_barrier);
928}
929
930static void _wait_barrier(struct r1conf *conf, int idx)
931{
	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * so that raise_barrier() can be blocked while it waits for
	 * conf->nr_pending[idx] to become 0.  This lets us avoid taking
	 * conf->resync_lock when no barrier is raised in the same
	 * barrier unit bucket.  If the array is frozen, I/O must be
	 * blocked until the array is unfrozen.
	 */
940 atomic_inc(&conf->nr_pending[idx]);
941
942
943
944
945
946
947
948
949 smp_mb__after_atomic();
950
951
952
953
954
955
956
957
958
959
960 if (!READ_ONCE(conf->array_frozen) &&
961 !atomic_read(&conf->barrier[idx]))
962 return;
963
964
965
966
967
968
969
970
971 spin_lock_irq(&conf->resync_lock);
972 atomic_inc(&conf->nr_waiting[idx]);
973 atomic_dec(&conf->nr_pending[idx]);
974
975
976
977
978 wake_up(&conf->wait_barrier);
979
980 wait_event_lock_irq(conf->wait_barrier,
981 !conf->array_frozen &&
982 !atomic_read(&conf->barrier[idx]),
983 conf->resync_lock);
984 atomic_inc(&conf->nr_pending[idx]);
985 atomic_dec(&conf->nr_waiting[idx]);
986 spin_unlock_irq(&conf->resync_lock);
987}
988
989static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
990{
991 int idx = sector_to_idx(sector_nr);
992
993
994
995
996
997
998
999
1000 atomic_inc(&conf->nr_pending[idx]);
1001
1002 if (!READ_ONCE(conf->array_frozen))
1003 return;
1004
1005 spin_lock_irq(&conf->resync_lock);
1006 atomic_inc(&conf->nr_waiting[idx]);
1007 atomic_dec(&conf->nr_pending[idx]);
1008
1009
1010
1011
1012 wake_up(&conf->wait_barrier);
1013
1014 wait_event_lock_irq(conf->wait_barrier,
1015 !conf->array_frozen,
1016 conf->resync_lock);
1017 atomic_inc(&conf->nr_pending[idx]);
1018 atomic_dec(&conf->nr_waiting[idx]);
1019 spin_unlock_irq(&conf->resync_lock);
1020}
1021
1022static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
1023{
1024 int idx = sector_to_idx(sector_nr);
1025
1026 _wait_barrier(conf, idx);
1027}
1028
1029static void _allow_barrier(struct r1conf *conf, int idx)
1030{
1031 atomic_dec(&conf->nr_pending[idx]);
1032 wake_up(&conf->wait_barrier);
1033}
1034
1035static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1036{
1037 int idx = sector_to_idx(sector_nr);
1038
1039 _allow_barrier(conf, idx);
1040}

/* conf->resync_lock should be held */
1043static int get_unqueued_pending(struct r1conf *conf)
1044{
1045 int idx, ret;
1046
1047 ret = atomic_read(&conf->nr_sync_pending);
1048 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1049 ret += atomic_read(&conf->nr_pending[idx]) -
1050 atomic_read(&conf->nr_queued[idx]);
1051
1052 return ret;
1053}
1054
1055static void freeze_array(struct r1conf *conf, int extra)
1056{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * go quiet.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync IO will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at
	 * _wait_barrier() and wait_read_barrier().  The in-flight I/Os
	 * will either complete or be queued.  When everything goes
	 * quiet, we are safe to change the configuration and do other
	 * jobs.
	 *
	 * Everything flushed out of the queues flows through raid1d and
	 * is counted in nr_queued, so freeze_array() waits until
	 *	get_unqueued_pending() == extra
	 * where 'extra' accounts for requests the caller itself still
	 * holds (0 or 1).
	 */
1080 spin_lock_irq(&conf->resync_lock);
1081 conf->array_frozen = 1;
1082 raid1_log(conf->mddev, "wait freeze");
1083 wait_event_lock_irq_cmd(
1084 conf->wait_barrier,
1085 get_unqueued_pending(conf) == extra,
1086 conf->resync_lock,
1087 flush_pending_writes(conf));
1088 spin_unlock_irq(&conf->resync_lock);
1089}
1090static void unfreeze_array(struct r1conf *conf)
1091{
1092
1093 spin_lock_irq(&conf->resync_lock);
1094 conf->array_frozen = 0;
1095 spin_unlock_irq(&conf->resync_lock);
1096 wake_up(&conf->wait_barrier);
1097}
1098
1099static void alloc_behind_master_bio(struct r1bio *r1_bio,
1100 struct bio *bio)
1101{
1102 int size = bio->bi_iter.bi_size;
1103 unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1104 int i = 0;
1105 struct bio *behind_bio = NULL;
1106
1107 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
1108 if (!behind_bio)
1109 return;
1110
1111
1112 if (!bio_has_data(bio)) {
1113 behind_bio->bi_iter.bi_size = size;
1114 goto skip_copy;
1115 }
1116
1117 behind_bio->bi_write_hint = bio->bi_write_hint;
1118
1119 while (i < vcnt && size) {
1120 struct page *page;
1121 int len = min_t(int, PAGE_SIZE, size);
1122
1123 page = alloc_page(GFP_NOIO);
1124 if (unlikely(!page))
1125 goto free_pages;
1126
1127 bio_add_page(behind_bio, page, len, 0);
1128
1129 size -= len;
1130 i++;
1131 }
1132
1133 bio_copy_data(behind_bio, bio);
1134skip_copy:
1135 r1_bio->behind_master_bio = behind_bio;
1136 set_bit(R1BIO_BehindIO, &r1_bio->state);
1137
1138 return;
1139
1140free_pages:
1141 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1142 bio->bi_iter.bi_size);
1143 bio_free_pages(behind_bio);
1144 bio_put(behind_bio);
1145}
1146
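/*
 * Writes queued while a blk_plug is active are collected here and
 * submitted in one batch from raid1_unplug(), either directly or via the
 * raid1d thread when unplugging from a scheduling context.
 */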
1147struct raid1_plug_cb {
1148 struct blk_plug_cb cb;
1149 struct bio_list pending;
1150 int pending_cnt;
1151};
1152
1153static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1154{
1155 struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1156 cb);
1157 struct mddev *mddev = plug->cb.data;
1158 struct r1conf *conf = mddev->private;
1159 struct bio *bio;
1160
1161 if (from_schedule || current->bio_list) {
1162 spin_lock_irq(&conf->device_lock);
1163 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1164 conf->pending_count += plug->pending_cnt;
1165 spin_unlock_irq(&conf->device_lock);
1166 wake_up(&conf->wait_barrier);
1167 md_wakeup_thread(mddev->thread);
1168 kfree(plug);
1169 return;
1170 }
1171
1172
1173 bio = bio_list_get(&plug->pending);
1174 flush_bio_list(conf, bio);
1175 kfree(plug);
1176}
1177
1178static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1179{
1180 r1_bio->master_bio = bio;
1181 r1_bio->sectors = bio_sectors(bio);
1182 r1_bio->state = 0;
1183 r1_bio->mddev = mddev;
1184 r1_bio->sector = bio->bi_iter.bi_sector;
1185}
1186
1187static inline struct r1bio *
1188alloc_r1bio(struct mddev *mddev, struct bio *bio)
1189{
1190 struct r1conf *conf = mddev->private;
1191 struct r1bio *r1_bio;
1192
1193 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
1194
1195 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1196 init_r1bio(r1_bio, mddev, bio);
1197 return r1_bio;
1198}
1199
1200static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1201 int max_read_sectors, struct r1bio *r1_bio)
1202{
1203 struct r1conf *conf = mddev->private;
1204 struct raid1_info *mirror;
1205 struct bio *read_bio;
1206 struct bitmap *bitmap = mddev->bitmap;
1207 const int op = bio_op(bio);
1208 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1209 int max_sectors;
1210 int rdisk;
1211 bool print_msg = !!r1_bio;
1212 char b[BDEVNAME_SIZE];
1213
1214
1215
1216
1217
1218
1219 gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1220
1221 if (print_msg) {
1222
1223 struct md_rdev *rdev;
1224 rcu_read_lock();
1225 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1226 if (rdev)
1227 bdevname(rdev->bdev, b);
1228 else
1229 strcpy(b, "???");
1230 rcu_read_unlock();
1231 }
1232
1233
1234
1235
1236
1237 wait_read_barrier(conf, bio->bi_iter.bi_sector);
1238
1239 if (!r1_bio)
1240 r1_bio = alloc_r1bio(mddev, bio);
1241 else
1242 init_r1bio(r1_bio, mddev, bio);
1243 r1_bio->sectors = max_read_sectors;
1244
1245
1246
1247
1248
1249 rdisk = read_balance(conf, r1_bio, &max_sectors);
1250
1251 if (rdisk < 0) {
1252
1253 if (print_msg) {
1254 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1255 mdname(mddev),
1256 b,
1257 (unsigned long long)r1_bio->sector);
1258 }
1259 raid_end_bio_io(r1_bio);
1260 return;
1261 }
1262 mirror = conf->mirrors + rdisk;
1263
1264 if (print_msg)
1265 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
1266 mdname(mddev),
1267 (unsigned long long)r1_bio->sector,
1268 bdevname(mirror->rdev->bdev, b));
1269
1270 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1271 bitmap) {
1272
1273
1274
1275
1276 raid1_log(mddev, "wait behind writes");
1277 wait_event(bitmap->behind_wait,
1278 atomic_read(&bitmap->behind_writes) == 0);
1279 }
1280
1281 if (max_sectors < bio_sectors(bio)) {
1282 struct bio *split = bio_split(bio, max_sectors,
1283 gfp, &conf->bio_split);
1284 bio_chain(split, bio);
1285 submit_bio_noacct(bio);
1286 bio = split;
1287 r1_bio->master_bio = bio;
1288 r1_bio->sectors = max_sectors;
1289 }
1290
1291 r1_bio->read_disk = rdisk;
1292
1293 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
1294
1295 r1_bio->bios[rdisk] = read_bio;
1296
1297 read_bio->bi_iter.bi_sector = r1_bio->sector +
1298 mirror->rdev->data_offset;
1299 bio_set_dev(read_bio, mirror->rdev->bdev);
1300 read_bio->bi_end_io = raid1_end_read_request;
1301 bio_set_op_attrs(read_bio, op, do_sync);
1302 if (test_bit(FailFast, &mirror->rdev->flags) &&
1303 test_bit(R1BIO_FailFast, &r1_bio->state))
1304 read_bio->bi_opf |= MD_FAILFAST;
1305 read_bio->bi_private = r1_bio;
1306
1307 if (mddev->gendisk)
1308 trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
1309 disk_devt(mddev->gendisk), r1_bio->sector);
1310
1311 submit_bio_noacct(read_bio);
1312}
1313
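/*
 * Issue one mirrored write: pick the target rdevs (skipping faulty or
 * blocked ones and trimming the request around known bad blocks),
 * optionally set up a write-behind copy of the data, and queue a cloned
 * bio per mirror.
 */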
1314static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1315 int max_write_sectors)
1316{
1317 struct r1conf *conf = mddev->private;
1318 struct r1bio *r1_bio;
1319 int i, disks;
1320 struct bitmap *bitmap = mddev->bitmap;
1321 unsigned long flags;
1322 struct md_rdev *blocked_rdev;
1323 struct blk_plug_cb *cb;
1324 struct raid1_plug_cb *plug = NULL;
1325 int first_clone;
1326 int max_sectors;
1327
1328 if (mddev_is_clustered(mddev) &&
1329 md_cluster_ops->area_resyncing(mddev, WRITE,
1330 bio->bi_iter.bi_sector, bio_end_sector(bio))) {
1331
1332 DEFINE_WAIT(w);
1333 for (;;) {
1334 prepare_to_wait(&conf->wait_barrier,
1335 &w, TASK_IDLE);
1336 if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1337 bio->bi_iter.bi_sector,
1338 bio_end_sector(bio)))
1339 break;
1340 schedule();
1341 }
1342 finish_wait(&conf->wait_barrier, &w);
1343 }
1344
1345
1346
1347
1348
1349
1350 wait_barrier(conf, bio->bi_iter.bi_sector);
1351
1352 r1_bio = alloc_r1bio(mddev, bio);
1353 r1_bio->sectors = max_write_sectors;
1354
1355 if (conf->pending_count >= max_queued_requests) {
1356 md_wakeup_thread(mddev->thread);
1357 raid1_log(mddev, "wait queued");
1358 wait_event(conf->wait_barrier,
1359 conf->pending_count < max_queued_requests);
1360 }
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372 disks = conf->raid_disks * 2;
1373 retry_write:
1374 blocked_rdev = NULL;
1375 rcu_read_lock();
1376 max_sectors = r1_bio->sectors;
1377 for (i = 0; i < disks; i++) {
1378 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1379 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1380 atomic_inc(&rdev->nr_pending);
1381 blocked_rdev = rdev;
1382 break;
1383 }
1384 r1_bio->bios[i] = NULL;
1385 if (!rdev || test_bit(Faulty, &rdev->flags)) {
1386 if (i < conf->raid_disks)
1387 set_bit(R1BIO_Degraded, &r1_bio->state);
1388 continue;
1389 }
1390
1391 atomic_inc(&rdev->nr_pending);
1392 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1393 sector_t first_bad;
1394 int bad_sectors;
1395 int is_bad;
1396
1397 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1398 &first_bad, &bad_sectors);
1399 if (is_bad < 0) {
1400
1401
1402 set_bit(BlockedBadBlocks, &rdev->flags);
1403 blocked_rdev = rdev;
1404 break;
1405 }
1406 if (is_bad && first_bad <= r1_bio->sector) {
1407
1408 bad_sectors -= (r1_bio->sector - first_bad);
1409 if (bad_sectors < max_sectors)
1410
1411
1412
1413 max_sectors = bad_sectors;
1414 rdev_dec_pending(rdev, mddev);
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425 continue;
1426 }
1427 if (is_bad) {
1428 int good_sectors = first_bad - r1_bio->sector;
1429 if (good_sectors < max_sectors)
1430 max_sectors = good_sectors;
1431 }
1432 }
1433 r1_bio->bios[i] = bio;
1434 }
1435 rcu_read_unlock();
1436
1437 if (unlikely(blocked_rdev)) {
1438
1439 int j;
1440
1441 for (j = 0; j < i; j++)
1442 if (r1_bio->bios[j])
1443 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1444 r1_bio->state = 0;
1445 allow_barrier(conf, bio->bi_iter.bi_sector);
1446 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1447 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1448 wait_barrier(conf, bio->bi_iter.bi_sector);
1449 goto retry_write;
1450 }
1451
1452 if (max_sectors < bio_sectors(bio)) {
1453 struct bio *split = bio_split(bio, max_sectors,
1454 GFP_NOIO, &conf->bio_split);
1455 bio_chain(split, bio);
1456 submit_bio_noacct(bio);
1457 bio = split;
1458 r1_bio->master_bio = bio;
1459 r1_bio->sectors = max_sectors;
1460 }
1461
1462 atomic_set(&r1_bio->remaining, 1);
1463 atomic_set(&r1_bio->behind_remaining, 0);
1464
1465 first_clone = 1;
1466
1467 for (i = 0; i < disks; i++) {
1468 struct bio *mbio = NULL;
1469 struct md_rdev *rdev = conf->mirrors[i].rdev;
1470 if (!r1_bio->bios[i])
1471 continue;
1472
1473 if (first_clone) {
1474
1475
1476
1477
1478 if (bitmap &&
1479 (atomic_read(&bitmap->behind_writes)
1480 < mddev->bitmap_info.max_write_behind) &&
1481 !waitqueue_active(&bitmap->behind_wait)) {
1482 alloc_behind_master_bio(r1_bio, bio);
1483 }
1484
1485 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
1486 test_bit(R1BIO_BehindIO, &r1_bio->state));
1487 first_clone = 0;
1488 }
1489
1490 if (r1_bio->behind_master_bio)
1491 mbio = bio_clone_fast(r1_bio->behind_master_bio,
1492 GFP_NOIO, &mddev->bio_set);
1493 else
1494 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
1495
1496 if (r1_bio->behind_master_bio) {
1497 if (test_bit(CollisionCheck, &rdev->flags))
1498 wait_for_serialization(rdev, r1_bio);
1499 if (test_bit(WriteMostly, &rdev->flags))
1500 atomic_inc(&r1_bio->behind_remaining);
1501 } else if (mddev->serialize_policy)
1502 wait_for_serialization(rdev, r1_bio);
1503
1504 r1_bio->bios[i] = mbio;
1505
1506 mbio->bi_iter.bi_sector = (r1_bio->sector +
1507 conf->mirrors[i].rdev->data_offset);
1508 bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
1509 mbio->bi_end_io = raid1_end_write_request;
1510 mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
1511 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1512 !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1513 conf->raid_disks - mddev->degraded > 1)
1514 mbio->bi_opf |= MD_FAILFAST;
1515 mbio->bi_private = r1_bio;
1516
1517 atomic_inc(&r1_bio->remaining);
1518
1519 if (mddev->gendisk)
1520 trace_block_bio_remap(mbio->bi_disk->queue,
1521 mbio, disk_devt(mddev->gendisk),
1522 r1_bio->sector);
1523
1524 mbio->bi_disk = (void *)conf->mirrors[i].rdev;
1525
1526 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1527 if (cb)
1528 plug = container_of(cb, struct raid1_plug_cb, cb);
1529 else
1530 plug = NULL;
1531 if (plug) {
1532 bio_list_add(&plug->pending, mbio);
1533 plug->pending_cnt++;
1534 } else {
1535 spin_lock_irqsave(&conf->device_lock, flags);
1536 bio_list_add(&conf->pending_bio_list, mbio);
1537 conf->pending_count++;
1538 spin_unlock_irqrestore(&conf->device_lock, flags);
1539 md_wakeup_thread(mddev->thread);
1540 }
1541 }
1542
1543 r1_bio_write_done(r1_bio);
1544
1545
1546 wake_up(&conf->wait_barrier);
1547}
1548
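/*
 * Entry point for all normal I/O.  Flush requests are handled by
 * md_flush_request(); everything else is clipped so that it does not
 * cross a barrier unit boundary and is then routed to the read or write
 * path above.
 */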
1549static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1550{
1551 sector_t sectors;
1552
1553 if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1554 && md_flush_request(mddev, bio))
1555 return true;
1556
1557
1558
1559
1560
1561
1562
1563
1564 sectors = align_to_barrier_unit_end(
1565 bio->bi_iter.bi_sector, bio_sectors(bio));
1566
1567 if (bio_data_dir(bio) == READ)
1568 raid1_read_request(mddev, bio, sectors, NULL);
1569 else {
1570 if (!md_write_start(mddev,bio))
1571 return false;
1572 raid1_write_request(mddev, bio, sectors);
1573 }
1574 return true;
1575}
1576
1577static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1578{
1579 struct r1conf *conf = mddev->private;
1580 int i;
1581
1582 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1583 conf->raid_disks - mddev->degraded);
1584 rcu_read_lock();
1585 for (i = 0; i < conf->raid_disks; i++) {
1586 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1587 seq_printf(seq, "%s",
1588 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1589 }
1590 rcu_read_unlock();
1591 seq_printf(seq, "]");
1592}
1593
1594static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1595{
1596 char b[BDEVNAME_SIZE];
1597 struct r1conf *conf = mddev->private;
1598 unsigned long flags;

	/*
	 * If it is not operational, then we have already marked it as dead;
	 * else if it is the last working disk with "fail_last_dev == false",
	 * ignore the error and let the next level up know;
	 * else mark the drive as failed.
	 */
1606 spin_lock_irqsave(&conf->device_lock, flags);
1607 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
1608 && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However, don't try a recovery from this drive as
		 * it is very likely it will fail.
		 */
1615 conf->recovery_disabled = mddev->recovery_disabled;
1616 spin_unlock_irqrestore(&conf->device_lock, flags);
1617 return;
1618 }
1619 set_bit(Blocked, &rdev->flags);
1620 if (test_and_clear_bit(In_sync, &rdev->flags))
1621 mddev->degraded++;
1622 set_bit(Faulty, &rdev->flags);
1623 spin_unlock_irqrestore(&conf->device_lock, flags);
1624
1625
1626
1627 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1628 set_mask_bits(&mddev->sb_flags, 0,
1629 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1630 pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
1631 "md/raid1:%s: Operation continuing on %d devices.\n",
1632 mdname(mddev), bdevname(rdev->bdev, b),
1633 mdname(mddev), conf->raid_disks - mddev->degraded);
1634}
1635
1636static void print_conf(struct r1conf *conf)
1637{
1638 int i;
1639
1640 pr_debug("RAID1 conf printout:\n");
1641 if (!conf) {
1642 pr_debug("(!conf)\n");
1643 return;
1644 }
1645 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1646 conf->raid_disks);
1647
1648 rcu_read_lock();
1649 for (i = 0; i < conf->raid_disks; i++) {
1650 char b[BDEVNAME_SIZE];
1651 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1652 if (rdev)
1653 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1654 i, !test_bit(In_sync, &rdev->flags),
1655 !test_bit(Faulty, &rdev->flags),
1656 bdevname(rdev->bdev,b));
1657 }
1658 rcu_read_unlock();
1659}
1660
1661static void close_sync(struct r1conf *conf)
1662{
1663 int idx;
1664
1665 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1666 _wait_barrier(conf, idx);
1667 _allow_barrier(conf, idx);
1668 }
1669
1670 mempool_exit(&conf->r1buf_pool);
1671}
1672
1673static int raid1_spare_active(struct mddev *mddev)
1674{
1675 int i;
1676 struct r1conf *conf = mddev->private;
1677 int count = 0;
1678 unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection is not needed.
	 * device_lock is used to avoid races with raid1_end_read_request(),
	 * which expects 'In_sync' flags and ->degraded to be consistent.
	 */
1687 spin_lock_irqsave(&conf->device_lock, flags);
1688 for (i = 0; i < conf->raid_disks; i++) {
1689 struct md_rdev *rdev = conf->mirrors[i].rdev;
1690 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1691 if (repl
1692 && !test_bit(Candidate, &repl->flags)
1693 && repl->recovery_offset == MaxSector
1694 && !test_bit(Faulty, &repl->flags)
1695 && !test_and_set_bit(In_sync, &repl->flags)) {
1696
1697 if (!rdev ||
1698 !test_and_clear_bit(In_sync, &rdev->flags))
1699 count++;
1700 if (rdev) {
1701
1702
1703
1704
1705 set_bit(Faulty, &rdev->flags);
1706 sysfs_notify_dirent_safe(
1707 rdev->sysfs_state);
1708 }
1709 }
1710 if (rdev
1711 && rdev->recovery_offset == MaxSector
1712 && !test_bit(Faulty, &rdev->flags)
1713 && !test_and_set_bit(In_sync, &rdev->flags)) {
1714 count++;
1715 sysfs_notify_dirent_safe(rdev->sysfs_state);
1716 }
1717 }
1718 mddev->degraded -= count;
1719 spin_unlock_irqrestore(&conf->device_lock, flags);
1720
1721 print_conf(conf);
1722 return count;
1723}
1724
1725static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1726{
1727 struct r1conf *conf = mddev->private;
1728 int err = -EEXIST;
1729 int mirror = 0;
1730 struct raid1_info *p;
1731 int first = 0;
1732 int last = conf->raid_disks - 1;
1733
1734 if (mddev->recovery_disabled == conf->recovery_disabled)
1735 return -EBUSY;
1736
1737 if (md_integrity_add_rdev(rdev, mddev))
1738 return -ENXIO;
1739
1740 if (rdev->raid_disk >= 0)
1741 first = last = rdev->raid_disk;

	/*
	 * Find a free slot for the disk, but prefer rdev->saved_raid_disk
	 * if possible.
	 */
1747 if (rdev->saved_raid_disk >= 0 &&
1748 rdev->saved_raid_disk >= first &&
1749 rdev->saved_raid_disk < conf->raid_disks &&
1750 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1751 first = last = rdev->saved_raid_disk;
1752
1753 for (mirror = first; mirror <= last; mirror++) {
1754 p = conf->mirrors + mirror;
1755 if (!p->rdev) {
1756 if (mddev->gendisk)
1757 disk_stack_limits(mddev->gendisk, rdev->bdev,
1758 rdev->data_offset << 9);
1759
1760 p->head_position = 0;
1761 rdev->raid_disk = mirror;
1762 err = 0;
1763
1764
1765
1766 if (rdev->saved_raid_disk < 0)
1767 conf->fullsync = 1;
1768 rcu_assign_pointer(p->rdev, rdev);
1769 break;
1770 }
1771 if (test_bit(WantReplacement, &p->rdev->flags) &&
1772 p[conf->raid_disks].rdev == NULL) {
1773
1774 clear_bit(In_sync, &rdev->flags);
1775 set_bit(Replacement, &rdev->flags);
1776 rdev->raid_disk = mirror;
1777 err = 0;
1778 conf->fullsync = 1;
1779 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1780 break;
1781 }
1782 }
1783 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1784 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1785 print_conf(conf);
1786 return err;
1787}
1788
1789static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1790{
1791 struct r1conf *conf = mddev->private;
1792 int err = 0;
1793 int number = rdev->raid_disk;
1794 struct raid1_info *p = conf->mirrors + number;
1795
1796 if (rdev != p->rdev)
1797 p = conf->mirrors + conf->raid_disks + number;
1798
1799 print_conf(conf);
1800 if (rdev == p->rdev) {
1801 if (test_bit(In_sync, &rdev->flags) ||
1802 atomic_read(&rdev->nr_pending)) {
1803 err = -EBUSY;
1804 goto abort;
1805 }
1806
1807
1808
1809 if (!test_bit(Faulty, &rdev->flags) &&
1810 mddev->recovery_disabled != conf->recovery_disabled &&
1811 mddev->degraded < conf->raid_disks) {
1812 err = -EBUSY;
1813 goto abort;
1814 }
1815 p->rdev = NULL;
1816 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1817 synchronize_rcu();
1818 if (atomic_read(&rdev->nr_pending)) {
1819
1820 err = -EBUSY;
1821 p->rdev = rdev;
1822 goto abort;
1823 }
1824 }
1825 if (conf->mirrors[conf->raid_disks + number].rdev) {
1826
1827
1828
1829
1830 struct md_rdev *repl =
1831 conf->mirrors[conf->raid_disks + number].rdev;
1832 freeze_array(conf, 0);
1833 if (atomic_read(&repl->nr_pending)) {
1834
1835
1836
1837
1838
1839
1840 err = -EBUSY;
1841 unfreeze_array(conf);
1842 goto abort;
1843 }
1844 clear_bit(Replacement, &repl->flags);
1845 p->rdev = repl;
1846 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1847 unfreeze_array(conf);
1848 }
1849
1850 clear_bit(WantReplacement, &rdev->flags);
1851 err = md_integrity_register(mddev);
1852 }
1853abort:
1854
1855 print_conf(conf);
1856 return err;
1857}
1858
1859static void end_sync_read(struct bio *bio)
1860{
1861 struct r1bio *r1_bio = get_resync_r1bio(bio);
1862
1863 update_head_pos(r1_bio->read_disk, r1_bio);
1864
1865
1866
1867
1868
1869
1870 if (!bio->bi_status)
1871 set_bit(R1BIO_Uptodate, &r1_bio->state);
1872
1873 if (atomic_dec_and_test(&r1_bio->remaining))
1874 reschedule_retry(r1_bio);
1875}
1876
1877static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1878{
1879 sector_t sync_blocks = 0;
1880 sector_t s = r1_bio->sector;
1881 long sectors_to_go = r1_bio->sectors;
1882
1883
1884 do {
1885 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1886 s += sync_blocks;
1887 sectors_to_go -= sync_blocks;
1888 } while (sectors_to_go > 0);
1889}
1890
1891static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
1892{
1893 if (atomic_dec_and_test(&r1_bio->remaining)) {
1894 struct mddev *mddev = r1_bio->mddev;
1895 int s = r1_bio->sectors;
1896
1897 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1898 test_bit(R1BIO_WriteError, &r1_bio->state))
1899 reschedule_retry(r1_bio);
1900 else {
1901 put_buf(r1_bio);
1902 md_done_sync(mddev, s, uptodate);
1903 }
1904 }
1905}
1906
1907static void end_sync_write(struct bio *bio)
1908{
1909 int uptodate = !bio->bi_status;
1910 struct r1bio *r1_bio = get_resync_r1bio(bio);
1911 struct mddev *mddev = r1_bio->mddev;
1912 struct r1conf *conf = mddev->private;
1913 sector_t first_bad;
1914 int bad_sectors;
1915 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1916
1917 if (!uptodate) {
1918 abort_sync_write(mddev, r1_bio);
1919 set_bit(WriteErrorSeen, &rdev->flags);
1920 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1921 set_bit(MD_RECOVERY_NEEDED, &
1922 mddev->recovery);
1923 set_bit(R1BIO_WriteError, &r1_bio->state);
1924 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1925 &first_bad, &bad_sectors) &&
1926 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1927 r1_bio->sector,
1928 r1_bio->sectors,
1929 &first_bad, &bad_sectors)
1930 )
1931 set_bit(R1BIO_MadeGood, &r1_bio->state);
1932
1933 put_sync_write_buf(r1_bio, uptodate);
1934}
1935
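/*
 * Synchronously read or write one chunk (at most a page) on an rdev.
 * On failure the sectors are recorded as bad blocks; if even that fails,
 * the whole device is failed via md_error().
 */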
1936static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1937 int sectors, struct page *page, int rw)
1938{
1939 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1940
1941 return 1;
1942 if (rw == WRITE) {
1943 set_bit(WriteErrorSeen, &rdev->flags);
1944 if (!test_and_set_bit(WantReplacement,
1945 &rdev->flags))
1946 set_bit(MD_RECOVERY_NEEDED, &
1947 rdev->mddev->recovery);
1948 }
1949
1950 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1951 md_error(rdev->mddev, rdev);
1952 return 0;
1953}
1954
1955static int fix_sync_read_error(struct r1bio *r1_bio)
1956{
	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors.  Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
	 * We don't need to check is_badblock() again as we
	 * made sure that anything with a bad block in range
	 * will have bi_end_io clear.
	 */
1968 struct mddev *mddev = r1_bio->mddev;
1969 struct r1conf *conf = mddev->private;
1970 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1971 struct page **pages = get_resync_pages(bio)->pages;
1972 sector_t sect = r1_bio->sector;
1973 int sectors = r1_bio->sectors;
1974 int idx = 0;
1975 struct md_rdev *rdev;
1976
1977 rdev = conf->mirrors[r1_bio->read_disk].rdev;
1978 if (test_bit(FailFast, &rdev->flags)) {
1979
1980
1981 md_error(mddev, rdev);
1982 if (test_bit(Faulty, &rdev->flags))
1983
1984
1985
1986 bio->bi_end_io = end_sync_write;
1987 }
1988
1989 while(sectors) {
1990 int s = sectors;
1991 int d = r1_bio->read_disk;
1992 int success = 0;
1993 int start;
1994
1995 if (s > (PAGE_SIZE>>9))
1996 s = PAGE_SIZE >> 9;
1997 do {
1998 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1999
2000
2001
2002
2003 rdev = conf->mirrors[d].rdev;
2004 if (sync_page_io(rdev, sect, s<<9,
2005 pages[idx],
2006 REQ_OP_READ, 0, false)) {
2007 success = 1;
2008 break;
2009 }
2010 }
2011 d++;
2012 if (d == conf->raid_disks * 2)
2013 d = 0;
2014 } while (!success && d != r1_bio->read_disk);
2015
2016 if (!success) {
2017 char b[BDEVNAME_SIZE];
2018 int abort = 0;
2019
2020
2021
2022
2023
2024 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
2025 mdname(mddev), bio_devname(bio, b),
2026 (unsigned long long)r1_bio->sector);
2027 for (d = 0; d < conf->raid_disks * 2; d++) {
2028 rdev = conf->mirrors[d].rdev;
2029 if (!rdev || test_bit(Faulty, &rdev->flags))
2030 continue;
2031 if (!rdev_set_badblocks(rdev, sect, s, 0))
2032 abort = 1;
2033 }
2034 if (abort) {
2035 conf->recovery_disabled =
2036 mddev->recovery_disabled;
2037 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2038 md_done_sync(mddev, r1_bio->sectors, 0);
2039 put_buf(r1_bio);
2040 return 0;
2041 }
2042
2043 sectors -= s;
2044 sect += s;
2045 idx++;
2046 continue;
2047 }
2048
2049 start = d;
2050
2051 while (d != r1_bio->read_disk) {
2052 if (d == 0)
2053 d = conf->raid_disks * 2;
2054 d--;
2055 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2056 continue;
2057 rdev = conf->mirrors[d].rdev;
2058 if (r1_sync_page_io(rdev, sect, s,
2059 pages[idx],
2060 WRITE) == 0) {
2061 r1_bio->bios[d]->bi_end_io = NULL;
2062 rdev_dec_pending(rdev, mddev);
2063 }
2064 }
2065 d = start;
2066 while (d != r1_bio->read_disk) {
2067 if (d == 0)
2068 d = conf->raid_disks * 2;
2069 d--;
2070 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2071 continue;
2072 rdev = conf->mirrors[d].rdev;
2073 if (r1_sync_page_io(rdev, sect, s,
2074 pages[idx],
2075 READ) != 0)
2076 atomic_add(s, &rdev->corrected_errors);
2077 }
2078 sectors -= s;
2079 sect += s;
2080 idx ++;
2081 }
2082 set_bit(R1BIO_Uptodate, &r1_bio->state);
2083 bio->bi_status = 0;
2084 return 1;
2085}
2086
2087static void process_checks(struct r1bio *r1_bio)
2088{
	/* We have read all readable devices.  If we haven't
	 * got the block, then there is no hope left.
	 * If we have, then we want to do a comparison
	 * and skip the write if everything is the same.
	 * If any blocks failed to read, then we need to
	 * attempt an over-write.
	 */
2096 struct mddev *mddev = r1_bio->mddev;
2097 struct r1conf *conf = mddev->private;
2098 int primary;
2099 int i;
2100 int vcnt;
2101
2102
2103 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2104 for (i = 0; i < conf->raid_disks * 2; i++) {
2105 blk_status_t status;
2106 struct bio *b = r1_bio->bios[i];
2107 struct resync_pages *rp = get_resync_pages(b);
2108 if (b->bi_end_io != end_sync_read)
2109 continue;
2110
2111 status = b->bi_status;
2112 bio_reset(b);
2113 b->bi_status = status;
2114 b->bi_iter.bi_sector = r1_bio->sector +
2115 conf->mirrors[i].rdev->data_offset;
2116 bio_set_dev(b, conf->mirrors[i].rdev->bdev);
2117 b->bi_end_io = end_sync_read;
2118 rp->raid_bio = r1_bio;
2119 b->bi_private = rp;
2120
2121
2122 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2123 }
2124 for (primary = 0; primary < conf->raid_disks * 2; primary++)
2125 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2126 !r1_bio->bios[primary]->bi_status) {
2127 r1_bio->bios[primary]->bi_end_io = NULL;
2128 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2129 break;
2130 }
2131 r1_bio->read_disk = primary;
2132 for (i = 0; i < conf->raid_disks * 2; i++) {
2133 int j = 0;
2134 struct bio *pbio = r1_bio->bios[primary];
2135 struct bio *sbio = r1_bio->bios[i];
2136 blk_status_t status = sbio->bi_status;
2137 struct page **ppages = get_resync_pages(pbio)->pages;
2138 struct page **spages = get_resync_pages(sbio)->pages;
2139 struct bio_vec *bi;
2140 int page_len[RESYNC_PAGES] = { 0 };
2141 struct bvec_iter_all iter_all;
2142
2143 if (sbio->bi_end_io != end_sync_read)
2144 continue;
2145
2146 sbio->bi_status = 0;
2147
2148 bio_for_each_segment_all(bi, sbio, iter_all)
2149 page_len[j++] = bi->bv_len;
2150
2151 if (!status) {
2152 for (j = vcnt; j-- ; ) {
2153 if (memcmp(page_address(ppages[j]),
2154 page_address(spages[j]),
2155 page_len[j]))
2156 break;
2157 }
2158 } else
2159 j = 0;
2160 if (j >= 0)
2161 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2162 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2163 && !status)) {
2164
2165 sbio->bi_end_io = NULL;
2166 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2167 continue;
2168 }
2169
2170 bio_copy_data(sbio, pbio);
2171 }
2172}
2173
2174static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2175{
2176 struct r1conf *conf = mddev->private;
2177 int i;
2178 int disks = conf->raid_disks * 2;
2179 struct bio *wbio;
2180
2181 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2182
2183 if (!fix_sync_read_error(r1_bio))
2184 return;
2185
2186 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2187 process_checks(r1_bio);
2188
2189
2190
2191
2192 atomic_set(&r1_bio->remaining, 1);
2193 for (i = 0; i < disks ; i++) {
2194 wbio = r1_bio->bios[i];
2195 if (wbio->bi_end_io == NULL ||
2196 (wbio->bi_end_io == end_sync_read &&
2197 (i == r1_bio->read_disk ||
2198 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2199 continue;
2200 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2201 abort_sync_write(mddev, r1_bio);
2202 continue;
2203 }
2204
2205 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2206 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2207 wbio->bi_opf |= MD_FAILFAST;
2208
2209 wbio->bi_end_io = end_sync_write;
2210 atomic_inc(&r1_bio->remaining);
2211 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2212
2213 submit_bio_noacct(wbio);
2214 }
2215
2216 put_sync_write_buf(r1_bio, 1);
2217}

/*
 * Try to repair a read error on 'read_disk': read the affected range from
 * some other working mirror, write the good data back to the failing
 * device(s) and re-read to verify.  Sectors that cannot be recovered are
 * recorded as bad blocks (or the device is failed).
 */
2227static void fix_read_error(struct r1conf *conf, int read_disk,
2228 sector_t sect, int sectors)
2229{
2230 struct mddev *mddev = conf->mddev;
2231 while(sectors) {
2232 int s = sectors;
2233 int d = read_disk;
2234 int success = 0;
2235 int start;
2236 struct md_rdev *rdev;
2237
2238 if (s > (PAGE_SIZE>>9))
2239 s = PAGE_SIZE >> 9;
2240
2241 do {
2242 sector_t first_bad;
2243 int bad_sectors;
2244
2245 rcu_read_lock();
2246 rdev = rcu_dereference(conf->mirrors[d].rdev);
2247 if (rdev &&
2248 (test_bit(In_sync, &rdev->flags) ||
2249 (!test_bit(Faulty, &rdev->flags) &&
2250 rdev->recovery_offset >= sect + s)) &&
2251 is_badblock(rdev, sect, s,
2252 &first_bad, &bad_sectors) == 0) {
2253 atomic_inc(&rdev->nr_pending);
2254 rcu_read_unlock();
2255 if (sync_page_io(rdev, sect, s<<9,
2256 conf->tmppage, REQ_OP_READ, 0, false))
2257 success = 1;
2258 rdev_dec_pending(rdev, mddev);
2259 if (success)
2260 break;
2261 } else
2262 rcu_read_unlock();
2263 d++;
2264 if (d == conf->raid_disks * 2)
2265 d = 0;
2266 } while (!success && d != read_disk);
2267
2268 if (!success) {
2269
2270 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2271 if (!rdev_set_badblocks(rdev, sect, s, 0))
2272 md_error(mddev, rdev);
2273 break;
2274 }
2275
2276 start = d;
2277 while (d != read_disk) {
2278 if (d==0)
2279 d = conf->raid_disks * 2;
2280 d--;
2281 rcu_read_lock();
2282 rdev = rcu_dereference(conf->mirrors[d].rdev);
2283 if (rdev &&
2284 !test_bit(Faulty, &rdev->flags)) {
2285 atomic_inc(&rdev->nr_pending);
2286 rcu_read_unlock();
2287 r1_sync_page_io(rdev, sect, s,
2288 conf->tmppage, WRITE);
2289 rdev_dec_pending(rdev, mddev);
2290 } else
2291 rcu_read_unlock();
2292 }
2293 d = start;
2294 while (d != read_disk) {
2295 char b[BDEVNAME_SIZE];
2296 if (d==0)
2297 d = conf->raid_disks * 2;
2298 d--;
2299 rcu_read_lock();
2300 rdev = rcu_dereference(conf->mirrors[d].rdev);
2301 if (rdev &&
2302 !test_bit(Faulty, &rdev->flags)) {
2303 atomic_inc(&rdev->nr_pending);
2304 rcu_read_unlock();
2305 if (r1_sync_page_io(rdev, sect, s,
2306 conf->tmppage, READ)) {
2307 atomic_add(s, &rdev->corrected_errors);
2308 pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2309 mdname(mddev), s,
2310 (unsigned long long)(sect +
2311 rdev->data_offset),
2312 bdevname(rdev->bdev, b));
2313 }
2314 rdev_dec_pending(rdev, mddev);
2315 } else
2316 rcu_read_unlock();
2317 }
2318 sectors -= s;
2319 sect += s;
2320 }
2321}
2322
2323static int narrow_write_error(struct r1bio *r1_bio, int i)
2324{
2325 struct mddev *mddev = r1_bio->mddev;
2326 struct r1conf *conf = mddev->private;
2327 struct md_rdev *rdev = conf->mirrors[i].rdev;

	/* bio has the data to be written to device 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim it down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks.  We must handle this somehow.
	 *
	 * We currently own a reference on the rdev.
	 */

2340 int block_sectors;
2341 sector_t sector;
2342 int sectors;
2343 int sect_to_write = r1_bio->sectors;
2344 int ok = 1;
2345
2346 if (rdev->badblocks.shift < 0)
2347 return 0;
2348
2349 block_sectors = roundup(1 << rdev->badblocks.shift,
2350 bdev_logical_block_size(rdev->bdev) >> 9);
2351 sector = r1_bio->sector;
2352 sectors = ((sector + block_sectors)
2353 & ~(sector_t)(block_sectors - 1))
2354 - sector;
2355
2356 while (sect_to_write) {
2357 struct bio *wbio;
2358 if (sectors > sect_to_write)
2359 sectors = sect_to_write;
2360
2361
2362 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2363 wbio = bio_clone_fast(r1_bio->behind_master_bio,
2364 GFP_NOIO,
2365 &mddev->bio_set);
2366 } else {
2367 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2368 &mddev->bio_set);
2369 }
2370
2371 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2372 wbio->bi_iter.bi_sector = r1_bio->sector;
2373 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2374
2375 bio_trim(wbio, sector - r1_bio->sector, sectors);
2376 wbio->bi_iter.bi_sector += rdev->data_offset;
2377 bio_set_dev(wbio, rdev->bdev);
2378
2379 if (submit_bio_wait(wbio) < 0)
2380
2381 ok = rdev_set_badblocks(rdev, sector,
2382 sectors, 0)
2383 && ok;
2384
2385 bio_put(wbio);
2386 sect_to_write -= sectors;
2387 sector += sectors;
2388 sectors = block_sectors;
2389 }
2390 return ok;
2391}
2392
2393static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2394{
2395 int m;
2396 int s = r1_bio->sectors;
2397 for (m = 0; m < conf->raid_disks * 2 ; m++) {
2398 struct md_rdev *rdev = conf->mirrors[m].rdev;
2399 struct bio *bio = r1_bio->bios[m];
2400 if (bio->bi_end_io == NULL)
2401 continue;
2402 if (!bio->bi_status &&
2403 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2404 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2405 }
2406 if (bio->bi_status &&
2407 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2408 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2409 md_error(conf->mddev, rdev);
2410 }
2411 }
2412 put_buf(r1_bio);
2413 md_done_sync(conf->mddev, s, 1);
2414}
2415
2416static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2417{
2418 int m, idx;
2419 bool fail = false;
2420
2421 for (m = 0; m < conf->raid_disks * 2 ; m++)
2422 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2423 struct md_rdev *rdev = conf->mirrors[m].rdev;
2424 rdev_clear_badblocks(rdev,
2425 r1_bio->sector,
2426 r1_bio->sectors, 0);
2427 rdev_dec_pending(rdev, conf->mddev);
2428 } else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
2433 fail = true;
2434 if (!narrow_write_error(r1_bio, m)) {
2435 md_error(conf->mddev,
2436 conf->mirrors[m].rdev);
2437
2438 set_bit(R1BIO_Degraded, &r1_bio->state);
2439 }
2440 rdev_dec_pending(conf->mirrors[m].rdev,
2441 conf->mddev);
2442 }
2443 if (fail) {
2444 spin_lock_irq(&conf->device_lock);
2445 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2446 idx = sector_to_idx(r1_bio->sector);
2447 atomic_inc(&conf->nr_queued[idx]);
2448 spin_unlock_irq(&conf->device_lock);
		/*
		 * In case freeze_array() is waiting for the condition
		 * get_unqueued_pending() == extra to become true.
		 */
2453 wake_up(&conf->wait_barrier);
2454 md_wakeup_thread(conf->mddev->thread);
2455 } else {
2456 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2457 close_write(r1_bio);
2458 raid_end_bio_io(r1_bio);
2459 }
2460}
2461
2462static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2463{
2464 struct mddev *mddev = conf->mddev;
2465 struct bio *bio;
2466 struct md_rdev *rdev;
2467
2468 clear_bit(R1BIO_ReadError, &r1_bio->state);
2469
	/*
	 * We got a read error.  Maybe the drive is bad, maybe just this
	 * block, and maybe we can fix it.
	 * We freeze all other IO, and try reading the block from other
	 * devices.  When we find one that works, we rewrite the failed
	 * block and re-read to check that the error is gone.
	 * This is all done synchronously while the array is frozen.
	 */
2478 bio = r1_bio->bios[r1_bio->read_disk];
2479 bio_put(bio);
2480 r1_bio->bios[r1_bio->read_disk] = NULL;
2481
2482 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2483 if (mddev->ro == 0
2484 && !test_bit(FailFast, &rdev->flags)) {
2485 freeze_array(conf, 1);
2486 fix_read_error(conf, r1_bio->read_disk,
2487 r1_bio->sector, r1_bio->sectors);
2488 unfreeze_array(conf);
2489 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2490 md_error(mddev, rdev);
2491 } else {
2492 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2493 }
2494
2495 rdev_dec_pending(rdev, conf->mddev);
2496 allow_barrier(conf, r1_bio->sector);
2497 bio = r1_bio->master_bio;
2498
	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2500 r1_bio->state = 0;
2501 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2502}
2503
2504static void raid1d(struct md_thread *thread)
2505{
2506 struct mddev *mddev = thread->mddev;
2507 struct r1bio *r1_bio;
2508 unsigned long flags;
2509 struct r1conf *conf = mddev->private;
2510 struct list_head *head = &conf->retry_list;
2511 struct blk_plug plug;
2512 int idx;
2513
2514 md_check_recovery(mddev);
2515
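	/*
	 * Finish off any r1_bios whose writes failed and that were deferred
	 * until the superblock update recording the failure is no longer
	 * pending.
	 */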
2516 if (!list_empty_careful(&conf->bio_end_io_list) &&
2517 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2518 LIST_HEAD(tmp);
2519 spin_lock_irqsave(&conf->device_lock, flags);
2520 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2521 list_splice_init(&conf->bio_end_io_list, &tmp);
2522 spin_unlock_irqrestore(&conf->device_lock, flags);
2523 while (!list_empty(&tmp)) {
2524 r1_bio = list_first_entry(&tmp, struct r1bio,
2525 retry_list);
2526 list_del(&r1_bio->retry_list);
2527 idx = sector_to_idx(r1_bio->sector);
2528 atomic_dec(&conf->nr_queued[idx]);
2529 if (mddev->degraded)
2530 set_bit(R1BIO_Degraded, &r1_bio->state);
2531 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2532 close_write(r1_bio);
2533 raid_end_bio_io(r1_bio);
2534 }
2535 }
2536
2537 blk_start_plug(&plug);
2538 for (;;) {
2539
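		/* Submit any writes still queued on the pending list first. */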
2540 flush_pending_writes(conf);
2541
2542 spin_lock_irqsave(&conf->device_lock, flags);
2543 if (list_empty(head)) {
2544 spin_unlock_irqrestore(&conf->device_lock, flags);
2545 break;
2546 }
2547 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2548 list_del(head->prev);
2549 idx = sector_to_idx(r1_bio->sector);
2550 atomic_dec(&conf->nr_queued[idx]);
2551 spin_unlock_irqrestore(&conf->device_lock, flags);
2552
2553 mddev = r1_bio->mddev;
2554 conf = mddev->private;
2555 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2556 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2557 test_bit(R1BIO_WriteError, &r1_bio->state))
2558 handle_sync_write_finished(conf, r1_bio);
2559 else
2560 sync_request_write(mddev, r1_bio);
2561 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2562 test_bit(R1BIO_WriteError, &r1_bio->state))
2563 handle_write_finished(conf, r1_bio);
2564 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2565 handle_read_error(conf, r1_bio);
2566 else
2567 WARN_ON_ONCE(1);
2568
2569 cond_resched();
2570 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2571 md_check_recovery(mddev);
2572 }
2573 blk_finish_plug(&plug);
2574}
2575
2576static int init_resync(struct r1conf *conf)
2577{
2578 int buffs;
2579
2580 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2581 BUG_ON(mempool_initialized(&conf->r1buf_pool));
2582
2583 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2584 r1buf_pool_free, conf->poolinfo);
2585}
2586
2587static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2588{
2589 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2590 struct resync_pages *rps;
2591 struct bio *bio;
2592 int i;
2593
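	/*
	 * Reset each pre-allocated bio for reuse, preserving the
	 * resync_pages pointer that bio_reset() would otherwise clear.
	 */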
2594 for (i = conf->poolinfo->raid_disks; i--; ) {
2595 bio = r1bio->bios[i];
2596 rps = bio->bi_private;
2597 bio_reset(bio);
2598 bio->bi_private = rps;
2599 }
2600 r1bio->master_bio = NULL;
2601 return r1bio;
2602}
2603
/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */
2614static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2615 int *skipped)
2616{
2617 struct r1conf *conf = mddev->private;
2618 struct r1bio *r1_bio;
2619 struct bio *bio;
2620 sector_t max_sector, nr_sectors;
2621 int disk = -1;
2622 int i;
2623 int wonly = -1;
2624 int write_targets = 0, read_targets = 0;
2625 sector_t sync_blocks;
2626 int still_degraded = 0;
2627 int good_sectors = RESYNC_SECTORS;
2628 int min_bad = 0;
2629 int idx = sector_to_idx(sector_nr);
2630 int page_idx = 0;
2631
2632 if (!mempool_initialized(&conf->r1buf_pool))
2633 if (init_resync(conf))
2634 return 0;
2635
2636 max_sector = mddev->dev_sectors;
2637 if (sector_nr >= max_sector) {
		/*
		 * If we aborted, we need to abort the sync on the 'current'
		 * bitmap chunk (there will only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync.
		 */
2643 if (mddev->curr_resync < max_sector)
2644 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2645 &sync_blocks, 1);
2646 else
2647 conf->fullsync = 0;
2648
2649 md_bitmap_close_sync(mddev->bitmap);
2650 close_sync(conf);
2651
2652 if (mddev_is_clustered(mddev)) {
2653 conf->cluster_sync_low = 0;
2654 conf->cluster_sync_high = 0;
2655 }
2656 return 0;
2657 }
2658
2659 if (mddev->bitmap == NULL &&
2660 mddev->recovery_cp == MaxSector &&
2661 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2662 conf->fullsync == 0) {
2663 *skipped = 1;
2664 return max_sector - sector_nr;
2665 }
2666
	/* before building a request, check if we can skip these blocks..
	 * This call to md_bitmap_start_sync doesn't actually record anything
	 */
2669 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2670 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
2672 *skipped = 1;
2673 return sync_blocks;
2674 }
2675
	/*
	 * If there is non-resync activity waiting for a turn, then let it
	 * through before starting on this new sync request.
	 */
2680 if (atomic_read(&conf->nr_waiting[idx]))
2681 schedule_timeout_uninterruptible(1);
2682
	/*
	 * We are incrementing sector_nr below.  To be safe, we check against
	 * sector_nr + two times RESYNC_SECTORS.
	 */
2687 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2688 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2689
2690
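	/* raise_barrier() returns non-zero if the resync was interrupted. */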
2691 if (raise_barrier(conf, sector_nr))
2692 return 0;
2693
2694 r1_bio = raid1_alloc_init_r1buf(conf);
2695
2696 rcu_read_lock();
2697
	/*
	 * If we get a correctable read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which
	 * READ is OK.
	 */
2706 r1_bio->mddev = mddev;
2707 r1_bio->sector = sector_nr;
2708 r1_bio->state = 0;
2709 set_bit(R1BIO_IsSync, &r1_bio->state);
2710
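	/* Never let a single resync request cross a barrier bucket boundary. */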
2711 good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
2712
2713 for (i = 0; i < conf->raid_disks * 2; i++) {
2714 struct md_rdev *rdev;
2715 bio = r1_bio->bios[i];
2716
2717 rdev = rcu_dereference(conf->mirrors[i].rdev);
2718 if (rdev == NULL ||
2719 test_bit(Faulty, &rdev->flags)) {
2720 if (i < conf->raid_disks)
2721 still_degraded = 1;
2722 } else if (!test_bit(In_sync, &rdev->flags)) {
2723 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2724 bio->bi_end_io = end_sync_write;
2725 write_targets ++;
2726 } else {
			/* may need to read from here */
2728 sector_t first_bad = MaxSector;
2729 int bad_sectors;
2730
2731 if (is_badblock(rdev, sector_nr, good_sectors,
2732 &first_bad, &bad_sectors)) {
2733 if (first_bad > sector_nr)
2734 good_sectors = first_bad - sector_nr;
2735 else {
2736 bad_sectors -= (sector_nr - first_bad);
2737 if (min_bad == 0 ||
2738 min_bad > bad_sectors)
2739 min_bad = bad_sectors;
2740 }
2741 }
2742 if (sector_nr < first_bad) {
2743 if (test_bit(WriteMostly, &rdev->flags)) {
2744 if (wonly < 0)
2745 wonly = i;
2746 } else {
2747 if (disk < 0)
2748 disk = i;
2749 }
2750 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2751 bio->bi_end_io = end_sync_read;
2752 read_targets++;
2753 } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2754 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2755 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is In_sync, but has bad blocks in
				 * this range.  During a real resync or repair
				 * (not a read-only check) write to it so the
				 * bad blocks get rewritten.
				 */
2762 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2763 bio->bi_end_io = end_sync_write;
2764 write_targets++;
2765 }
2766 }
2767 if (rdev && bio->bi_end_io) {
2768 atomic_inc(&rdev->nr_pending);
2769 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2770 bio_set_dev(bio, rdev->bdev);
2771 if (test_bit(FailFast, &rdev->flags))
2772 bio->bi_opf |= MD_FAILFAST;
2773 }
2774 }
2775 rcu_read_unlock();
2776 if (disk < 0)
2777 disk = wonly;
2778 r1_bio->read_disk = disk;
2779
2780 if (read_targets == 0 && min_bad > 0) {
		/*
		 * These sectors are bad on all In_sync devices, so we
		 * need to record them as bad on all write targets.
		 */
2784 int ok = 1;
2785 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2786 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2787 struct md_rdev *rdev = conf->mirrors[i].rdev;
2788 ok = rdev_set_badblocks(rdev, sector_nr,
2789 min_bad, 0
2790 ) && ok;
2791 }
2792 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2793 *skipped = 1;
2794 put_buf(r1_bio);
2795
2796 if (!ok) {
			/*
			 * Cannot record the bad blocks, so we have to
			 * disable recovery and abort the resync.
			 */
2802 conf->recovery_disabled = mddev->recovery_disabled;
2803 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2804 return 0;
2805 } else
2806 return min_bad;
2807
2808 }
2809 if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good transition */
2812 good_sectors = min_bad;
2813 }
2814
2815 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
2817 write_targets += read_targets-1;
2818
2819 if (write_targets == 0 || read_targets == 0) {
		/*
		 * There is nowhere to write, so all non-sync drives must
		 * be failed - so we are finished.
		 */
2823 sector_t rv;
2824 if (min_bad > 0)
2825 max_sector = sector_nr + min_bad;
2826 rv = max_sector - sector_nr;
2827 *skipped = 1;
2828 put_buf(r1_bio);
2829 return rv;
2830 }
2831
2832 if (max_sector > mddev->resync_max)
2833 max_sector = mddev->resync_max;
2834 if (max_sector > sector_nr + good_sectors)
2835 max_sector = sector_nr + good_sectors;
2836 nr_sectors = 0;
2837 sync_blocks = 0;
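	/*
	 * Build the resync request page by page, up to RESYNC_PAGES,
	 * stopping early at max_sector or at a bitmap chunk that can
	 * be skipped.
	 */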
2838 do {
2839 struct page *page;
2840 int len = PAGE_SIZE;
2841 if (sector_nr + (len>>9) > max_sector)
2842 len = (max_sector - sector_nr) << 9;
2843 if (len == 0)
2844 break;
2845 if (sync_blocks == 0) {
2846 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2847 &sync_blocks, still_degraded) &&
2848 !conf->fullsync &&
2849 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2850 break;
2851 if ((len >> 9) > sync_blocks)
2852 len = sync_blocks<<9;
2853 }
2854
2855 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2856 struct resync_pages *rp;
2857
2858 bio = r1_bio->bios[i];
2859 rp = get_resync_pages(bio);
2860 if (bio->bi_end_io) {
2861 page = resync_fetch_page(rp, page_idx);
				/*
				 * won't fail because the vec table is big
				 * enough to hold all these pages
				 */
2867 bio_add_page(bio, page, len, 0);
2868 }
2869 }
2870 nr_sectors += len>>9;
2871 sector_nr += len>>9;
2872 sync_blocks -= (len>>9);
2873 } while (++page_idx < RESYNC_PAGES);
2874
2875 r1_bio->sectors = nr_sectors;
2876
2877 if (mddev_is_clustered(mddev) &&
2878 conf->cluster_sync_high < sector_nr + nr_sectors) {
2879 conf->cluster_sync_low = mddev->curr_resync_completed;
2880 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
		/* Send the new resync window to the other cluster nodes */
2882 md_cluster_ops->resync_info_update(mddev,
2883 conf->cluster_sync_low,
2884 conf->cluster_sync_high);
2885 }
2886
	/*
	 * For a user-requested sync (check/repair), we read from all
	 * readable devices so the copies can be compared.
	 */
2890 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2891 atomic_set(&r1_bio->remaining, read_targets);
2892 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2893 bio = r1_bio->bios[i];
2894 if (bio->bi_end_io == end_sync_read) {
2895 read_targets--;
2896 md_sync_acct_bio(bio, nr_sectors);
2897 if (read_targets == 1)
2898 bio->bi_opf &= ~MD_FAILFAST;
2899 submit_bio_noacct(bio);
2900 }
2901 }
2902 } else {
2903 atomic_set(&r1_bio->remaining, 1);
2904 bio = r1_bio->bios[r1_bio->read_disk];
2905 md_sync_acct_bio(bio, nr_sectors);
2906 if (read_targets == 1)
2907 bio->bi_opf &= ~MD_FAILFAST;
2908 submit_bio_noacct(bio);
2909 }
2910 return nr_sectors;
2911}
2912
2913static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2914{
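	/*
	 * A RAID1 array is as large as a single member device; a non-zero
	 * 'sectors' argument overrides that (used when resizing).
	 */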
2915 if (sectors)
2916 return sectors;
2917
2918 return mddev->dev_sectors;
2919}
2920
2921static struct r1conf *setup_conf(struct mddev *mddev)
2922{
2923 struct r1conf *conf;
2924 int i;
2925 struct raid1_info *disk;
2926 struct md_rdev *rdev;
2927 int err = -ENOMEM;
2928
2929 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2930 if (!conf)
2931 goto abort;
2932
2933 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2934 sizeof(atomic_t), GFP_KERNEL);
2935 if (!conf->nr_pending)
2936 goto abort;
2937
2938 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2939 sizeof(atomic_t), GFP_KERNEL);
2940 if (!conf->nr_waiting)
2941 goto abort;
2942
2943 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2944 sizeof(atomic_t), GFP_KERNEL);
2945 if (!conf->nr_queued)
2946 goto abort;
2947
2948 conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2949 sizeof(atomic_t), GFP_KERNEL);
2950 if (!conf->barrier)
2951 goto abort;
2952
2953 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
2954 mddev->raid_disks, 2),
2955 GFP_KERNEL);
2956 if (!conf->mirrors)
2957 goto abort;
2958
2959 conf->tmppage = alloc_page(GFP_KERNEL);
2960 if (!conf->tmppage)
2961 goto abort;
2962
2963 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2964 if (!conf->poolinfo)
2965 goto abort;
2966 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
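	/*
	 * Twice raid_disks entries: slot i holds the active device and
	 * slot i + raid_disks its replacement, if any.
	 */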
2967 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
2968 rbio_pool_free, conf->poolinfo);
2969 if (err)
2970 goto abort;
2971
2972 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
2973 if (err)
2974 goto abort;
2975
2976 conf->poolinfo->mddev = mddev;
2977
2978 err = -EINVAL;
2979 spin_lock_init(&conf->device_lock);
2980 rdev_for_each(rdev, mddev) {
2981 int disk_idx = rdev->raid_disk;
2982 if (disk_idx >= mddev->raid_disks
2983 || disk_idx < 0)
2984 continue;
2985 if (test_bit(Replacement, &rdev->flags))
2986 disk = conf->mirrors + mddev->raid_disks + disk_idx;
2987 else
2988 disk = conf->mirrors + disk_idx;
2989
2990 if (disk->rdev)
2991 goto abort;
2992 disk->rdev = rdev;
2993 disk->head_position = 0;
2994 disk->seq_start = MaxSector;
2995 }
2996 conf->raid_disks = mddev->raid_disks;
2997 conf->mddev = mddev;
2998 INIT_LIST_HEAD(&conf->retry_list);
2999 INIT_LIST_HEAD(&conf->bio_end_io_list);
3000
3001 spin_lock_init(&conf->resync_lock);
3002 init_waitqueue_head(&conf->wait_barrier);
3003
3004 bio_list_init(&conf->pending_bio_list);
3005 conf->pending_count = 0;
3006 conf->recovery_disabled = mddev->recovery_disabled - 1;
3007
3008 err = -EIO;
3009 for (i = 0; i < conf->raid_disks * 2; i++) {
3010
3011 disk = conf->mirrors + i;
3012
3013 if (i < conf->raid_disks &&
3014 disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
3016 if (!disk->rdev) {
				/*
				 * The original is not in use, so make the
				 * replacement the primary device.
				 */
3020 disk->rdev =
3021 disk[conf->raid_disks].rdev;
3022 disk[conf->raid_disks].rdev = NULL;
3023 } else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
3025 goto abort;
3026 }
3027
3028 if (!disk->rdev ||
3029 !test_bit(In_sync, &disk->rdev->flags)) {
3030 disk->head_position = 0;
3031 if (disk->rdev &&
3032 (disk->rdev->saved_raid_disk < 0))
3033 conf->fullsync = 1;
3034 }
3035 }
3036
3037 err = -ENOMEM;
3038 conf->thread = md_register_thread(raid1d, mddev, "raid1");
3039 if (!conf->thread)
3040 goto abort;
3041
3042 return conf;
3043
3044 abort:
3045 if (conf) {
3046 mempool_exit(&conf->r1bio_pool);
3047 kfree(conf->mirrors);
3048 safe_put_page(conf->tmppage);
3049 kfree(conf->poolinfo);
3050 kfree(conf->nr_pending);
3051 kfree(conf->nr_waiting);
3052 kfree(conf->nr_queued);
3053 kfree(conf->barrier);
3054 bioset_exit(&conf->bio_split);
3055 kfree(conf);
3056 }
3057 return ERR_PTR(err);
3058}
3059
3060static void raid1_free(struct mddev *mddev, void *priv);
3061static int raid1_run(struct mddev *mddev)
3062{
3063 struct r1conf *conf;
3064 int i;
3065 struct md_rdev *rdev;
3066 int ret;
3067 bool discard_supported = false;
3068
3069 if (mddev->level != 1) {
3070 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3071 mdname(mddev), mddev->level);
3072 return -EIO;
3073 }
3074 if (mddev->reshape_position != MaxSector) {
3075 pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3076 mdname(mddev));
3077 return -EIO;
3078 }
3079 if (mddev_init_writes_pending(mddev) < 0)
3080 return -ENOMEM;
3081
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in raid1_run(),
	 * should be freed in raid1_free()]
	 */
3086 if (mddev->private == NULL)
3087 conf = setup_conf(mddev);
3088 else
3089 conf = mddev->private;
3090
3091 if (IS_ERR(conf))
3092 return PTR_ERR(conf);
3093
3094 if (mddev->queue) {
3095 blk_queue_max_write_same_sectors(mddev->queue, 0);
3096 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3097 }
3098
3099 rdev_for_each(rdev, mddev) {
3100 if (!mddev->gendisk)
3101 continue;
3102 disk_stack_limits(mddev->gendisk, rdev->bdev,
3103 rdev->data_offset << 9);
3104 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3105 discard_supported = true;
3106 }
3107
3108 mddev->degraded = 0;
3109 for (i = 0; i < conf->raid_disks; i++)
3110 if (conf->mirrors[i].rdev == NULL ||
3111 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3112 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3113 mddev->degraded++;
3114
	/* RAID1 needs at least one active disk */
3117 if (conf->raid_disks - mddev->degraded < 1) {
3118 ret = -EINVAL;
3119 goto abort;
3120 }
3121
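	/*
	 * With only a single working mirror there is nothing to resync
	 * against, so consider the array clean.
	 */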
3122 if (conf->raid_disks - mddev->degraded == 1)
3123 mddev->recovery_cp = MaxSector;
3124
3125 if (mddev->recovery_cp != MaxSector)
3126 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3127 mdname(mddev));
3128 pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3129 mdname(mddev), mddev->raid_disks - mddev->degraded,
3130 mddev->raid_disks);
3131
	/*
	 * Ok, everything is just fine now: hand the helper thread and
	 * the private conf over to the mddev.
	 */
3135 mddev->thread = conf->thread;
3136 conf->thread = NULL;
3137 mddev->private = conf;
3138 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3139
3140 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3141
3142 if (mddev->queue) {
3143 if (discard_supported)
3144 blk_queue_flag_set(QUEUE_FLAG_DISCARD,
3145 mddev->queue);
3146 else
3147 blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
3148 mddev->queue);
3149 }
3150
3151 ret = md_integrity_register(mddev);
3152 if (ret) {
3153 md_unregister_thread(&mddev->thread);
3154 goto abort;
3155 }
3156 return 0;
3157
3158abort:
3159 raid1_free(mddev, conf);
3160 return ret;
3161}
3162
3163static void raid1_free(struct mddev *mddev, void *priv)
3164{
3165 struct r1conf *conf = priv;
3166
3167 mempool_exit(&conf->r1bio_pool);
3168 kfree(conf->mirrors);
3169 safe_put_page(conf->tmppage);
3170 kfree(conf->poolinfo);
3171 kfree(conf->nr_pending);
3172 kfree(conf->nr_waiting);
3173 kfree(conf->nr_queued);
3174 kfree(conf->barrier);
3175 bioset_exit(&conf->bio_split);
3176 kfree(conf);
3177}
3178
3179static int raid1_resize(struct mddev *mddev, sector_t sectors)
3180{
	/*
	 * No resync is happening, and there is enough space on all devices,
	 * so we can resize.  We need to make sure resync covers any new
	 * space.  If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems worth
	 * it.
	 */
3188 sector_t newsize = raid1_size(mddev, sectors, 0);
3189 if (mddev->external_size &&
3190 mddev->array_sectors > newsize)
3191 return -EINVAL;
3192 if (mddev->bitmap) {
3193 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3194 if (ret)
3195 return ret;
3196 }
3197 md_set_array_sectors(mddev, newsize);
3198 if (sectors > mddev->dev_sectors &&
3199 mddev->recovery_cp > mddev->dev_sectors) {
3200 mddev->recovery_cp = mddev->dev_sectors;
3201 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3202 }
3203 mddev->dev_sectors = sectors;
3204 mddev->resync_max_sectors = sectors;
3205 return 0;
3206}
3207
3208static int raid1_reshape(struct mddev *mddev)
3209{
	/*
	 * We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ move the array size
	 *
	 * We allocate a new r1bio_pool if we can, then raise a device
	 * barrier and wait until all IO stops.  Then we resize
	 * conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
3220
3221 mempool_t newpool, oldpool;
3222 struct pool_info *newpoolinfo;
3223 struct raid1_info *newmirrors;
3224 struct r1conf *conf = mddev->private;
3225 int cnt, raid_disks;
3226 unsigned long flags;
3227 int d, d2;
3228 int ret;
3229
3230 memset(&newpool, 0, sizeof(newpool));
3231 memset(&oldpool, 0, sizeof(oldpool));
3232
	/* Cannot change chunk_size, layout, or level */
3234 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3235 mddev->layout != mddev->new_layout ||
3236 mddev->level != mddev->new_level) {
3237 mddev->new_chunk_sectors = mddev->chunk_sectors;
3238 mddev->new_layout = mddev->layout;
3239 mddev->new_level = mddev->level;
3240 return -EINVAL;
3241 }
3242
3243 if (!mddev_is_clustered(mddev))
3244 md_allow_write(mddev);
3245
3246 raid_disks = mddev->raid_disks + mddev->delta_disks;
3247
3248 if (raid_disks < conf->raid_disks) {
3249 cnt=0;
3250 for (d= 0; d < conf->raid_disks; d++)
3251 if (conf->mirrors[d].rdev)
3252 cnt++;
3253 if (cnt > raid_disks)
3254 return -EBUSY;
3255 }
3256
3257 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3258 if (!newpoolinfo)
3259 return -ENOMEM;
3260 newpoolinfo->mddev = mddev;
3261 newpoolinfo->raid_disks = raid_disks * 2;
3262
3263 ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
3264 rbio_pool_free, newpoolinfo);
3265 if (ret) {
3266 kfree(newpoolinfo);
3267 return ret;
3268 }
3269 newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3270 raid_disks, 2),
3271 GFP_KERNEL);
3272 if (!newmirrors) {
3273 kfree(newpoolinfo);
3274 mempool_exit(&newpool);
3275 return -ENOMEM;
3276 }
3277
3278 freeze_array(conf, 0);
3279
	/* ok, everything is stopped */
3281 oldpool = conf->r1bio_pool;
3282 conf->r1bio_pool = newpool;
3283
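	/*
	 * Pack the surviving devices into the lowest slots so any holes
	 * end up at the highest raid_disk numbers.
	 */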
3284 for (d = d2 = 0; d < conf->raid_disks; d++) {
3285 struct md_rdev *rdev = conf->mirrors[d].rdev;
3286 if (rdev && rdev->raid_disk != d2) {
3287 sysfs_unlink_rdev(mddev, rdev);
3288 rdev->raid_disk = d2;
3289 sysfs_unlink_rdev(mddev, rdev);
3290 if (sysfs_link_rdev(mddev, rdev))
3291 pr_warn("md/raid1:%s: cannot register rd%d\n",
3292 mdname(mddev), rdev->raid_disk);
3293 }
3294 if (rdev)
3295 newmirrors[d2++].rdev = rdev;
3296 }
3297 kfree(conf->mirrors);
3298 conf->mirrors = newmirrors;
3299 kfree(conf->poolinfo);
3300 conf->poolinfo = newpoolinfo;
3301
3302 spin_lock_irqsave(&conf->device_lock, flags);
3303 mddev->degraded += (raid_disks - conf->raid_disks);
3304 spin_unlock_irqrestore(&conf->device_lock, flags);
3305 conf->raid_disks = mddev->raid_disks = raid_disks;
3306 mddev->delta_disks = 0;
3307
3308 unfreeze_array(conf);
3309
3310 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3311 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3312 md_wakeup_thread(mddev->thread);
3313
3314 mempool_exit(&oldpool);
3315 return 0;
3316}
3317
3318static void raid1_quiesce(struct mddev *mddev, int quiesce)
3319{
3320 struct r1conf *conf = mddev->private;
3321
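	/*
	 * freeze_array() waits for all in-flight I/O to drain and holds
	 * off new I/O until unfreeze_array() is called.
	 */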
3322 if (quiesce)
3323 freeze_array(conf, 0);
3324 else
3325 unfreeze_array(conf);
3326}
3327
3328static void *raid1_takeover(struct mddev *mddev)
3329{
	/*
	 * raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
3333 if (mddev->level == 5 && mddev->raid_disks == 2) {
3334 struct r1conf *conf;
3335 mddev->new_level = 1;
3336 mddev->new_layout = 0;
3337 mddev->new_chunk_sectors = 0;
3338 conf = setup_conf(mddev);
3339 if (!IS_ERR(conf)) {
			/* Array must appear to be quiesced */
3341 conf->array_frozen = 1;
3342 mddev_clear_unsupported_flags(mddev,
3343 UNSUPPORTED_MDDEV_FLAGS);
3344 }
3345 return conf;
3346 }
3347 return ERR_PTR(-EINVAL);
3348}
3349
3350static struct md_personality raid1_personality =
3351{
3352 .name = "raid1",
3353 .level = 1,
3354 .owner = THIS_MODULE,
3355 .make_request = raid1_make_request,
3356 .run = raid1_run,
3357 .free = raid1_free,
3358 .status = raid1_status,
3359 .error_handler = raid1_error,
3360 .hot_add_disk = raid1_add_disk,
3361 .hot_remove_disk= raid1_remove_disk,
3362 .spare_active = raid1_spare_active,
3363 .sync_request = raid1_sync_request,
3364 .resize = raid1_resize,
3365 .size = raid1_size,
3366 .check_reshape = raid1_reshape,
3367 .quiesce = raid1_quiesce,
3368 .takeover = raid1_takeover,
3369};
3370
3371static int __init raid_init(void)
3372{
3373 return register_md_personality(&raid1_personality);
3374}
3375
3376static void raid_exit(void)
3377{
3378 unregister_md_personality(&raid1_personality);
3379}
3380
3381module_init(raid_init);
3382module_exit(raid_exit);
3383MODULE_LICENSE("GPL");
3384MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3385MODULE_ALIAS("md-personality-3");
3386MODULE_ALIAS("md-raid1");
3387MODULE_ALIAS("md-level-1");
3388
3389module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
3390