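/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * RAID-1 management functions.
 */
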
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

#define NR_RAID1_BIOS 256

/*
 * Marker for r1_bio->bios[]: this device must not be used for this
 * request (e.g. after a read error on a read-only array the read is
 * redone on another device).
 */
#define IO_BLOCKED ((struct bio *)1)

/*
 * Marker for r1_bio->bios[]: a write to a known bad block succeeded,
 * so the bad-block record can be cleared afterwards.
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/*
 * When this many write requests are queued for the raid1 thread, the
 * array reports itself congested to throttle further writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...)				\
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"

static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void *r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}
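
/*
 * Resync window sizing: up to RESYNC_DEPTH blocks of RESYNC_BLOCK_SIZE
 * bytes may be in flight at once; the _SECTORS variants express the
 * same quantities in 512-byte sectors. Clustered arrays use a window
 * sixteen times larger.
 */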
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)

static void *r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int j;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
			    gfp_flags);
	if (!rps)
		goto out_free_r1bio;

	/*
	 * Allocate bios : 1 for read, raid_disks-1 for writes
	 */
	for (j = pi->raid_disks; j--; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < pi->raid_disks; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	kfree(rps);

out_free_r1bio:
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = pi->raid_disks; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_put(r1bio->bios[i]);
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, &conf->r1bio_pool);
}
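
/*
 * Release a resync/recovery r1bio: drop the rdev reference taken for
 * each bio that was actually issued, return the buffer to the pool and
 * lower the barrier on this sector's bucket.
 */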
static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, &conf->r1buf_pool);

	lower_barrier(conf, sect);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf, r1_bio->sector);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			   r1_bio->sectors,
			   !test_bit(R1BIO_Degraded, &r1_bio->state),
			   test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	bool discard_error;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
			if (!test_bit(Faulty, &rdev->flags))
				/* This is the only remaining device,
				 * We need to retry the write without
				 * FailFast
				 */
				set_bit(R1BIO_WriteError, &r1_bio->state);
			else {
				/* Finished with this branch */
				r1_bio->bios[mirror] = NULL;
				to_put = bio;
			}
		} else
			set_bit(R1BIO_WriteError, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher levels
		 * even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty: data cannot be reliably read
		 * back from such a device.
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(WriteMostly, &rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;
	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
	    (mddev_is_clustered(conf->mddev) &&
	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
					    this_sector + sectors)))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0; disk < conf->raid_disks * 2; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_dist_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad <= this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_dist_disk = disk;
				best_pending_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else {
			if ((sectors > best_good_sectors) && (best_disk >= 0))
				best_disk = -1;
			best_good_sectors = sectors;
		}

		if (best_disk >= 0)
			/* At least two disks to choose from so failfast is OK */
			set_bit(R1BIO_FailFast, &r1_bio->state);

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If this is a sequential read on a non-rotational
			 * device and the amount of sequential I/O already
			 * issued exceeds the optimal I/O size, prefer an
			 * idle disk instead of sticking with this one:
			 * spreading large sequential reads across SSDs
			 * improves aggregate throughput.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any
	 * disk is non-rotational, choose the disk with fewer pending
	 * requests, even if that disk is rotational; this may or may not
	 * be optimal for arrays with mixed rotational and non-rotational
	 * disks, depending on workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk || min_pending == 0)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}

static int raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << WB_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1 << WB_async_congested)) || 1)
				ret |= bdi_congested(q->backing_dev_info, bits);
			else
				ret &= bdi_congested(q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

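/*
 * Submit queued write bios to the member devices. Bitmap updates are
 * flushed to disk first so that a crash cannot leave a written region
 * unmarked in the bitmap.
 */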
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
	md_bitmap_unplug(conf->mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		struct md_rdev *rdev = (void *)bio->bi_disk;
		bio->bi_next = NULL;
		bio_set_dev(bio, rdev->bdev);
		if (test_bit(Faulty, &rdev->flags)) {
			bio_io_error(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bio->bi_disk->queue)))
			/* Just ignore it */
			bio_endio(bio);
		else
			generic_make_request(bio);
		bio = next;
	}
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct blk_plug plug;
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);

		/*
		 * As this is called in a wait_event() loop (see freeze_array),
		 * current->state might be TASK_UNINTERRUPTIBLE which will
		 * cause a warning when we prepare to wait again.  As it is
		 * rare that this path is taken, it is perhaps safest to force
		 * set_current_state(TASK_RUNNING) before submitting the
		 * queued writes.
		 */
		__set_current_state(TASK_RUNNING);
		blk_start_plug(&plug);
		flush_bio_list(conf, bio);
		blk_finish_plug(&plug);
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier() when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier() when the particular background IO completes.
 */
static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_waiting[idx]),
			    conf->resync_lock);

	/* block any new IO from starting */
	atomic_inc(&conf->barrier[idx]);
	/*
	 * In raise_barrier() we firstly increase conf->barrier[idx], then
	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
	 * increase conf->nr_pending[idx], then check conf->barrier[idx].
	 * A memory barrier here to make sure conf->nr_pending[idx] won't
	 * be fetched before conf->barrier[idx] is increased. Otherwise
	 * there will be a race between raise_barrier() and _wait_barrier().
	 */
	smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    exists in the corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning the max
	 *    resync count allowed on the current I/O barrier bucket is
	 *    reached.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    (!conf->array_frozen &&
			     !atomic_read(&conf->nr_pending[idx]) &&
			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
			     test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
			    conf->resync_lock);

	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		atomic_dec(&conf->barrier[idx]);
		spin_unlock_irq(&conf->resync_lock);
		wake_up(&conf->wait_barrier);
		return -EINTR;
	}

	atomic_inc(&conf->nr_sync_pending);
	spin_unlock_irq(&conf->resync_lock);

	return 0;
}

static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

	atomic_dec(&conf->barrier[idx]);
	atomic_dec(&conf->nr_sync_pending);
	wake_up(&conf->wait_barrier);
}

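/*
 * Used by regular I/O: take a reference in this sector's barrier
 * bucket and, if a resync barrier is raised or the array is frozen,
 * wait until the barrier is released before proceeding.
 */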
static void _wait_barrier(struct r1conf *conf, int idx)
{
	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * then raise_barrier() can be blocked when it waits for
	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
	 * conf->resync_lock when there is no barrier raised in same
	 * barrier unit bucket. Also if the array is frozen, I/O
	 * should be blocked until array is unfrozen.
	 */
	atomic_inc(&conf->nr_pending[idx]);
	/*
	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
	 * barrier here to make sure conf->barrier[idx] won't be fetched
	 * before conf->nr_pending[idx] is increased. Otherwise there will be
	 * a race between _wait_barrier() and raise_barrier().
	 */
	smp_mb__after_atomic();

	/*
	 * Don't worry about checking two atomic_t variables at same time
	 * here. If during our check of conf->barrier[idx] the array is
	 * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
	 * 0, it is safe to return and let the I/O continue. Because the
	 * array is frozen, all I/O returned here will eventually complete
	 * or be queued, no race will happen. See code comment in
	 * freeze_array().
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;

	/*
	 * After holding conf->resync_lock, conf->nr_pending[idx]
	 * should be decreased before waiting for barrier to drop.
	 * Otherwise, we may encounter a race condition because
	 * raise_barrier() might be waiting for conf->nr_pending[idx]
	 * to become 0 at same time.
	 */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for the barrier in same barrier unit bucket to drop. */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			     !atomic_read(&conf->barrier[idx]),
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	/*
	 * Very similar to _wait_barrier(). The difference is, for read
	 * I/O we don't need wait for sync I/O, but if the whole array
	 * is frozen, the read I/O still has to wait until the array is
	 * unfrozen. Since there is no ordering requirement with
	 * conf->barrier[idx] here, memory barrier is unnecessary as well.
	 */
	atomic_inc(&conf->nr_pending[idx]);

	if (!READ_ONCE(conf->array_frozen))
		return;

	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for array to be unfrozen */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen,
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_wait_barrier(conf, idx);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_allow_barrier(conf, idx);
}

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
	int idx, ret;

	ret = atomic_read(&conf->nr_sync_pending);
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_read(&conf->nr_pending[idx]) -
			atomic_read(&conf->nr_queued[idx]);

	return ret;
}

static void freeze_array(struct r1conf *conf, int extra)
{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * go quiet.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync IO will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at
	 * _wait_barrier() and wait_read_barrier(). In-flight I/O will
	 * either complete or be queued. Once everything is quiet
	 * (get_unqueued_pending() == extra, where 'extra' accounts for
	 * r1bios the caller itself still holds), it is safe to change
	 * other things related to the array. flush_pending_writes() is
	 * run while waiting so that queued writes can drain.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	raid1_log(conf->mddev, "wait freeze");
	wait_event_lock_irq_cmd(
		conf->wait_barrier,
		get_unqueued_pending(conf) == extra,
		conf->resync_lock,
		flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	spin_unlock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
}

static void alloc_behind_master_bio(struct r1bio *r1_bio,
				    struct bio *bio)
{
	int size = bio->bi_iter.bi_size;
	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int i = 0;
	struct bio *behind_bio = NULL;

	behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
	if (!behind_bio)
		return;

	/* discard op, we don't support writezero/writesame yet */
	if (!bio_has_data(bio)) {
		behind_bio->bi_iter.bi_size = size;
		goto skip_copy;
	}

	behind_bio->bi_write_hint = bio->bi_write_hint;

	while (i < vcnt && size) {
		struct page *page;
		int len = min_t(int, PAGE_SIZE, size);

		page = alloc_page(GFP_NOIO);
		if (unlikely(!page))
			goto free_pages;

		bio_add_page(behind_bio, page, len, 0);

		size -= len;
		i++;
	}

	bio_copy_data(behind_bio, bio);
skip_copy:
	r1_bio->behind_master_bio = behind_bio;
	set_bit(R1BIO_BehindIO, &r1_bio->state);

	return;

free_pages:
	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
	bio_free_pages(behind_bio);
	bio_put(behind_bio);
}

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	flush_bio_list(conf, bio);
	kfree(plug);
}
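
/*
 * Initialise the fields of an r1bio that describe the incoming request;
 * alloc_r1bio() additionally takes one from the mempool and clears the
 * per-device bios[] array so no stale IO_BLOCKED markers survive.
 */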
static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
{
	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;
}

static inline struct r1bio *
alloc_r1bio(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;

	r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
	/* Ensure no bio records IO_BLOCKED */
	memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
	init_r1bio(r1_bio, mddev, bio);
	return r1_bio;
}

static void raid1_read_request(struct mddev *mddev, struct bio *bio,
			       int max_read_sectors, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct bio *read_bio;
	struct bitmap *bitmap = mddev->bitmap;
	const int op = bio_op(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	int max_sectors;
	int rdisk;
	bool print_msg = !!r1_bio;
	char b[BDEVNAME_SIZE];

	/*
	 * If r1_bio is set, we are blocking the raid1d thread
	 * so there is a tiny risk of deadlock.  So ask for
	 * emergency memory if needed to avoid blocking indefinitely
	 */
	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;

	if (print_msg) {
		/* Need to get the block device name carefully */
		struct md_rdev *rdev;
		rcu_read_lock();
		rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
		if (rdev)
			bdevname(rdev->bdev, b);
		else
			strcpy(b, "???");
		rcu_read_unlock();
	}

	/*
	 * Still need barrier for READ in case that whole
	 * array is frozen.
	 */
	wait_read_barrier(conf, bio->bi_iter.bi_sector);

	if (!r1_bio)
		r1_bio = alloc_r1bio(mddev, bio);
	else
		init_r1bio(r1_bio, mddev, bio);
	r1_bio->sectors = max_read_sectors;

	/*
	 * make_request() can abort the operation when read-ahead is being
	 * used and no empty request is available.
	 */
	rdisk = read_balance(conf, r1_bio, &max_sectors);

	if (rdisk < 0) {
		/* couldn't find anywhere to read from */
		if (print_msg) {
			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev),
					    b,
					    (unsigned long long)r1_bio->sector);
		}
		raid_end_bio_io(r1_bio);
		return;
	}
	mirror = conf->mirrors + rdisk;

	if (print_msg)
		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
				    mdname(mddev),
				    (unsigned long long)r1_bio->sector,
				    bdevname(mirror->rdev->bdev, b));

	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
	    bitmap) {
		/*
		 * Reading from a write-mostly device must take care not to
		 * over-take any writes that are 'behind'
		 */
		raid1_log(mddev, "wait behind writes");
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}

	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      gfp, &conf->bio_split);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	r1_bio->read_disk = rdisk;

	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);

	r1_bio->bios[rdisk] = read_bio;

	read_bio->bi_iter.bi_sector = r1_bio->sector +
		mirror->rdev->data_offset;
	bio_set_dev(read_bio, mirror->rdev->bdev);
	read_bio->bi_end_io = raid1_end_read_request;
	bio_set_op_attrs(read_bio, op, do_sync);
	if (test_bit(FailFast, &mirror->rdev->flags) &&
	    test_bit(R1BIO_FailFast, &r1_bio->state))
		read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r1_bio;

	if (mddev->gendisk)
		trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
				      disk_devt(mddev->gendisk), r1_bio->sector);

	generic_make_request(read_bio);
}

static void raid1_write_request(struct mddev *mddev, struct bio *bio,
				int max_write_sectors)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	int i, disks;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	int first_clone;
	int max_sectors;

	if (mddev_is_clustered(mddev) &&
	    md_cluster_ops->area_resyncing(mddev, WRITE,
		     bio->bi_iter.bi_sector, bio_end_sector(bio))) {

		DEFINE_WAIT(w);
		for (;;) {
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_IDLE);
			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
							bio->bi_iter.bi_sector,
							bio_end_sector(bio)))
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	wait_barrier(conf, bio->bi_iter.bi_sector);

	r1_bio = alloc_r1bio(mddev, bio);
	r1_bio->sectors = max_write_sectors;

	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		raid1_log(mddev, "wait queued");
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * blocks.
	 * This potentially requires several writes to write around
	 * the bad blocks.  Each set of writes gets its own r1bio
	 * with a set of bios attached.
	 */

	disks = conf->raid_disks * 2;
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r1_bio->sectors;
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			continue;
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is
				 * acknowledged
				 */
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf, bio->bi_iter.bi_sector);
		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf, bio->bi_iter.bi_sector);
		goto retry_write;
	}

	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      GFP_NOIO, &conf->bio_split);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;

	for (i = 0; i < disks; i++) {
		struct bio *mbio = NULL;
		if (!r1_bio->bios[i])
			continue;

		if (first_clone) {
			/* do behind I/O ?
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
			if (bitmap &&
			    (atomic_read(&bitmap->behind_writes)
			     < mddev->bitmap_info.max_write_behind) &&
			    !waitqueue_active(&bitmap->behind_wait)) {
				alloc_behind_master_bio(r1_bio, bio);
			}

			md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
					     test_bit(R1BIO_BehindIO, &r1_bio->state));
			first_clone = 0;
		}

		if (r1_bio->behind_master_bio)
			mbio = bio_clone_fast(r1_bio->behind_master_bio,
					      GFP_NOIO, &mddev->bio_set);
		else
			mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);

		if (r1_bio->behind_master_bio) {
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		r1_bio->bios[i] = mbio;

		mbio->bi_iter.bi_sector = (r1_bio->sector +
					   conf->mirrors[i].rdev->data_offset);
		bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
		    conf->raid_disks - mddev->degraded > 1)
			mbio->bi_opf |= MD_FAILFAST;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);

		if (mddev->gendisk)
			trace_block_bio_remap(mbio->bi_disk->queue,
					      mbio, disk_devt(mddev->gendisk),
					      r1_bio->sector);
		/* flush_pending_writes() needs access to the rdev so...*/
		mbio->bi_disk = (void *)conf->mirrors[i].rdev;

		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
		if (cb)
			plug = container_of(cb, struct raid1_plug_cb, cb);
		else
			plug = NULL;
		if (plug) {
			bio_list_add(&plug->pending, mbio);
			plug->pending_cnt++;
		} else {
			spin_lock_irqsave(&conf->device_lock, flags);
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			md_wakeup_thread(mddev->thread);
		}
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
{
	sector_t sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return true;
	}

	/*
	 * There is a limit to the maximum size, but
	 * the read/write handler might find a lower limit
	 * due to bad blocks.  To avoid multiple splits,
	 * we pass the maximum number of sectors down
	 * and let the lower level perform the split.
	 */
	sectors = align_to_barrier_unit_end(
		bio->bi_iter.bi_sector, bio_sectors(bio));

	if (bio_data_dir(bio) == READ)
		raid1_read_request(mddev, bio, sectors, NULL);
	else {
		if (!md_write_start(mddev, bio))
			return false;
		raid1_write_request(mddev, bio, sectors);
	}
	return true;
}
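
/*
 * Report array status for /proc/mdstat: raid_disks, number of in-sync
 * devices, and a [U_] map with one character per mirror.
 */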
static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}

static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r1conf *conf = mddev->private;
	unsigned long flags;

	/*
	 * If it is not operational, then we have already marked it as dead;
	 * else if it is the last working disk, ignore the error and let the
	 * next level up know;
	 * else mark the drive as failed.
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		conf->recovery_disabled = mddev->recovery_disabled;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		return;
	}
	set_bit(Blocked, &rdev->flags);
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
	} else
		set_bit(Faulty, &rdev->flags);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	/*
	 * if recovery is running, make sure it aborts.
	 */
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
		"md/raid1:%s: Operation continuing on %d devices.\n",
		mdname(mddev), bdevname(rdev->bdev, b),
		mdname(mddev), conf->raid_disks - mddev->degraded);
}

static void print_conf(struct r1conf *conf)
{
	int i;

	pr_debug("RAID1 conf printout:\n");
	if (!conf) {
		pr_debug("(!conf)\n");
		return;
	}
	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		 conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
				 i, !test_bit(In_sync, &rdev->flags),
				 !test_bit(Faulty, &rdev->flags),
				 bdevname(rdev->bdev, b));
	}
	rcu_read_unlock();
}

static void close_sync(struct r1conf *conf)
{
	int idx;

	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
		_wait_barrier(conf, idx);
		_allow_barrier(conf, idx);
	}

	mempool_exit(&conf->r1buf_pool);
}

static int raid1_spare_active(struct mddev *mddev)
{
	int i;
	struct r1conf *conf = mddev->private;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 * device_lock used to avoid races with raid1_end_read_request
	 * which expects 'In_sync' flags and ->degraded to be consistent.
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
		if (repl
		    && !test_bit(Candidate, &repl->flags)
		    && repl->recovery_offset == MaxSector
		    && !test_bit(Faulty, &repl->flags)
		    && !test_and_set_bit(In_sync, &repl->flags)) {
			/* replacement has just become active */
			if (!rdev ||
			    !test_and_clear_bit(In_sync, &rdev->flags))
				count++;
			if (rdev) {
				/* Replaced device not technically
				 * faulty, but we need to be sure
				 * it gets removed and never re-added
				 */
				set_bit(Faulty, &rdev->flags);
				sysfs_notify_dirent_safe(
					rdev->sysfs_state);
			}
		}
		if (rdev
		    && rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		}
	}
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}
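
/*
 * Add a new device as a mirror, preferring the slot it previously
 * occupied (saved_raid_disk) so that a re-added device can resync from
 * the bitmap, or as a replacement for a device that wants one.
 */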
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	struct raid1_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (md_integrity_add_rdev(rdev, mddev))
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		first = last = rdev->saved_raid_disk;

	for (mirror = first; mirror <= last; mirror++) {
		p = conf->mirrors + mirror;
		if (!p->rdev) {
			if (mddev->gendisk)
				disk_stack_limits(mddev->gendisk, rdev->bdev,
						  rdev->data_offset << 9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p[conf->raid_disks].rdev == NULL) {
			/* Add this device as a replacement */
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;
			err = 0;
			conf->fullsync = 1;
			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
			break;
		}
	}
	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	print_conf(conf);
	return err;
}

static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct raid1_info *p = conf->mirrors + number;

	if (rdev != p->rdev)
		p = conf->mirrors + conf->raid_disks + number;

	print_conf(conf);
	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->recovery_disabled != conf->recovery_disabled &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		if (!test_bit(RemoveSynchronized, &rdev->flags)) {
			synchronize_rcu();
			if (atomic_read(&rdev->nr_pending)) {
				/* lost the race, try later */
				err = -EBUSY;
				p->rdev = rdev;
				goto abort;
			}
		}
		if (conf->mirrors[conf->raid_disks + number].rdev) {
			/* We just removed a device that is being replaced.
			 * Move down the replacement.  We drain all IO before
			 * doing this to avoid confusion.
			 */
			struct md_rdev *repl =
				conf->mirrors[conf->raid_disks + number].rdev;
			freeze_array(conf, 0);
			if (atomic_read(&repl->nr_pending)) {
				/* It means that some queued IO of retry_list
				 * hold repl. Thus, we cannot set replacement
				 * as NULL, avoiding rdev NULL pointer
				 * dereference in sync_request_write and
				 * handle_write_finished.
				 */
				err = -EBUSY;
				unfreeze_array(conf);
				goto abort;
			}
			clear_bit(Replacement, &repl->flags);
			p->rdev = repl;
			conf->mirrors[conf->raid_disks + number].rdev = NULL;
			unfreeze_array(conf);
		}

		clear_bit(WantReplacement, &rdev->flags);
		err = md_integrity_register(mddev);
	}
abort:

	print_conf(conf);
	return err;
}

static void end_sync_read(struct bio *bio)
{
	struct r1bio *r1_bio = get_resync_r1bio(bio);

	update_head_pos(r1_bio->read_disk, r1_bio);

	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (!bio->bi_status)
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}

static void end_sync_write(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = get_resync_r1bio(bio);
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	sector_t first_bad;
	int bad_sectors;
	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;

	if (!uptodate) {
		sector_t sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits don't get cleared. */
		do {
			md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				mddev->recovery);
		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
			       &first_bad, &bad_sectors) &&
		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
				r1_bio->sector,
				r1_bio->sectors,
				&first_bad, &bad_sectors))
		set_bit(R1BIO_MadeGood, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, uptodate);
		}
	}
}

static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
			   int sectors, struct page *page, int rw)
{
	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
		/* success */
		return 1;
	if (rw == WRITE) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				rdev->mddev->recovery);
	}
	/* need to record an error - either for the block or the device */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
	return 0;
}

static int fix_sync_read_error(struct r1bio *r1_bio)
{
	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors.  Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
	 * We don't need to check is_badblock() again as we
	 * made sure that anything with a bad block in range
	 * will have bi_end_io clear.
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
	struct page **pages = get_resync_pages(bio)->pages;
	sector_t sect = r1_bio->sector;
	int sectors = r1_bio->sectors;
	int idx = 0;
	struct md_rdev *rdev;

	rdev = conf->mirrors[r1_bio->read_disk].rdev;
	if (test_bit(FailFast, &rdev->flags)) {
		/* Don't try recovering from here - just fail it
		 * ... unless it is the last working device of course */
		md_error(mddev, rdev);
		if (test_bit(Faulty, &rdev->flags))
			/* Don't try to read from here, but make sure
			 * put_buf does its thing
			 */
			bio->bi_end_io = end_sync_write;
	}

	while (sectors) {
		int s = sectors;
		int d = r1_bio->read_disk;
		int success = 0;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;
		do {
			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
				/* No rcu protection needed here devices
				 * can only be removed when no resync is
				 * active, and resync is currently active
				 */
				rdev = conf->mirrors[d].rdev;
				if (sync_page_io(rdev, sect, s<<9,
						 pages[idx],
						 REQ_OP_READ, 0, false)) {
					success = 1;
					break;
				}
			}
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != r1_bio->read_disk);

		if (!success) {
			char b[BDEVNAME_SIZE];
			int abort = 0;
			/* Cannot read from anywhere, this block is lost.
			 * Record a bad block on each device.  If that doesn't
			 * work just disable and interrupt the recovery.
			 * Don't fail devices as that won't really help.
			 */
			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev), bio_devname(bio, b),
					    (unsigned long long)r1_bio->sector);
			for (d = 0; d < conf->raid_disks * 2; d++) {
				rdev = conf->mirrors[d].rdev;
				if (!rdev || test_bit(Faulty, &rdev->flags))
					continue;
				if (!rdev_set_badblocks(rdev, sect, s, 0))
					abort = 1;
			}
			if (abort) {
				conf->recovery_disabled =
					mddev->recovery_disabled;
				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return 0;
			}
			/* Try next page */
			sectors -= s;
			sect += s;
			idx++;
			continue;
		}

		start = d;
		/* write it back and re-read */
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    pages[idx],
					    WRITE) == 0) {
				r1_bio->bios[d]->bi_end_io = NULL;
				rdev_dec_pending(rdev, mddev);
			}
		}
		d = start;
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    pages[idx],
					    READ) != 0)
				atomic_add(s, &rdev->corrected_errors);
		}
		sectors -= s;
		sect += s;
		idx++;
	}
	set_bit(R1BIO_Uptodate, &r1_bio->state);
	bio->bi_status = 0;
	return 1;
}

static void process_checks(struct r1bio *r1_bio)
{
	/* We have read all readable devices.  If we haven't
	 * got the block, then there is no hope left.
	 * If we have, then we want to do a comparison
	 * and skip the write if everything is the same.
	 * If any blocks failed to read, then we need to
	 * attempt an over-write
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int primary;
	int i;
	int vcnt;

	/* Fix variable parts of all bios */
	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
	for (i = 0; i < conf->raid_disks * 2; i++) {
		blk_status_t status;
		struct bio *b = r1_bio->bios[i];
		struct resync_pages *rp = get_resync_pages(b);
		if (b->bi_end_io != end_sync_read)
			continue;
		/* fixup the bio for reuse, but preserve errno */
		status = b->bi_status;
		bio_reset(b);
		b->bi_status = status;
		b->bi_iter.bi_sector = r1_bio->sector +
			conf->mirrors[i].rdev->data_offset;
		bio_set_dev(b, conf->mirrors[i].rdev->bdev);
		b->bi_end_io = end_sync_read;
		rp->raid_bio = r1_bio;
		b->bi_private = rp;

		/* initialize bvec table again */
		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
	}
	for (primary = 0; primary < conf->raid_disks * 2; primary++)
		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
		    !r1_bio->bios[primary]->bi_status) {
			r1_bio->bios[primary]->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
			break;
		}
	r1_bio->read_disk = primary;
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j;
		struct bio *pbio = r1_bio->bios[primary];
		struct bio *sbio = r1_bio->bios[i];
		blk_status_t status = sbio->bi_status;
		struct page **ppages = get_resync_pages(pbio)->pages;
		struct page **spages = get_resync_pages(sbio)->pages;
		struct bio_vec *bi;
		int page_len[RESYNC_PAGES] = { 0 };

		if (sbio->bi_end_io != end_sync_read)
			continue;
		/* Now we can 'fixup' the error value */
		sbio->bi_status = 0;

		bio_for_each_segment_all(bi, sbio, j)
			page_len[j] = bi->bv_len;

		if (!status) {
			for (j = vcnt; j-- ; ) {
				if (memcmp(page_address(ppages[j]),
					   page_address(spages[j]),
					   page_len[j]))
					break;
			}
		} else
			j = 0;
		if (j >= 0)
			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && !status)) {
			/* No need to write to this device. */
			sbio->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
			continue;
		}

		bio_copy_data(sbio, pbio);
	}
}

static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	int i;
	int disks = conf->raid_disks * 2;
	struct bio *wbio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		/* ouch - failed to read all of that. */
		if (!fix_sync_read_error(r1_bio))
			return;

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		process_checks(r1_bio);

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;
		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			continue;

		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
			wbio->bi_opf |= MD_FAILFAST;

		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, 1);
		}
	}
}
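
/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */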
static void fix_read_error(struct r1conf *conf, int read_disk,
			   sector_t sect, int sectors)
{
	struct mddev *mddev = conf->mddev;
	while (sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		struct md_rdev *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			sector_t first_bad;
			int bad_sectors;

			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    (test_bit(In_sync, &rdev->flags) ||
			     (!test_bit(Faulty, &rdev->flags) &&
			      rdev->recovery_offset >= sect + s)) &&
			    is_badblock(rdev, sect, s,
					&first_bad, &bad_sectors) == 0) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				if (sync_page_io(rdev, sect, s<<9,
						 conf->tmppage, REQ_OP_READ, 0, false))
					success = 1;
				rdev_dec_pending(rdev, mddev);
				if (success)
					break;
			} else
				rcu_read_unlock();
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere - mark it bad */
			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
			if (!rdev_set_badblocks(rdev, sect, s, 0))
				md_error(mddev, rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				r1_sync_page_io(rdev, sect, s,
						conf->tmppage, WRITE);
				rdev_dec_pending(rdev, mddev);
			} else
				rcu_read_unlock();
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				if (r1_sync_page_io(rdev, sect, s,
						    conf->tmppage, READ)) {
					atomic_add(s, &rdev->corrected_errors);
					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
						mdname(mddev), s,
						(unsigned long long)(sect +
								     rdev->data_offset),
						bdevname(rdev->bdev, b));
				}
				rdev_dec_pending(rdev, mddev);
			} else
				rcu_read_unlock();
		}
		sectors -= s;
		sect += s;
	}
}

static int narrow_write_error(struct r1bio *r1_bio, int i)
{
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[i].rdev;

	/* bio has the data to be written to device 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks we are recording bad.  So we have to be careful to
	 * align ourselves back.
	 *
	 * We currently own a reference on the rdev.
	 */

	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r1_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = roundup(1 << rdev->badblocks.shift,
				bdev_logical_block_size(rdev->bdev) >> 9);
	sector = r1_bio->sector;
	sectors = ((sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		- sector;

	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors' */

		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			wbio = bio_clone_fast(r1_bio->behind_master_bio,
					      GFP_NOIO,
					      &mddev->bio_set);
		} else {
			wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
					      &mddev->bio_set);
		}

		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
		wbio->bi_iter.bi_sector = r1_bio->sector;
		wbio->bi_iter.bi_size = r1_bio->sectors << 9;

		bio_trim(wbio, sector - r1_bio->sector, sectors);
		wbio->bi_iter.bi_sector += rdev->data_offset;
		bio_set_dev(wbio, rdev->bdev);

		if (submit_bio_wait(wbio) < 0)
			/* failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}

static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	int s = r1_bio->sectors;
	for (m = 0; m < conf->raid_disks * 2 ; m++) {
		struct md_rdev *rdev = conf->mirrors[m].rdev;
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
		if (!bio->bi_status &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
		}
		if (bio->bi_status &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
		}
	}
	put_buf(r1_bio);
	md_done_sync(conf->mddev, s, 1);
}
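
/*
 * A normal write finished with errors on some devices: clear bad
 * blocks that were written over successfully, narrow down and record
 * write errors, and queue the r1bio for final completion once the
 * superblock has recorded any failed device.
 */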
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m, idx;
	bool fail = false;

	for (m = 0; m < conf->raid_disks * 2 ; m++)
		if (r1_bio->bios[m] == IO_MADE_GOOD) {
			struct md_rdev *rdev = conf->mirrors[m].rdev;
			rdev_clear_badblocks(rdev,
					     r1_bio->sector,
					     r1_bio->sectors, 0);
			rdev_dec_pending(rdev, conf->mddev);
		} else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
			fail = true;
			if (!narrow_write_error(r1_bio, m)) {
				md_error(conf->mddev,
					 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
				set_bit(R1BIO_Degraded, &r1_bio->state);
			}
			rdev_dec_pending(conf->mirrors[m].rdev,
					 conf->mddev);
		}
	if (fail) {
		spin_lock_irq(&conf->device_lock);
		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
		idx = sector_to_idx(r1_bio->sector);
		atomic_inc(&conf->nr_queued[idx]);
		spin_unlock_irq(&conf->device_lock);
		/*
		 * In case freeze_array() is waiting for condition
		 * get_unqueued_pending() == extra to be true.
		 */
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(conf->mddev->thread);
	} else {
		if (test_bit(R1BIO_WriteError, &r1_bio->state))
			close_write(r1_bio);
		raid_end_bio_io(r1_bio);
	}
}

static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
	struct mddev *mddev = conf->mddev;
	struct bio *bio;
	struct md_rdev *rdev;

	clear_bit(R1BIO_ReadError, &r1_bio->state);
	/* we got a read error. Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write
	 * and check it that fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen
	 */

	bio = r1_bio->bios[r1_bio->read_disk];
	bio_put(bio);
	r1_bio->bios[r1_bio->read_disk] = NULL;

	rdev = conf->mirrors[r1_bio->read_disk].rdev;
	if (mddev->ro == 0
	    && !test_bit(FailFast, &rdev->flags)) {
		freeze_array(conf, 1);
		fix_read_error(conf, r1_bio->read_disk,
			       r1_bio->sector, r1_bio->sectors);
		unfreeze_array(conf);
	} else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
		md_error(mddev, rdev);
	} else {
		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
	}

	rdev_dec_pending(rdev, conf->mddev);
	allow_barrier(conf, r1_bio->sector);
	bio = r1_bio->master_bio;

	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
	r1_bio->state = 0;
	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
}
2485
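/*
 * This is our raid1 kernel thread: it retries failed reads, finishes
 * writes that saw errors or hit bad blocks, and completes deferred
 * bios once the superblock has been written out.
 */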
2486static void raid1d(struct md_thread *thread)
2487{
2488 struct mddev *mddev = thread->mddev;
2489 struct r1bio *r1_bio;
2490 unsigned long flags;
2491 struct r1conf *conf = mddev->private;
2492 struct list_head *head = &conf->retry_list;
2493 struct blk_plug plug;
2494 int idx;
2495
2496 md_check_recovery(mddev);
2497
2498 if (!list_empty_careful(&conf->bio_end_io_list) &&
2499 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2500 LIST_HEAD(tmp);
2501 spin_lock_irqsave(&conf->device_lock, flags);
2502 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2503 list_splice_init(&conf->bio_end_io_list, &tmp);
2504 spin_unlock_irqrestore(&conf->device_lock, flags);
2505 while (!list_empty(&tmp)) {
2506 r1_bio = list_first_entry(&tmp, struct r1bio,
2507 retry_list);
2508 list_del(&r1_bio->retry_list);
2509 idx = sector_to_idx(r1_bio->sector);
2510 atomic_dec(&conf->nr_queued[idx]);
2511 if (mddev->degraded)
2512 set_bit(R1BIO_Degraded, &r1_bio->state);
2513 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2514 close_write(r1_bio);
2515 raid_end_bio_io(r1_bio);
2516 }
2517 }
2518
2519 blk_start_plug(&plug);
2520 for (;;) {
2521
2522 flush_pending_writes(conf);
2523
2524 spin_lock_irqsave(&conf->device_lock, flags);
2525 if (list_empty(head)) {
2526 spin_unlock_irqrestore(&conf->device_lock, flags);
2527 break;
2528 }
2529 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2530 list_del(head->prev);
2531 idx = sector_to_idx(r1_bio->sector);
2532 atomic_dec(&conf->nr_queued[idx]);
2533 spin_unlock_irqrestore(&conf->device_lock, flags);
2534
2535 mddev = r1_bio->mddev;
2536 conf = mddev->private;
2537 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2538 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2539 test_bit(R1BIO_WriteError, &r1_bio->state))
2540 handle_sync_write_finished(conf, r1_bio);
2541 else
2542 sync_request_write(mddev, r1_bio);
2543 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2544 test_bit(R1BIO_WriteError, &r1_bio->state))
2545 handle_write_finished(conf, r1_bio);
2546 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2547 handle_read_error(conf, r1_bio);
2548 else
2549 WARN_ON_ONCE(1);
2550
2551 cond_resched();
2552 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2553 md_check_recovery(mddev);
2554 }
2555 blk_finish_plug(&plug);
2556}
2557
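/* Pre-allocate the pool of r1bio+page buffers used by resync. */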
2558static int init_resync(struct r1conf *conf)
2559{
2560 int buffs;
2561
2562 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2563 BUG_ON(mempool_initialized(&conf->r1buf_pool));
2564
2565 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2566 r1buf_pool_free, conf->poolinfo);
2567}
2568
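/*
 * Take an r1bio from the resync pool and reset every bio in it for
 * reuse, taking care to preserve each bio's resync_pages pointer
 * across bio_reset().
 */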
2569static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2570{
2571 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2572 struct resync_pages *rps;
2573 struct bio *bio;
2574 int i;
2575
2576 for (i = conf->poolinfo->raid_disks; i--; ) {
2577 bio = r1bio->bios[i];
2578 rps = bio->bi_private;
2579 bio_reset(bio);
2580 bio->bi_private = rps;
2581 }
2582 r1bio->master_bio = NULL;
2583 return r1bio;
2584}
2585
/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */
2596static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2597 int *skipped)
2598{
2599 struct r1conf *conf = mddev->private;
2600 struct r1bio *r1_bio;
2601 struct bio *bio;
2602 sector_t max_sector, nr_sectors;
2603 int disk = -1;
2604 int i;
2605 int wonly = -1;
2606 int write_targets = 0, read_targets = 0;
2607 sector_t sync_blocks;
2608 int still_degraded = 0;
2609 int good_sectors = RESYNC_SECTORS;
2610 int min_bad = 0;
2611 int idx = sector_to_idx(sector_nr);
2612 int page_idx = 0;
2613
2614 if (!mempool_initialized(&conf->r1buf_pool))
2615 if (init_resync(conf))
2616 return 0;
2617
2618 max_sector = mddev->dev_sectors;
2619 if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync.
		 */
2625 if (mddev->curr_resync < max_sector)
2626 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2627 &sync_blocks, 1);
2628 else
2629 conf->fullsync = 0;
2630
2631 md_bitmap_close_sync(mddev->bitmap);
2632 close_sync(conf);
2633
2634 if (mddev_is_clustered(mddev)) {
2635 conf->cluster_sync_low = 0;
2636 conf->cluster_sync_high = 0;
2637 }
2638 return 0;
2639 }
2640
2641 if (mddev->bitmap == NULL &&
2642 mddev->recovery_cp == MaxSector &&
2643 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2644 conf->fullsync == 0) {
2645 *skipped = 1;
2646 return max_sector - sector_nr;
2647 }
2648
	/* before building a request, check if we can skip these blocks..
	 * This call to md_bitmap_start_sync doesn't actually record anything
	 */
2651 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2652 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
2654 *skipped = 1;
2655 return sync_blocks;
2656 }
2657
	/*
	 * If there is non-resync activity waiting for a turn, then let it
	 * through before starting on this new sync request.
	 */
2662 if (atomic_read(&conf->nr_waiting[idx]))
2663 schedule_timeout_uninterruptible(1);
2664
	/* we are incrementing sector_nr below. To be safe, we check against
	 * sector_nr + two times RESYNC_SECTORS
	 */
2669 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2670 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2671
2672
2673 if (raise_barrier(conf, sector_nr))
2674 return 0;
2675
2676 r1_bio = raid1_alloc_init_r1buf(conf);
2677
2678 rcu_read_lock();
	/*
	 * If we get a correctable read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we need to read from all other drives, and
	 * record the error; make sure we read from In_sync devices if
	 * possible.
	 */
2688 r1_bio->mddev = mddev;
2689 r1_bio->sector = sector_nr;
2690 r1_bio->state = 0;
2691 set_bit(R1BIO_IsSync, &r1_bio->state);
2692
2693 good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
2694
2695 for (i = 0; i < conf->raid_disks * 2; i++) {
2696 struct md_rdev *rdev;
2697 bio = r1_bio->bios[i];
2698
2699 rdev = rcu_dereference(conf->mirrors[i].rdev);
2700 if (rdev == NULL ||
2701 test_bit(Faulty, &rdev->flags)) {
2702 if (i < conf->raid_disks)
2703 still_degraded = 1;
2704 } else if (!test_bit(In_sync, &rdev->flags)) {
2705 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2706 bio->bi_end_io = end_sync_write;
			write_targets++;
2708 } else {
			/* may need to read from here */
2710 sector_t first_bad = MaxSector;
2711 int bad_sectors;
2712
2713 if (is_badblock(rdev, sector_nr, good_sectors,
2714 &first_bad, &bad_sectors)) {
2715 if (first_bad > sector_nr)
2716 good_sectors = first_bad - sector_nr;
2717 else {
2718 bad_sectors -= (sector_nr - first_bad);
2719 if (min_bad == 0 ||
2720 min_bad > bad_sectors)
2721 min_bad = bad_sectors;
2722 }
2723 }
2724 if (sector_nr < first_bad) {
2725 if (test_bit(WriteMostly, &rdev->flags)) {
2726 if (wonly < 0)
2727 wonly = i;
2728 } else {
2729 if (disk < 0)
2730 disk = i;
2731 }
2732 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2733 bio->bi_end_io = end_sync_read;
2734 read_targets++;
2735 } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2736 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2737 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is suitable for reading (InSync),
				 * but has bad block(s) here. Let's try to correct them,
				 * if we are doing resync or repair. Otherwise, leave
				 * this device alone for this sync request.
				 */
2744 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2745 bio->bi_end_io = end_sync_write;
2746 write_targets++;
2747 }
2748 }
2749 if (bio->bi_end_io) {
2750 atomic_inc(&rdev->nr_pending);
2751 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2752 bio_set_dev(bio, rdev->bdev);
2753 if (test_bit(FailFast, &rdev->flags))
2754 bio->bi_opf |= MD_FAILFAST;
2755 }
2756 }
2757 rcu_read_unlock();
2758 if (disk < 0)
2759 disk = wonly;
2760 r1_bio->read_disk = disk;
2761
2762 if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets
		 */
2766 int ok = 1;
2767 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2768 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2769 struct md_rdev *rdev = conf->mirrors[i].rdev;
				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0) && ok;
2773 }
2774 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2775 *skipped = 1;
2776 put_buf(r1_bio);
2777
2778 if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * fail the really bad ones ???
			 */
2784 conf->recovery_disabled = mddev->recovery_disabled;
2785 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2786 return 0;
2787 } else
2788 return min_bad;
2789
2790 }
2791 if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
2794 good_sectors = min_bad;
2795 }
2796
2797 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets - 1;
2800
2801 if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
2805 sector_t rv;
2806 if (min_bad > 0)
2807 max_sector = sector_nr + min_bad;
2808 rv = max_sector - sector_nr;
2809 *skipped = 1;
2810 put_buf(r1_bio);
2811 return rv;
2812 }
2813
2814 if (max_sector > mddev->resync_max)
2815 max_sector = mddev->resync_max;
2816 if (max_sector > sector_nr + good_sectors)
2817 max_sector = sector_nr + good_sectors;
2818 nr_sectors = 0;
2819 sync_blocks = 0;
2820 do {
2821 struct page *page;
2822 int len = PAGE_SIZE;
2823 if (sector_nr + (len>>9) > max_sector)
2824 len = (max_sector - sector_nr) << 9;
2825 if (len == 0)
2826 break;
2827 if (sync_blocks == 0) {
2828 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2829 &sync_blocks, still_degraded) &&
2830 !conf->fullsync &&
2831 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2832 break;
2833 if ((len >> 9) > sync_blocks)
2834 len = sync_blocks<<9;
2835 }
2836
2837 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2838 struct resync_pages *rp;
2839
2840 bio = r1_bio->bios[i];
2841 rp = get_resync_pages(bio);
2842 if (bio->bi_end_io) {
2843 page = resync_fetch_page(rp, page_idx);

				/*
				 * won't fail because the vec table is big
				 * enough to hold all these pages
				 */
2849 bio_add_page(bio, page, len, 0);
2850 }
2851 }
2852 nr_sectors += len>>9;
2853 sector_nr += len>>9;
2854 sync_blocks -= (len>>9);
2855 } while (++page_idx < RESYNC_PAGES);
2856
2857 r1_bio->sectors = nr_sectors;
2858
2859 if (mddev_is_clustered(mddev) &&
2860 conf->cluster_sync_high < sector_nr + nr_sectors) {
2861 conf->cluster_sync_low = mddev->curr_resync_completed;
2862 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
		/* Send resync message */
2864 md_cluster_ops->resync_info_update(mddev,
2865 conf->cluster_sync_low,
2866 conf->cluster_sync_high);
2867 }
2868
	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
2872 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2873 atomic_set(&r1_bio->remaining, read_targets);
2874 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2875 bio = r1_bio->bios[i];
2876 if (bio->bi_end_io == end_sync_read) {
2877 read_targets--;
2878 md_sync_acct_bio(bio, nr_sectors);
2879 if (read_targets == 1)
2880 bio->bi_opf &= ~MD_FAILFAST;
2881 generic_make_request(bio);
2882 }
2883 }
2884 } else {
2885 atomic_set(&r1_bio->remaining, 1);
2886 bio = r1_bio->bios[r1_bio->read_disk];
2887 md_sync_acct_bio(bio, nr_sectors);
2888 if (read_targets == 1)
2889 bio->bi_opf &= ~MD_FAILFAST;
2890 generic_make_request(bio);
2891
2892 }
2893 return nr_sectors;
2894}
2895
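/*
 * Every device holds a full copy, so unless the caller supplies an
 * explicit size the array size is simply the per-device size.
 */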
2896static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2897{
2898 if (sectors)
2899 return sectors;
2900
2901 return mddev->dev_sectors;
2902}
2903
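/*
 * Allocate an r1conf and everything hanging off it: the per-bucket
 * barrier counters, the mirrors array, the r1bio mempool and the
 * raid1d thread.  Returns an ERR_PTR() on failure.
 */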
2904static struct r1conf *setup_conf(struct mddev *mddev)
2905{
2906 struct r1conf *conf;
2907 int i;
2908 struct raid1_info *disk;
2909 struct md_rdev *rdev;
2910 int err = -ENOMEM;
2911
2912 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2913 if (!conf)
2914 goto abort;
2915
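	/*
	 * nr_pending, nr_waiting, nr_queued and barrier are per-bucket
	 * counters (sectors hash into BARRIER_BUCKETS_NR buckets), so a
	 * resync barrier only throttles I/O that lands in its bucket.
	 */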
2916 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2917 sizeof(atomic_t), GFP_KERNEL);
2918 if (!conf->nr_pending)
2919 goto abort;
2920
2921 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2922 sizeof(atomic_t), GFP_KERNEL);
2923 if (!conf->nr_waiting)
2924 goto abort;
2925
2926 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2927 sizeof(atomic_t), GFP_KERNEL);
2928 if (!conf->nr_queued)
2929 goto abort;
2930
2931 conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2932 sizeof(atomic_t), GFP_KERNEL);
2933 if (!conf->barrier)
2934 goto abort;
2935
2936 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
2937 mddev->raid_disks, 2),
2938 GFP_KERNEL);
2939 if (!conf->mirrors)
2940 goto abort;
2941
2942 conf->tmppage = alloc_page(GFP_KERNEL);
2943 if (!conf->tmppage)
2944 goto abort;
2945
2946 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2947 if (!conf->poolinfo)
2948 goto abort;
2949 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2950 err = mempool_init(&conf->r1bio_pool, NR_RAID1_BIOS, r1bio_pool_alloc,
2951 r1bio_pool_free, conf->poolinfo);
2952 if (err)
2953 goto abort;
2954
2955 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
2956 if (err)
2957 goto abort;
2958
2959 conf->poolinfo->mddev = mddev;
2960
2961 err = -EINVAL;
2962 spin_lock_init(&conf->device_lock);
2963 rdev_for_each(rdev, mddev) {
2964 int disk_idx = rdev->raid_disk;
2965 if (disk_idx >= mddev->raid_disks
2966 || disk_idx < 0)
2967 continue;
2968 if (test_bit(Replacement, &rdev->flags))
2969 disk = conf->mirrors + mddev->raid_disks + disk_idx;
2970 else
2971 disk = conf->mirrors + disk_idx;
2972
2973 if (disk->rdev)
2974 goto abort;
2975 disk->rdev = rdev;
2976 disk->head_position = 0;
2977 disk->seq_start = MaxSector;
2978 }
2979 conf->raid_disks = mddev->raid_disks;
2980 conf->mddev = mddev;
2981 INIT_LIST_HEAD(&conf->retry_list);
2982 INIT_LIST_HEAD(&conf->bio_end_io_list);
2983
2984 spin_lock_init(&conf->resync_lock);
2985 init_waitqueue_head(&conf->wait_barrier);
2986
2987 bio_list_init(&conf->pending_bio_list);
2988 conf->pending_count = 0;
2989 conf->recovery_disabled = mddev->recovery_disabled - 1;
2990
2991 err = -EIO;
2992 for (i = 0; i < conf->raid_disks * 2; i++) {
2993
2994 disk = conf->mirrors + i;
2995
2996 if (i < conf->raid_disks &&
2997 disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
2999 if (!disk->rdev) {
				/* No original, just make the
				 * replacement a recovering spare
				 */
3003 disk->rdev =
3004 disk[conf->raid_disks].rdev;
3005 disk[conf->raid_disks].rdev = NULL;
3006 } else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
3008 goto abort;
3009 }
3010
3011 if (!disk->rdev ||
3012 !test_bit(In_sync, &disk->rdev->flags)) {
3013 disk->head_position = 0;
3014 if (disk->rdev &&
3015 (disk->rdev->saved_raid_disk < 0))
3016 conf->fullsync = 1;
3017 }
3018 }
3019
3020 err = -ENOMEM;
3021 conf->thread = md_register_thread(raid1d, mddev, "raid1");
3022 if (!conf->thread)
3023 goto abort;
3024
3025 return conf;
3026
3027 abort:
3028 if (conf) {
3029 mempool_exit(&conf->r1bio_pool);
3030 kfree(conf->mirrors);
3031 safe_put_page(conf->tmppage);
3032 kfree(conf->poolinfo);
3033 kfree(conf->nr_pending);
3034 kfree(conf->nr_waiting);
3035 kfree(conf->nr_queued);
3036 kfree(conf->barrier);
3037 bioset_exit(&conf->bio_split);
3038 kfree(conf);
3039 }
3040 return ERR_PTR(err);
3041}
3042
3043static void raid1_free(struct mddev *mddev, void *priv);
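/*
 * Bring up a raid1 array: sanity-check the requested level, build (or
 * adopt a takeover-provided) r1conf, stack queue limits from every
 * member device, and hand the daemon thread over to the mddev.
 */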
3044static int raid1_run(struct mddev *mddev)
3045{
3046 struct r1conf *conf;
3047 int i;
3048 struct md_rdev *rdev;
3049 int ret;
3050 bool discard_supported = false;
3051
3052 if (mddev->level != 1) {
3053 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3054 mdname(mddev), mddev->level);
3055 return -EIO;
3056 }
3057 if (mddev->reshape_position != MaxSector) {
3058 pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3059 mdname(mddev));
3060 return -EIO;
3061 }
3062 if (mddev_init_writes_pending(mddev) < 0)
3063 return -ENOMEM;
3064
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in raid1_free()]
	 */
3069 if (mddev->private == NULL)
3070 conf = setup_conf(mddev);
3071 else
3072 conf = mddev->private;
3073
3074 if (IS_ERR(conf))
3075 return PTR_ERR(conf);
3076
3077 if (mddev->queue) {
3078 blk_queue_max_write_same_sectors(mddev->queue, 0);
3079 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3080 }
3081
3082 rdev_for_each(rdev, mddev) {
3083 if (!mddev->gendisk)
3084 continue;
3085 disk_stack_limits(mddev->gendisk, rdev->bdev,
3086 rdev->data_offset << 9);
3087 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3088 discard_supported = true;
3089 }
3090
3091 mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++)
3093 if (conf->mirrors[i].rdev == NULL ||
3094 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3095 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3096 mddev->degraded++;
3097
3098 if (conf->raid_disks - mddev->degraded == 1)
3099 mddev->recovery_cp = MaxSector;
3100
3101 if (mddev->recovery_cp != MaxSector)
3102 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3103 mdname(mddev));
3104 pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3105 mdname(mddev), mddev->raid_disks - mddev->degraded,
3106 mddev->raid_disks);
3107
	/*
	 * Ok, everything is just fine now
	 */
3111 mddev->thread = conf->thread;
3112 conf->thread = NULL;
3113 mddev->private = conf;
3114 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3115
3116 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3117
3118 if (mddev->queue) {
3119 if (discard_supported)
3120 blk_queue_flag_set(QUEUE_FLAG_DISCARD,
3121 mddev->queue);
3122 else
3123 blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
3124 mddev->queue);
3125 }
3126
3127 ret = md_integrity_register(mddev);
3128 if (ret) {
3129 md_unregister_thread(&mddev->thread);
3130 raid1_free(mddev, conf);
3131 }
3132 return ret;
3133}
3134
3135static void raid1_free(struct mddev *mddev, void *priv)
3136{
3137 struct r1conf *conf = priv;
3138
3139 mempool_exit(&conf->r1bio_pool);
3140 kfree(conf->mirrors);
3141 safe_put_page(conf->tmppage);
3142 kfree(conf->poolinfo);
3143 kfree(conf->nr_pending);
3144 kfree(conf->nr_waiting);
3145 kfree(conf->nr_queued);
3146 kfree(conf->barrier);
3147 bioset_exit(&conf->bio_split);
3148 kfree(conf);
3149}
3150
3151static int raid1_resize(struct mddev *mddev, sector_t sectors)
3152{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
3160 sector_t newsize = raid1_size(mddev, sectors, 0);
3161 if (mddev->external_size &&
3162 mddev->array_sectors > newsize)
3163 return -EINVAL;
3164 if (mddev->bitmap) {
3165 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3166 if (ret)
3167 return ret;
3168 }
3169 md_set_array_sectors(mddev, newsize);
3170 if (sectors > mddev->dev_sectors &&
3171 mddev->recovery_cp > mddev->dev_sectors) {
3172 mddev->recovery_cp = mddev->dev_sectors;
3173 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3174 }
3175 mddev->dev_sectors = sectors;
3176 mddev->resync_max_sectors = sectors;
3177 return 0;
3178}
3179
3180static int raid1_reshape(struct mddev *mddev)
3181{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool and a new mirrors array up front;
	 * if either allocation fails we back out without touching the
	 * array.  Only then do we freeze all I/O, swap in the new pool
	 * and mirrors, and "pack" the surviving devices into the lowest
	 * raid_disk slots.
	 */
3192
3193 mempool_t newpool, oldpool;
3194 struct pool_info *newpoolinfo;
3195 struct raid1_info *newmirrors;
3196 struct r1conf *conf = mddev->private;
3197 int cnt, raid_disks;
3198 unsigned long flags;
3199 int d, d2;
3200 int ret;
3201
3202 memset(&newpool, 0, sizeof(newpool));
3203 memset(&oldpool, 0, sizeof(oldpool));
3204
	/* Cannot change chunk_size, layout, or level */
3206 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3207 mddev->layout != mddev->new_layout ||
3208 mddev->level != mddev->new_level) {
3209 mddev->new_chunk_sectors = mddev->chunk_sectors;
3210 mddev->new_layout = mddev->layout;
3211 mddev->new_level = mddev->level;
3212 return -EINVAL;
3213 }
3214
3215 if (!mddev_is_clustered(mddev))
3216 md_allow_write(mddev);
3217
3218 raid_disks = mddev->raid_disks + mddev->delta_disks;
3219
3220 if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
3223 if (conf->mirrors[d].rdev)
3224 cnt++;
3225 if (cnt > raid_disks)
3226 return -EBUSY;
3227 }
3228
3229 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3230 if (!newpoolinfo)
3231 return -ENOMEM;
3232 newpoolinfo->mddev = mddev;
3233 newpoolinfo->raid_disks = raid_disks * 2;
3234
3235 ret = mempool_init(&newpool, NR_RAID1_BIOS, r1bio_pool_alloc,
3236 r1bio_pool_free, newpoolinfo);
3237 if (ret) {
3238 kfree(newpoolinfo);
3239 return ret;
3240 }
3241 newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3242 raid_disks, 2),
3243 GFP_KERNEL);
3244 if (!newmirrors) {
3245 kfree(newpoolinfo);
3246 mempool_exit(&newpool);
3247 return -ENOMEM;
3248 }
3249
3250 freeze_array(conf, 0);
3251
	/* ok, everything is stopped */
3253 oldpool = conf->r1bio_pool;
3254 conf->r1bio_pool = newpool;
3255
3256 for (d = d2 = 0; d < conf->raid_disks; d++) {
3257 struct md_rdev *rdev = conf->mirrors[d].rdev;
3258 if (rdev && rdev->raid_disk != d2) {
3259 sysfs_unlink_rdev(mddev, rdev);
3260 rdev->raid_disk = d2;
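			/*
			 * Unlink again now that raid_disk has changed, in
			 * case a stale rd%d link already exists at the new
			 * slot, so sysfs_link_rdev() below cannot collide.
			 */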
3261 sysfs_unlink_rdev(mddev, rdev);
3262 if (sysfs_link_rdev(mddev, rdev))
3263 pr_warn("md/raid1:%s: cannot register rd%d\n",
3264 mdname(mddev), rdev->raid_disk);
3265 }
3266 if (rdev)
3267 newmirrors[d2++].rdev = rdev;
3268 }
3269 kfree(conf->mirrors);
3270 conf->mirrors = newmirrors;
3271 kfree(conf->poolinfo);
3272 conf->poolinfo = newpoolinfo;
3273
3274 spin_lock_irqsave(&conf->device_lock, flags);
3275 mddev->degraded += (raid_disks - conf->raid_disks);
3276 spin_unlock_irqrestore(&conf->device_lock, flags);
3277 conf->raid_disks = mddev->raid_disks = raid_disks;
3278 mddev->delta_disks = 0;
3279
3280 unfreeze_array(conf);
3281
3282 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3283 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3284 md_wakeup_thread(mddev->thread);
3285
3286 mempool_exit(&oldpool);
3287 return 0;
3288}
3289
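/* Freeze (quiesce != 0) or thaw all I/O on the array. */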
3290static void raid1_quiesce(struct mddev *mddev, int quiesce)
3291{
3292 struct r1conf *conf = mddev->private;
3293
3294 if (quiesce)
3295 freeze_array(conf, 0);
3296 else
3297 unfreeze_array(conf);
3298}
3299
3300static void *raid1_takeover(struct mddev *mddev)
3301{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
3305 if (mddev->level == 5 && mddev->raid_disks == 2) {
3306 struct r1conf *conf;
3307 mddev->new_level = 1;
3308 mddev->new_layout = 0;
3309 mddev->new_chunk_sectors = 0;
3310 conf = setup_conf(mddev);
3311 if (!IS_ERR(conf)) {
		/* Array must appear to be quiesced */
3313 conf->array_frozen = 1;
3314 mddev_clear_unsupported_flags(mddev,
3315 UNSUPPORTED_MDDEV_FLAGS);
3316 }
3317 return conf;
3318 }
3319 return ERR_PTR(-EINVAL);
3320}
3321
3322static struct md_personality raid1_personality =
3323{
3324 .name = "raid1",
3325 .level = 1,
3326 .owner = THIS_MODULE,
3327 .make_request = raid1_make_request,
3328 .run = raid1_run,
3329 .free = raid1_free,
3330 .status = raid1_status,
3331 .error_handler = raid1_error,
3332 .hot_add_disk = raid1_add_disk,
3333 .hot_remove_disk= raid1_remove_disk,
3334 .spare_active = raid1_spare_active,
3335 .sync_request = raid1_sync_request,
3336 .resize = raid1_resize,
3337 .size = raid1_size,
3338 .check_reshape = raid1_reshape,
3339 .quiesce = raid1_quiesce,
3340 .takeover = raid1_takeover,
3341 .congested = raid1_congested,
3342};
3343
3344static int __init raid_init(void)
3345{
3346 return register_md_personality(&raid1_personality);
3347}
3348
3349static void raid_exit(void)
3350{
3351 unregister_md_personality(&raid1_personality);
3352}
3353
3354module_init(raid_init);
3355module_exit(raid_exit);
3356MODULE_LICENSE("GPL");
3357MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3358MODULE_ALIAS("md-personality-3");
3359MODULE_ALIAS("md-raid1");
3360MODULE_ALIAS("md-level-1");
3361
3362module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
3363