// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * RAID-1 management functions.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...)				\
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"
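
/*
 * raid1-10.c is included here rather than compiled separately, so the
 * helpers shared between raid1 and raid10 -- get_resync_pages(),
 * resync_alloc_pages()/resync_free_pages(), rbio_pool_free() and the
 * RESYNC_PAGES/RESYNC_BLOCK_SIZE definitions used below -- end up
 * inlined into this translation unit.
 */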

static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
	struct wb_info *wi, *temp_wi;
	unsigned long flags;
	int ret = 0;
	struct mddev *mddev = rdev->mddev;

	wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO);

	spin_lock_irqsave(&rdev->wb_list_lock, flags);
	list_for_each_entry(temp_wi, &rdev->wb_list, list) {
		/* collision happened */
		if (hi > temp_wi->lo && lo < temp_wi->hi) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret) {
		wi->lo = lo;
		wi->hi = hi;
		list_add(&wi->list, &rdev->wb_list);
	} else
		mempool_free(wi, mddev->wb_info_pool);
	spin_unlock_irqrestore(&rdev->wb_list_lock, flags);

	return ret;
}

static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
	struct wb_info *wi;
	unsigned long flags;
	int found = 0;
	struct mddev *mddev = rdev->mddev;

	spin_lock_irqsave(&rdev->wb_list_lock, flags);
	list_for_each_entry(wi, &rdev->wb_list, list)
		if (hi == wi->hi && lo == wi->lo) {
			list_del(&wi->list);
			mempool_free(wi, mddev->wb_info_pool);
			found = 1;
			break;
		}

	if (!found)
		WARN(1, "The write behind IO is not recorded\n");
	spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
	wake_up(&rdev->wb_io_wait);
}
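
/*
 * check_and_add_wb() and remove_wb() bracket each write-behind I/O with
 * an interval [lo, hi) on the rdev's wb_list.  A writer that collides
 * with an in-flight interval gets -EBUSY and sleeps on rdev->wb_io_wait
 * (see raid1_write_request()); the wake_up() in remove_wb() lets it
 * retry once the overlapping write-behind I/O has completed.
 */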

/*
 * for resync bio, r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
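
/*
 * Worked example (RESYNC_BLOCK_SIZE is 64KiB in raid1-10.c): each resync
 * r1bio covers RESYNC_SECTORS = 128 sectors, and at most RESYNC_DEPTH = 32
 * of them are in flight, so RESYNC_WINDOW is 2MiB (4096 sectors) and the
 * clustered-MD resync window is 16 times that, 32MiB.
 */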

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int j;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
			    gfp_flags);
	if (!rps)
		goto out_free_r1bio;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < pi->raid_disks; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	kfree(rps);

out_free_r1bio:
	rbio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = pi->raid_disks; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_put(r1bio->bios[i]);
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	rbio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, &conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, &conf->r1buf_pool);

	lower_barrier(conf, sect);
}
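
/*
 * Note the two mempools: r1bio_pool backs r1bios for regular I/O and is
 * released with free_r1bio(), while r1buf_pool backs resync/recovery
 * r1bios (with attached resync pages) and is released with put_buf(),
 * which also drops the resync barrier for the covered sector range.
 */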

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf, r1_bio->sector);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			   r1_bio->sectors,
			   !test_bit(R1BIO_Degraded, &r1_bio->state),
			   test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	bool discard_error;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
			if (!test_bit(Faulty, &rdev->flags))
				/* This is the only remaining device,
				 * We need to retry the write without
				 * FailFast
				 */
				set_bit(R1BIO_WriteError, &r1_bio->state);
			else {
				/* Finished with this branch */
				r1_bio->bios[mirror] = NULL;
				to_put = bio;
			}
		} else
			set_bit(R1BIO_WriteError, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code for to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially have cases where we read data from the behind
		 * while the device is faulty) and need to do proper sync and
		 * error handling later in the code path.
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(WBCollisionCheck, &rdev->flags)) {
			sector_t lo = r1_bio->sector;
			sector_t hi = r1_bio->sector + r1_bio->sectors;

			remove_wb(rdev, lo, hi);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'd rather wait for I/O to
		 * complete
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}
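
/*
 * Example: with BARRIER_UNIT_SECTOR_SIZE = 1 << BARRIER_UNIT_SECTOR_BITS
 * sectors (64MiB worth of sectors, per raid1.h), a request starting at
 * sector 131000 can cover at most 72 sectors before it would cross into
 * the next barrier unit (round_up(131001, 131072) - 131000 == 72), so
 * callers split it there.
 */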

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;
	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
	    (mddev_is_clustered(conf->mddev) &&
	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
					    this_sector + sectors)))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_dist_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad <= this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_dist_disk = disk;
				best_pending_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else {
			if ((sectors > best_good_sectors) && (best_disk >= 0))
				best_disk = -1;
			best_good_sectors = sectors;
		}

		if (best_disk >= 0)
			/* At least two disks to choose from so failfast is OK */
			set_bit(R1BIO_FailFast, &r1_bio->state);

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If buffered sequential IO size exceeds optimal
			 * iosize, check if there is idle disk. If yes, choose
			 * the idle disk. read_balance could already choose an
			 * idle disk before noticing it's a sequential IO in
			 * this disk. This doesn't matter because this disk
			 * will idle, next time it will be utilized after the
			 * first disk has IO size exceeds optimal iosize. In
			 * this way, iosize of the first disk will be optimal
			 * iosize at least. iosize of the second disk might be
			 * small, but not a big deal since when the second disk
			 * starts IO, the first disk is likely still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with less pending request even the
	 * disk is rotational, which might/might not be optimal for raids with
	 * mixed rotational/non-rotational disks depending on workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk || min_pending == 0)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}
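
/*
 * Summary of the policy above: during resync (or clustered resync) take
 * the first usable disk; otherwise stick with a disk seeing sequential
 * I/O, then, when any non-rotational disk is present, prefer the disk
 * with the fewest pending requests, and finally the disk whose last
 * known head position is closest to the request.
 */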

static int raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << WB_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1 << WB_async_congested)) || 1)
				ret |= bdi_congested(q->backing_dev_info, bits);
			else
				ret &= bdi_congested(q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
	md_bitmap_unplug(conf->mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		struct md_rdev *rdev = (void *)bio->bi_disk;
		bio->bi_next = NULL;
		bio_set_dev(bio, rdev->bdev);
		if (test_bit(Faulty, &rdev->flags)) {
			bio_io_error(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bio->bi_disk->queue)))
			/* Just ignore it */
			bio_endio(bio);
		else
			generic_make_request(bio);
		bio = next;
	}
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct blk_plug plug;
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);

		/*
		 * As this is called in a wait_event() loop (see freeze_array),
		 * current->state is likely TASK_UNINTERRUPTIBLE which will
		 * cause a warning when we prepare to wait again.  As it is
		 * rare that this path is taken, it is perfectly safe to force
		 * us to go around the wait_event() loop again, so the warning
		 * is a false-positive.  Silence the warning by resetting
		 * thread state
		 */
		__set_current_state(TASK_RUNNING);
		blk_start_plug(&plug);
		flush_bio_list(conf, bio);
		blk_finish_plug(&plug);
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier() when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_waiting[idx]),
			    conf->resync_lock);

	/* block any new IO from starting */
	atomic_inc(&conf->barrier[idx]);
	/*
	 * In raise_barrier() we firstly increase conf->barrier[idx] then
	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
	 * A memory barrier here to make sure conf->nr_pending[idx] won't
	 * be fetched before conf->barrier[idx] is increased. Otherwise
	 * there will be a race between raise_barrier() and _wait_barrier().
	 */
	smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    existing in corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning reaches
	 *    max resync count which allowed on current I/O barrier bucket.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    (!conf->array_frozen &&
			     !atomic_read(&conf->nr_pending[idx]) &&
			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
			     test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
			    conf->resync_lock);

	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		atomic_dec(&conf->barrier[idx]);
		spin_unlock_irq(&conf->resync_lock);
		wake_up(&conf->wait_barrier);
		return -EINTR;
	}

	atomic_inc(&conf->nr_sync_pending);
	spin_unlock_irq(&conf->resync_lock);

	return 0;
}

static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

	atomic_dec(&conf->barrier[idx]);
	atomic_dec(&conf->nr_sync_pending);
	wake_up(&conf->wait_barrier);
}
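
/*
 * The barrier state is kept per bucket: sector_to_idx() (raid1.h) hashes
 * sector >> BARRIER_UNIT_SECTOR_BITS into one of BARRIER_BUCKETS_NR
 * buckets, so a resync barrier raised for one 64MiB-aligned region only
 * blocks regular I/O whose starting sector hashes to the same bucket.
 */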

static void _wait_barrier(struct r1conf *conf, int idx)
{
	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * then raise_barrier() can be blocked when it waits for
	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
	 * conf->resync_lock when there is no barrier raised in same
	 * barrier unit bucket. Also if the array is frozen, I/O
	 * should be blocked until array is unfrozen.
	 */
	atomic_inc(&conf->nr_pending[idx]);
	/*
	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
	 * barrier here to make sure conf->nr_pending[idx] won't be fetched
	 * before conf->barrier[idx] is increased. Otherwise there will be a
	 * race between _wait_barrier() and raise_barrier().
	 */
	smp_mb__after_atomic();

	/*
	 * Don't worry about checking two atomic_t variables at same time
	 * here. If during we check conf->barrier[idx], the array is
	 * frozen (conf->array_frozen is 1), and conf->barrier[idx] is
	 * 0, it is safe to return and make the I/O continue. Because the
	 * array is frozen, all I/O returned here will eventually complete
	 * or be queued, no race will happen. See code comment in
	 * freeze_array().
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;

	/*
	 * After holding conf->resync_lock, conf->nr_pending[idx]
	 * should be decreased before waiting for barrier to drop.
	 * Otherwise, we may encounter a race condition because
	 * raise_barrier() might be waiting for conf->nr_pending[idx]
	 * to become 0 at same time.
	 */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for the barrier in same barrier unit bucket to drop. */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    !atomic_read(&conf->barrier[idx]),
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	/*
	 * Very similar to _wait_barrier(). The difference is, for read
	 * I/O we don't need wait for sync I/O, but if the whole array
	 * is frozen, the read I/O still has to wait until the array is
	 * unfrozen. Since there is no ordering requirement with
	 * barrier[idx] here, memory barrier is unnecessary as well.
	 */
	atomic_inc(&conf->nr_pending[idx]);

	if (!READ_ONCE(conf->array_frozen))
		return;

	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for array to be unfrozen */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen,
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_wait_barrier(conf, idx);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_allow_barrier(conf, idx);
}

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
	int idx, ret;

	ret = atomic_read(&conf->nr_sync_pending);
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_read(&conf->nr_pending[idx]) -
			atomic_read(&conf->nr_queued[idx]);

	return ret;
}

static void freeze_array(struct r1conf *conf, int extra)
{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * go quiet.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync I/O will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at
	 * _wait_barrier() and wait_read_barrier(). The flying I/Os will
	 * either complete or be queued onto conf->retry_list or
	 * conf->bio_end_io_list, where they are counted in nr_queued[].
	 *
	 * Every flying I/O contributes to a conf->nr_pending[idx], idx is
	 * the barrier bucket index which this I/O request hits. When all
	 * sync and normal I/O are either done or queued, the sum of all
	 * conf->nr_pending[] matches the sum of all conf->nr_queued[]
	 * plus 'extra', the number of requests the caller itself still
	 * holds pending. That is exactly the condition
	 * get_unqueued_pending() == extra which we wait for below.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	raid1_log(conf->mddev, "wait freeze");
	wait_event_lock_irq_cmd(
		conf->wait_barrier,
		get_unqueued_pending(conf) == extra,
		conf->resync_lock,
		flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	spin_unlock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
}
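
/*
 * The 'extra' argument to freeze_array() is the number of requests the
 * caller itself still holds pending: handle_read_error() passes 1
 * because the failed read it is recovering still counts in
 * nr_pending[], while raid1_remove_disk() passes 0.
 */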

static void alloc_behind_master_bio(struct r1bio *r1_bio,
				    struct bio *bio)
{
	int size = bio->bi_iter.bi_size;
	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int i = 0;
	struct bio *behind_bio = NULL;

	behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
	if (!behind_bio)
		return;

	/* discard op, we don't support writezero/writesame yet */
	if (!bio_has_data(bio)) {
		behind_bio->bi_iter.bi_size = size;
		goto skip_copy;
	}

	behind_bio->bi_write_hint = bio->bi_write_hint;

	while (i < vcnt && size) {
		struct page *page;
		int len = min_t(int, PAGE_SIZE, size);

		page = alloc_page(GFP_NOIO);
		if (unlikely(!page))
			goto free_pages;

		bio_add_page(behind_bio, page, len, 0);

		size -= len;
		i++;
	}

	bio_copy_data(behind_bio, bio);
skip_copy:
	r1_bio->behind_master_bio = behind_bio;
	set_bit(R1BIO_BehindIO, &r1_bio->state);

	return;

free_pages:
	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
	bio_free_pages(behind_bio);
	bio_put(behind_bio);
}
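
/*
 * Write-behind: the master bio's payload is copied into behind_master_bio
 * above, so the original write can be acknowledged once all
 * non-write-mostly mirrors have finished, while write-mostly mirrors
 * complete from the private copy in the background (see the behind
 * handling in raid1_end_write_request()).
 */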

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	flush_bio_list(conf, bio);
	kfree(plug);
}

static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
{
	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;
}

static inline struct r1bio *
alloc_r1bio(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;

	r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
	/* Ensure no bio records IO_BLOCKED */
	memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
	init_r1bio(r1_bio, mddev, bio);
	return r1_bio;
}

static void raid1_read_request(struct mddev *mddev, struct bio *bio,
			       int max_read_sectors, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct bio *read_bio;
	struct bitmap *bitmap = mddev->bitmap;
	const int op = bio_op(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	int max_sectors;
	int rdisk;
	bool print_msg = !!r1_bio;
	char b[BDEVNAME_SIZE];

	/*
	 * If r1_bio is set, we are blocking the raid1d thread
	 * so there is a tiny risk of deadlock.  So ask for
	 * emergency memory if needed.
	 */
	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;

	if (print_msg) {
		/* Need to get the block device name carefully */
		struct md_rdev *rdev;
		rcu_read_lock();
		rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
		if (rdev)
			bdevname(rdev->bdev, b);
		else
			strcpy(b, "???");
		rcu_read_unlock();
	}

	/*
	 * Still need barrier for READ in case that whole
	 * array is frozen.
	 */
	wait_read_barrier(conf, bio->bi_iter.bi_sector);

	if (!r1_bio)
		r1_bio = alloc_r1bio(mddev, bio);
	else
		init_r1bio(r1_bio, mddev, bio);
	r1_bio->sectors = max_read_sectors;

	/*
	 * make_request() can abort the operation when read-ahead is being
	 * used and no empty request is available.
	 */
	rdisk = read_balance(conf, r1_bio, &max_sectors);

	if (rdisk < 0) {
		/* couldn't find anywhere to read from */
		if (print_msg) {
			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev),
					    b,
					    (unsigned long long)r1_bio->sector);
		}
		raid_end_bio_io(r1_bio);
		return;
	}
	mirror = conf->mirrors + rdisk;

	if (print_msg)
		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
				    mdname(mddev),
				    (unsigned long long)r1_bio->sector,
				    bdevname(mirror->rdev->bdev, b));

	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
	    bitmap) {
		/*
		 * Reading from a write-mostly device must take care not to
		 * over-take any writes that are 'behind'
		 */
		raid1_log(mddev, "wait behind writes");
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}

	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      gfp, &conf->bio_split);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	r1_bio->read_disk = rdisk;

	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);

	r1_bio->bios[rdisk] = read_bio;

	read_bio->bi_iter.bi_sector = r1_bio->sector +
		mirror->rdev->data_offset;
	bio_set_dev(read_bio, mirror->rdev->bdev);
	read_bio->bi_end_io = raid1_end_read_request;
	bio_set_op_attrs(read_bio, op, do_sync);
	if (test_bit(FailFast, &mirror->rdev->flags) &&
	    test_bit(R1BIO_FailFast, &r1_bio->state))
		read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r1_bio;

	if (mddev->gendisk)
		trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
				      disk_devt(mddev->gendisk), r1_bio->sector);

	generic_make_request(read_bio);
}

static void raid1_write_request(struct mddev *mddev, struct bio *bio,
				int max_write_sectors)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	int i, disks;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	int first_clone;
	int max_sectors;

	if (mddev_is_clustered(mddev) &&
	    md_cluster_ops->area_resyncing(mddev, WRITE,
					   bio->bi_iter.bi_sector, bio_end_sector(bio))) {

		DEFINE_WAIT(w);
		for (;;) {
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_IDLE);
			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
							    bio->bi_iter.bi_sector,
							    bio_end_sector(bio)))
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	wait_barrier(conf, bio->bi_iter.bi_sector);

	r1_bio = alloc_r1bio(mddev, bio);
	r1_bio->sectors = max_write_sectors;

	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		raid1_log(mddev, "wait queued");
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * blocks.
	 * This potentially requires several writes to write around
	 * the bad blocks.  Each set of writes gets it's own r1bio
	 * with a set of bios attached.
	 */

	disks = conf->raid_disks * 2;
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r1_bio->sectors;
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			continue;
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is
				 * acknowledged
				 */
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf, bio->bi_iter.bi_sector);
		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf, bio->bi_iter.bi_sector);
		goto retry_write;
	}

	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      GFP_NOIO, &conf->bio_split);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;

	for (i = 0; i < disks; i++) {
		struct bio *mbio = NULL;
		if (!r1_bio->bios[i])
			continue;

		if (first_clone) {
			/* do behind if we can.
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
			if (bitmap &&
			    (atomic_read(&bitmap->behind_writes)
			     < mddev->bitmap_info.max_write_behind) &&
			    !waitqueue_active(&bitmap->behind_wait)) {
				alloc_behind_master_bio(r1_bio, bio);
			}

			md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
					     test_bit(R1BIO_BehindIO, &r1_bio->state));
			first_clone = 0;
		}

		if (r1_bio->behind_master_bio)
			mbio = bio_clone_fast(r1_bio->behind_master_bio,
					      GFP_NOIO, &mddev->bio_set);
		else
			mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);

		if (r1_bio->behind_master_bio) {
			struct md_rdev *rdev = conf->mirrors[i].rdev;

			if (test_bit(WBCollisionCheck, &rdev->flags)) {
				sector_t lo = r1_bio->sector;
				sector_t hi = r1_bio->sector + r1_bio->sectors;

				wait_event(rdev->wb_io_wait,
					   check_and_add_wb(rdev, lo, hi) == 0);
			}
			if (test_bit(WriteMostly, &rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		r1_bio->bios[i] = mbio;

		mbio->bi_iter.bi_sector	= (r1_bio->sector +
					   conf->mirrors[i].rdev->data_offset);
		bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
		mbio->bi_end_io	= raid1_end_write_request;
		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
		    conf->raid_disks - mddev->degraded > 1)
			mbio->bi_opf |= MD_FAILFAST;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);

		if (mddev->gendisk)
			trace_block_bio_remap(mbio->bi_disk->queue,
					      mbio, disk_devt(mddev->gendisk),
					      r1_bio->sector);
		/* flush_pending_writes() needs access to the rdev so...*/
		mbio->bi_disk = (void *)conf->mirrors[i].rdev;

		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
		if (cb)
			plug = container_of(cb, struct raid1_plug_cb, cb);
		else
			plug = NULL;
		if (plug) {
			bio_list_add(&plug->pending, mbio);
			plug->pending_cnt++;
		} else {
			spin_lock_irqsave(&conf->device_lock, flags);
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			md_wakeup_thread(mddev->thread);
		}
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
{
	sector_t sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return true;
	}

	/*
	 * There is a limit to the maximum size, but
	 * the read/write handler might find a lower limit
	 * due to bad blocks.  To avoid multiple splits,
	 * we pass the maximum number of sectors down
	 * and let the lower level perform the split.
	 */
	sectors = align_to_barrier_unit_end(
		bio->bi_iter.bi_sector, bio_sectors(bio));

	if (bio_data_dir(bio) == READ)
		raid1_read_request(mddev, bio, sectors, NULL);
	else {
		if (!md_write_start(mddev, bio))
			return false;
		raid1_write_request(mddev, bio, sectors);
	}
	return true;
}
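
/*
 * Because raid1_make_request() never issues a request that crosses a
 * barrier unit boundary (align_to_barrier_unit_end() above), every bio
 * handed to the read/write paths maps to exactly one barrier bucket,
 * which is what makes the per-bucket nr_pending/barrier accounting safe.
 */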

static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}

static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r1conf *conf = mddev->private;
	unsigned long flags;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disks, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		conf->recovery_disabled = mddev->recovery_disabled;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		return;
	}
	set_bit(Blocked, &rdev->flags);
	if (test_and_clear_bit(In_sync, &rdev->flags))
		mddev->degraded++;
	set_bit(Faulty, &rdev->flags);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	/*
	 * if recovery is running, make sure it aborts.
	 */
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
		"md/raid1:%s: Operation continuing on %d devices.\n",
		mdname(mddev), bdevname(rdev->bdev, b),
		mdname(mddev), conf->raid_disks - mddev->degraded);
}

static void print_conf(struct r1conf *conf)
{
	int i;

	pr_debug("RAID1 conf printout:\n");
	if (!conf) {
		pr_debug("(!conf)\n");
		return;
	}
	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		 conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
				 i, !test_bit(In_sync, &rdev->flags),
				 !test_bit(Faulty, &rdev->flags),
				 bdevname(rdev->bdev, b));
	}
	rcu_read_unlock();
}

static void close_sync(struct r1conf *conf)
{
	int idx;

	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
		_wait_barrier(conf, idx);
		_allow_barrier(conf, idx);
	}

	mempool_exit(&conf->r1buf_pool);
}

static int raid1_spare_active(struct mddev *mddev)
{
	int i;
	struct r1conf *conf = mddev->private;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 * device_lock used to avoid races with raid1_end_read_request
	 * which expects 'In_sync' flags and ->degraded to be consistent.
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
		if (repl
		    && !test_bit(Candidate, &repl->flags)
		    && repl->recovery_offset == MaxSector
		    && !test_bit(Faulty, &repl->flags)
		    && !test_and_set_bit(In_sync, &repl->flags)) {
			/* replacement has just become active */
			if (!rdev ||
			    !test_and_clear_bit(In_sync, &rdev->flags))
				count++;
			if (rdev) {
				/* Replaced device not technically
				 * faulty, but we need to be sure
				 * it gets removed and never re-added
				 */
				set_bit(Faulty, &rdev->flags);
				sysfs_notify_dirent_safe(
					rdev->sysfs_state);
			}
		}
		if (rdev
		    && rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		}
	}
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}

static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	struct raid1_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (md_integrity_add_rdev(rdev, mddev))
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    rdev->saved_raid_disk < conf->raid_disks &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		first = last = rdev->saved_raid_disk;

	for (mirror = first; mirror <= last; mirror++) {
		p = conf->mirrors + mirror;
		if (!p->rdev) {
			if (mddev->gendisk)
				disk_stack_limits(mddev->gendisk, rdev->bdev,
						  rdev->data_offset << 9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p[conf->raid_disks].rdev == NULL) {
			/* Add this device as a replacement */
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;
			err = 0;
			conf->fullsync = 1;
			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
			break;
		}
	}
	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	print_conf(conf);
	return err;
}

static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct raid1_info *p = conf->mirrors + number;

	if (rdev != p->rdev)
		p = conf->mirrors + conf->raid_disks + number;

	print_conf(conf);
	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->recovery_disabled != conf->recovery_disabled &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		if (!test_bit(RemoveSynchronized, &rdev->flags)) {
			synchronize_rcu();
			if (atomic_read(&rdev->nr_pending)) {
				/* lost the race, try later */
				err = -EBUSY;
				p->rdev = rdev;
				goto abort;
			}
		}
		if (conf->mirrors[conf->raid_disks + number].rdev) {
			/* We just removed a device that is being replaced.
			 * Move down the replacement.  We drain all IO before
			 * doing this to avoid confusion.
			 */
			struct md_rdev *repl =
				conf->mirrors[conf->raid_disks + number].rdev;
			freeze_array(conf, 0);
			if (atomic_read(&repl->nr_pending)) {
				/* It means that some queued IO of retry_list
				 * hold repl. Thus, we cannot set replacement
				 * as NULL, avoiding rdev NULL pointer
				 * dereference in sync_request_write and
				 * handle_write_finished.
				 */
				err = -EBUSY;
				unfreeze_array(conf);
				goto abort;
			}
			clear_bit(Replacement, &repl->flags);
			p->rdev = repl;
			conf->mirrors[conf->raid_disks + number].rdev = NULL;
			unfreeze_array(conf);
		}

		clear_bit(WantReplacement, &rdev->flags);
		err = md_integrity_register(mddev);
	}
abort:

	print_conf(conf);
	return err;
}

static void end_sync_read(struct bio *bio)
{
	struct r1bio *r1_bio = get_resync_r1bio(bio);

	update_head_pos(r1_bio->read_disk, r1_bio);

	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (!bio->bi_status)
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}

static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	sector_t sync_blocks = 0;
	sector_t s = r1_bio->sector;
	long sectors_to_go = r1_bio->sectors;

	/* make sure these bits don't get cleared. */
	do {
		md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
		s += sync_blocks;
		sectors_to_go -= sync_blocks;
	} while (sectors_to_go > 0);
}

static void end_sync_write(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = get_resync_r1bio(bio);
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	sector_t first_bad;
	int bad_sectors;
	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;

	if (!uptodate) {
		abort_sync_write(mddev, r1_bio);
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				mddev->recovery);
		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
			       &first_bad, &bad_sectors) &&
		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
				r1_bio->sector,
				r1_bio->sectors,
				&first_bad, &bad_sectors)
		)
		set_bit(R1BIO_MadeGood, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, uptodate);
		}
	}
}

static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
			   int sectors, struct page *page, int rw)
{
	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
		/* success */
		return 1;
	if (rw == WRITE) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				rdev->mddev->recovery);
	}
	/* need to record an error - either for the block or the device */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
	return 0;
}

static int fix_sync_read_error(struct r1bio *r1_bio)
{
	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors.  Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
	 * We don't need to check is_badblock() again as we
	 * made sure that anything with a bad block in range
	 * will have bi_end_io clear.
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
	struct page **pages = get_resync_pages(bio)->pages;
	sector_t sect = r1_bio->sector;
	int sectors = r1_bio->sectors;
	int idx = 0;
	struct md_rdev *rdev;

	rdev = conf->mirrors[r1_bio->read_disk].rdev;
	if (test_bit(FailFast, &rdev->flags)) {
		/* Don't try recovering from here - just fail it
		 * ... unless it is the last working device of course */
		md_error(mddev, rdev);
		if (test_bit(Faulty, &rdev->flags))
			/* Don't try to read from here, but make sure
			 * put_buf does it's thing
			 */
			bio->bi_end_io = end_sync_write;
	}

	while(sectors) {
		int s = sectors;
		int d = r1_bio->read_disk;
		int success = 0;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;
		do {
			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
				/* No rcu protection needed here devices
				 * can only be removed when no resync is
				 * active, and resync is currently active
				 */
				rdev = conf->mirrors[d].rdev;
				if (sync_page_io(rdev, sect, s<<9,
						 pages[idx],
						 REQ_OP_READ, 0, false)) {
					success = 1;
					break;
				}
			}
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != r1_bio->read_disk);

		if (!success) {
			char b[BDEVNAME_SIZE];
			int abort = 0;
			/* Cannot read from anywhere, this block is lost.
			 * Record a bad block on each device.  If that doesn't
			 * work just disable and interrupt the recovery.
			 * Don't fail devices as that won't really help.
			 */
			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev), bio_devname(bio, b),
					    (unsigned long long)r1_bio->sector);
			for (d = 0; d < conf->raid_disks * 2; d++) {
				rdev = conf->mirrors[d].rdev;
				if (!rdev || test_bit(Faulty, &rdev->flags))
					continue;
				if (!rdev_set_badblocks(rdev, sect, s, 0))
					abort = 1;
			}
			if (abort) {
				conf->recovery_disabled =
					mddev->recovery_disabled;
				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return 0;
			}
			/* Try next page */
			sectors -= s;
			sect += s;
			idx++;
			continue;
		}

		start = d;
		/* write it back and re-read */
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    pages[idx],
					    WRITE) == 0) {
				r1_bio->bios[d]->bi_end_io = NULL;
				rdev_dec_pending(rdev, mddev);
			}
		}
		d = start;
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    pages[idx],
					    READ) != 0)
				atomic_add(s, &rdev->corrected_errors);
		}
		sectors -= s;
		sect += s;
		idx++;
	}
	set_bit(R1BIO_Uptodate, &r1_bio->state);
	bio->bi_status = 0;
	return 1;
}
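
/*
 * The loop above proceeds one page (PAGE_SIZE >> 9 sectors) at a time:
 * find any mirror that can still supply the data, write it back to the
 * other in-sync mirrors, then re-read it on them so corrected_errors
 * stays accurate and bad blocks get recorded where the rewrite failed.
 */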

static void process_checks(struct r1bio *r1_bio)
{
	/* We have read all readable devices.  If we haven't
	 * got the block, then there is no hope left.
	 * If we have, then we want to do a comparison
	 * and skip the write if everything is the same.
	 * If any blocks failed to read, then we need to
	 * attempt an over-write
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int primary;
	int i;
	int vcnt;

	/* Fix variable parts of all bios */
	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
	for (i = 0; i < conf->raid_disks * 2; i++) {
		blk_status_t status;
		struct bio *b = r1_bio->bios[i];
		struct resync_pages *rp = get_resync_pages(b);
		if (b->bi_end_io != end_sync_read)
			continue;
		/* fixup the bio for reuse, but preserve errno */
		status = b->bi_status;
		bio_reset(b);
		b->bi_status = status;
		b->bi_iter.bi_sector = r1_bio->sector +
			conf->mirrors[i].rdev->data_offset;
		bio_set_dev(b, conf->mirrors[i].rdev->bdev);
		b->bi_end_io = end_sync_read;
		rp->raid_bio = r1_bio;
		b->bi_private = rp;

		/* initialize bvec table again */
		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
	}
	for (primary = 0; primary < conf->raid_disks * 2; primary++)
		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
		    !r1_bio->bios[primary]->bi_status) {
			r1_bio->bios[primary]->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
			break;
		}
	r1_bio->read_disk = primary;
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j = 0;
		struct bio *pbio = r1_bio->bios[primary];
		struct bio *sbio = r1_bio->bios[i];
		blk_status_t status = sbio->bi_status;
		struct page **ppages = get_resync_pages(pbio)->pages;
		struct page **spages = get_resync_pages(sbio)->pages;
		struct bio_vec *bi;
		int page_len[RESYNC_PAGES] = { 0 };
		struct bvec_iter_all iter_all;

		if (sbio->bi_end_io != end_sync_read)
			continue;
		/* Now we can 'fixup' the status value */
		sbio->bi_status = 0;

		bio_for_each_segment_all(bi, sbio, iter_all)
			page_len[j++] = bi->bv_len;

		if (!status) {
			for (j = vcnt; j-- ; ) {
				if (memcmp(page_address(ppages[j]),
					   page_address(spages[j]),
					   page_len[j]))
					break;
			}
		} else
			j = 0;
		if (j >= 0)
			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && !status)) {
			/* No need to write to this device. */
			sbio->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
			continue;
		}

		bio_copy_data(sbio, pbio);
	}
}

static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	int i;
	int disks = conf->raid_disks * 2;
	struct bio *wbio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		/* ouch - failed to read all of that. */
		if (!fix_sync_read_error(r1_bio))
			return;

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		process_checks(r1_bio);

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;
		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
			abort_sync_write(mddev, r1_bio);
			continue;
		}

		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
			wbio->bi_opf |= MD_FAILFAST;

		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, 1);
		}
	}
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems encounter.
 *	3.	Performs writes following reads for array synchronising.
 */
static void fix_read_error(struct r1conf *conf, int read_disk,
			   sector_t sect, int sectors)
{
	struct mddev *mddev = conf->mddev;
	while(sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		struct md_rdev *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			sector_t first_bad;
			int bad_sectors;

			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    (test_bit(In_sync, &rdev->flags) ||
			     (!test_bit(Faulty, &rdev->flags) &&
			      rdev->recovery_offset >= sect + s)) &&
			    is_badblock(rdev, sect, s,
					&first_bad, &bad_sectors) == 0) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				if (sync_page_io(rdev, sect, s<<9,
						 conf->tmppage, REQ_OP_READ, 0, false))
					success = 1;
				rdev_dec_pending(rdev, mddev);
				if (success)
					break;
			} else
				rcu_read_unlock();
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere - mark it bad */
			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
			if (!rdev_set_badblocks(rdev, sect, s, 0))
				md_error(mddev, rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d==0)
				d = conf->raid_disks * 2;
			d--;
			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				r1_sync_page_io(rdev, sect, s,
						conf->tmppage, WRITE);
				rdev_dec_pending(rdev, mddev);
			} else
				rcu_read_unlock();
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d==0)
				d = conf->raid_disks * 2;
			d--;
			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				if (r1_sync_page_io(rdev, sect, s,
						    conf->tmppage, READ)) {
					atomic_add(s, &rdev->corrected_errors);
					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
						mdname(mddev), s,
						(unsigned long long)(sect +
								     rdev->data_offset),
						bdevname(rdev->bdev, b));
				}
				rdev_dec_pending(rdev, mddev);
			} else
				rcu_read_unlock();
		}
		sectors -= s;
		sect += s;
	}
}
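
/*
 * Like fix_sync_read_error() but for normal reads: it runs with the
 * array frozen (see handle_read_error()), so conf->tmppage can safely be
 * used as the bounce buffer for the read / write-back / re-read passes.
 */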

static int narrow_write_error(struct r1bio *r1_bio, int i)
{
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[i].rdev;

	/* bio has the data to be written to device 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks.  We must handle this somehow.
	 *
	 * We currently own a reference on the rdev.
	 */

	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r1_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = roundup(1 << rdev->badblocks.shift,
				bdev_logical_block_size(rdev->bdev) >> 9);
	sector = r1_bio->sector;
	sectors = ((sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		- sector;

	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors'*/

		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			wbio = bio_clone_fast(r1_bio->behind_master_bio,
					      GFP_NOIO,
					      &mddev->bio_set);
		} else {
			wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
					      &mddev->bio_set);
		}

		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
		wbio->bi_iter.bi_sector = r1_bio->sector;
		wbio->bi_iter.bi_size = r1_bio->sectors << 9;

		bio_trim(wbio, sector - r1_bio->sector, sectors);
		wbio->bi_iter.bi_sector += rdev->data_offset;
		bio_set_dev(wbio, rdev->bdev);

		if (submit_bio_wait(wbio) < 0)
			/* failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}

static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	int s = r1_bio->sectors;
	for (m = 0; m < conf->raid_disks * 2 ; m++) {
		struct md_rdev *rdev = conf->mirrors[m].rdev;
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
		if (!bio->bi_status &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
		}
		if (bio->bi_status &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
		}
	}
	put_buf(r1_bio);
	md_done_sync(conf->mddev, s, 1);
}

static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m, idx;
	bool fail = false;

	for (m = 0; m < conf->raid_disks * 2 ; m++)
		if (r1_bio->bios[m] == IO_MADE_GOOD) {
			struct md_rdev *rdev = conf->mirrors[m].rdev;
			rdev_clear_badblocks(rdev,
					     r1_bio->sector,
					     r1_bio->sectors, 0);
			rdev_dec_pending(rdev, conf->mddev);
		} else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
			fail = true;
			if (!narrow_write_error(r1_bio, m)) {
				md_error(conf->mddev,
					 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
				set_bit(R1BIO_Degraded, &r1_bio->state);
			}
			rdev_dec_pending(conf->mirrors[m].rdev,
					 conf->mddev);
		}
	if (fail) {
		spin_lock_irq(&conf->device_lock);
		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
		idx = sector_to_idx(r1_bio->sector);
		atomic_inc(&conf->nr_queued[idx]);
		spin_unlock_irq(&conf->device_lock);
		/*
		 * In case freeze_array() is waiting for condition
		 * get_unqueued_pending() == extra to be true.
		 */
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(conf->mddev->thread);
	} else {
		if (test_bit(R1BIO_WriteError, &r1_bio->state))
			close_write(r1_bio);
		raid_end_bio_io(r1_bio);
	}
}
2477
2478static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2479{
2480 struct mddev *mddev = conf->mddev;
2481 struct bio *bio;
2482 struct md_rdev *rdev;
2483
2484 clear_bit(R1BIO_ReadError, &r1_bio->state);
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494 bio = r1_bio->bios[r1_bio->read_disk];
2495 bio_put(bio);
2496 r1_bio->bios[r1_bio->read_disk] = NULL;
2497
2498 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2499 if (mddev->ro == 0
2500 && !test_bit(FailFast, &rdev->flags)) {
2501 freeze_array(conf, 1);
2502 fix_read_error(conf, r1_bio->read_disk,
2503 r1_bio->sector, r1_bio->sectors);
2504 unfreeze_array(conf);
2505 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2506 md_error(mddev, rdev);
2507 } else {
2508 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2509 }
2510
2511 rdev_dec_pending(rdev, conf->mddev);
2512 allow_barrier(conf, r1_bio->sector);
2513 bio = r1_bio->master_bio;
2514
2515
2516 r1_bio->state = 0;
2517 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2518}
2519
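/*
 * raid1d is the per-array MD daemon thread.  It first completes any
 * r1bios parked on bio_end_io_list (once the superblock change is no
 * longer pending), then drains the retry list, dispatching each r1bio
 * to the sync/write/read error handlers above.
 */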
static void raid1d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;
	int idx;

	md_check_recovery(mddev);

	if (!list_empty_careful(&conf->bio_end_io_list) &&
	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
		LIST_HEAD(tmp);
		spin_lock_irqsave(&conf->device_lock, flags);
		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
			list_splice_init(&conf->bio_end_io_list, &tmp);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		while (!list_empty(&tmp)) {
			r1_bio = list_first_entry(&tmp, struct r1bio,
						  retry_list);
			list_del(&r1_bio->retry_list);
			idx = sector_to_idx(r1_bio->sector);
			atomic_dec(&conf->nr_queued[idx]);
			if (mddev->degraded)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			if (test_bit(R1BIO_WriteError, &r1_bio->state))
				close_write(r1_bio);
			raid_end_bio_io(r1_bio);
		}
	}

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
		list_del(head->prev);
		idx = sector_to_idx(r1_bio->sector);
		atomic_dec(&conf->nr_queued[idx]);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);
		else
			WARN_ON_ONCE(1);

		cond_resched();
		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}

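/*
 * Size the resync buffer pool: RESYNC_WINDOW / RESYNC_BLOCK_SIZE works
 * out to RESYNC_DEPTH buffers, enough to keep the resync window full.
 */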
static int init_resync(struct r1conf *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(mempool_initialized(&conf->r1buf_pool));

	return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
			    r1buf_pool_free, conf->poolinfo);
}

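/*
 * Take an r1buf from the pool and reset each of its bios for reuse.
 * bio_reset() clears bi_private, so the resync_pages pointer stashed
 * there must be saved and put back by hand.
 */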
static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
{
	struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
	struct resync_pages *rps;
	struct bio *bio;
	int i;

	for (i = conf->poolinfo->raid_disks; i--; ) {
		bio = r1bio->bios[i];
		rps = bio->bi_private;
		bio_reset(bio);
		bio->bi_private = rps;
	}
	r1bio->master_bio = NULL;
	return r1bio;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */
static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
				   int *skipped)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	sector_t sync_blocks;
	int still_degraded = 0;
	int good_sectors = RESYNC_SECTORS;
	int min_bad = 0;
	int idx = sector_to_idx(sector_nr);
	int page_idx = 0;

	if (!mempool_initialized(&conf->r1buf_pool))
		if (init_resync(conf))
			return 0;

	max_sector = mddev->dev_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in
		 * mddev->curr_resync
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					   &sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		md_bitmap_close_sync(mddev->bitmap);
		close_sync(conf);

		if (mddev_is_clustered(mddev)) {
			conf->cluster_sync_low = 0;
			conf->cluster_sync_high = 0;
		}
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* before building a request, check if we can skip these blocks..
	 * This call to md_bitmap_start_sync() doesn't actually record anything
	 */
	if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}

	/*
	 * If there is non-resync activity waiting for a turn, then let it
	 * through before starting on this new sync request.
	 */
	if (atomic_read(&conf->nr_waiting[idx]))
		schedule_timeout_uninterruptible(1);

	/* we are incrementing sector_nr below. To be safe, we check against
	 * sector_nr + two times RESYNC_SECTORS
	 */
	md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));

	if (raise_barrier(conf, sector_nr))
		return 0;

	r1_bio = raid1_alloc_init_r1buf(conf);

	rcu_read_lock();
	/*
	 * If we get a correctably read error during resync or
	 * recovery, we might want to read from a different device.
	 * So we flag all drives that could conceivably be read from
	 * for READ, and any others (which will be non-In_sync
	 * devices) for WRITE. If a read fails, we try reading from
	 * something else for which READ is OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);

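	/* make sure good_sectors won't go across a barrier unit boundary */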
	good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev;
		bio = r1_bio->bios[i];

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				still_degraded = 1;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			sector_t first_bad = MaxSector;
			int bad_sectors;

			if (is_badblock(rdev, sector_nr, good_sectors,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector_nr)
					good_sectors = first_bad - sector_nr;
				else {
					bad_sectors -= (sector_nr - first_bad);
					if (min_bad == 0 ||
					    min_bad > bad_sectors)
						min_bad = bad_sectors;
				}
			}
			if (sector_nr < first_bad) {
				if (test_bit(WriteMostly, &rdev->flags)) {
					if (wonly < 0)
						wonly = i;
				} else {
					if (disk < 0)
						disk = i;
				}
				bio_set_op_attrs(bio, REQ_OP_READ, 0);
				bio->bi_end_io = end_sync_read;
				read_targets++;
			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
				   test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
				   !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is suitable for reading (InSync),
				 * but has bad block(s) here. Let's try to correct them,
				 * if we are doing resync or repair. Otherwise, leave
				 * this device alone for this sync request.
				 */
				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
				bio->bi_end_io = end_sync_write;
				write_targets++;
			}
		}
		if (bio->bi_end_io) {
			atomic_inc(&rdev->nr_pending);
			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
			bio_set_dev(bio, rdev->bdev);
			if (test_bit(FailFast, &rdev->flags))
				bio->bi_opf |= MD_FAILFAST;
		}
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets
		 */
		int ok = 1;
		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
				struct md_rdev *rdev = conf->mirrors[i].rdev;
				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0) && ok;
			}
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		*skipped = 1;
		put_buf(r1_bio);

		if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * fail the really bad ones ???
			 */
			conf->recovery_disabled = mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			return 0;
		} else
			return min_bad;

	}
	if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
		good_sectors = min_bad;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets - 1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv;
		if (min_bad > 0)
			max_sector = sector_nr + min_bad;
		rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
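	/*
	 * Build up the request page by page: each pass adds one page (or
	 * less at the end of the range) to every active bio, clamping the
	 * length against max_sector and against what the bitmap says still
	 * needs syncing.
	 */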
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
						  &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			if ((len >> 9) > sync_blocks)
				len = sync_blocks<<9;
		}

		for (i = 0 ; i < conf->raid_disks * 2; i++) {
			struct resync_pages *rp;

			bio = r1_bio->bios[i];
			rp = get_resync_pages(bio);
			if (bio->bi_end_io) {
				page = resync_fetch_page(rp, page_idx);

				/*
				 * won't fail because the vec table is big
				 * enough to hold all these pages
				 */
				bio_add_page(bio, page, len, 0);
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (++page_idx < RESYNC_PAGES);

	r1_bio->sectors = nr_sectors;

	if (mddev_is_clustered(mddev) &&
	    conf->cluster_sync_high < sector_nr + nr_sectors) {
		conf->cluster_sync_low = mddev->curr_resync_completed;
		conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
		/* Send resync message */
		md_cluster_ops->resync_info_update(mddev,
				conf->cluster_sync_low,
				conf->cluster_sync_high);
	}

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				read_targets--;
				md_sync_acct_bio(bio, nr_sectors);
				if (read_targets == 1)
					bio->bi_opf &= ~MD_FAILFAST;
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct_bio(bio, nr_sectors);
		if (read_targets == 1)
			bio->bi_opf &= ~MD_FAILFAST;
		generic_make_request(bio);
	}
	return nr_sectors;
}

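/*
 * A mirrored array is only as large as a single member device, so unless
 * an explicit size is passed in (e.g. when resizing), report dev_sectors.
 */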
static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}

static struct r1conf *setup_conf(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct raid1_info *disk;
	struct md_rdev *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
	if (!conf)
		goto abort;

	conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_pending)
		goto abort;

	conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_waiting)
		goto abort;

	conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
				  sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_queued)
		goto abort;

	conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
				sizeof(atomic_t), GFP_KERNEL);
	if (!conf->barrier)
		goto abort;

	conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
					    mddev->raid_disks, 2),
			        GFP_KERNEL);
	if (!conf->mirrors)
		goto abort;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto abort;

	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto abort;
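	/* Each slot can hold an active device plus a replacement, hence *2 */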
	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
	err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
			   rbio_pool_free, conf->poolinfo);
	if (err)
		goto abort;

	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
	if (err)
		goto abort;

	conf->poolinfo->mddev = mddev;

	err = -EINVAL;
	spin_lock_init(&conf->device_lock);
	rdev_for_each(rdev, mddev) {
		int disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		if (test_bit(Replacement, &rdev->flags))
			disk = conf->mirrors + mddev->raid_disks + disk_idx;
		else
			disk = conf->mirrors + disk_idx;

		if (disk->rdev)
			goto abort;
		disk->rdev = rdev;
		disk->head_position = 0;
		disk->seq_start = MaxSector;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	INIT_LIST_HEAD(&conf->retry_list);
	INIT_LIST_HEAD(&conf->bio_end_io_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	conf->pending_count = 0;
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	err = -EIO;
	for (i = 0; i < conf->raid_disks * 2; i++) {

		disk = conf->mirrors + i;

		if (i < conf->raid_disks &&
		    disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
			if (!disk->rdev) {
				/* No original, just make the replacement
				 * a recovering spare
				 */
				disk->rdev =
					disk[conf->raid_disks].rdev;
				disk[conf->raid_disks].rdev = NULL;
			} else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
				goto abort;
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			if (disk->rdev &&
			    (disk->rdev->saved_raid_disk < 0))
				conf->fullsync = 1;
		}
	}

	err = -ENOMEM;
	conf->thread = md_register_thread(raid1d, mddev, "raid1");
	if (!conf->thread)
		goto abort;

	return conf;

 abort:
	if (conf) {
		mempool_exit(&conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf->nr_pending);
		kfree(conf->nr_waiting);
		kfree(conf->nr_queued);
		kfree(conf->barrier);
		bioset_exit(&conf->bio_split);
		kfree(conf);
	}
	return ERR_PTR(err);
}

static void raid1_free(struct mddev *mddev, void *priv);
static int raid1_run(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct md_rdev *rdev;
	int ret;
	bool discard_supported = false;

	if (mddev->level != 1) {
		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
			mdname(mddev), mddev->level);
		return -EIO;
	}
	if (mddev->reshape_position != MaxSector) {
		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
			mdname(mddev));
		return -EIO;
	}
	if (mddev_init_writes_pending(mddev) < 0)
		return -ENOMEM;

	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in raid1_free()]
	 */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (mddev->queue) {
		blk_queue_max_write_same_sectors(mddev->queue, 0);
		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
	}

	rdev_for_each(rdev, mddev) {
		if (!mddev->gendisk)
			continue;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
			discard_supported = true;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->mirrors[i].rdev == NULL ||
		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			mddev->degraded++;

	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	if (mddev->recovery_cp != MaxSector)
		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
			mdname(mddev));
	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
		mdname(mddev), mddev->raid_disks - mddev->degraded,
		mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;
	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);

	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

	if (mddev->queue) {
		if (discard_supported)
			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
					   mddev->queue);
		else
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
					     mddev->queue);
	}

	ret = md_integrity_register(mddev);
	if (ret) {
		md_unregister_thread(&mddev->thread);
		raid1_free(mddev, conf);
	}
	return ret;
}

static void raid1_free(struct mddev *mddev, void *priv)
{
	struct r1conf *conf = priv;

	mempool_exit(&conf->r1bio_pool);
	kfree(conf->mirrors);
	safe_put_page(conf->tmppage);
	kfree(conf->poolinfo);
	kfree(conf->nr_pending);
	kfree(conf->nr_waiting);
	kfree(conf->nr_queued);
	kfree(conf->barrier);
	bioset_exit(&conf->bio_split);
	kfree(conf);
}

static int raid1_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sector_t newsize = raid1_size(mddev, sectors, 0);
	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static int raid1_reshape(struct mddev *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t newpool, oldpool;
	struct pool_info *newpoolinfo;
	struct raid1_info *newmirrors;
	struct r1conf *conf = mddev->private;
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2;
	int ret;

	memset(&newpool, 0, sizeof(newpool));
	memset(&oldpool, 0, sizeof(oldpool));

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	if (!mddev_is_clustered(mddev))
		md_allow_write(mddev);

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks * 2;

	ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
			   rbio_pool_free, newpoolinfo);
	if (ret) {
		kfree(newpoolinfo);
		return ret;
	}
	newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
					 raid_disks, 2),
			     GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_exit(&newpool);
		return -ENOMEM;
	}

	freeze_array(conf, 0);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

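	/*
	 * Pack the surviving devices down to the lowest raid_disk numbers,
	 * re-creating the sysfs links for any rdev whose slot changes.
	 */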
	for (d = d2 = 0; d < conf->raid_disks; d++) {
		struct md_rdev *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			sysfs_unlink_rdev(mddev, rdev);
			rdev->raid_disk = d2;
			sysfs_unlink_rdev(mddev, rdev);
			if (sysfs_link_rdev(mddev, rdev))
				pr_warn("md/raid1:%s: cannot register rd%d\n",
					mdname(mddev), rdev->raid_disk);
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	unfreeze_array(conf);

	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_exit(&oldpool);
	return 0;
}

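/*
 * Quiesce the array: freeze_array() blocks new I/O and waits for pending
 * I/O to drain; unfreeze_array() lets it flow again.
 */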
static void raid1_quiesce(struct mddev *mddev, int quiesce)
{
	struct r1conf *conf = mddev->private;

	if (quiesce)
		freeze_array(conf, 0);
	else
		unfreeze_array(conf);
}

static void *raid1_takeover(struct mddev *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		struct r1conf *conf;
		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf)) {
			/* Array must appear to be quiesced */
			conf->array_frozen = 1;
			mddev_clear_unsupported_flags(mddev,
				UNSUPPORTED_MDDEV_FLAGS);
		}
		return conf;
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= raid1_make_request,
	.run		= raid1_run,
	.free		= raid1_free,
	.status		= raid1_status,
	.error_handler	= raid1_error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk= raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= raid1_sync_request,
	.resize		= raid1_resize,
	.size		= raid1_size,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
	.congested	= raid1_congested,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);