/*
 * raid1.c : Multiple Devices driver for Linux -- the RAID-1 personality.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/interval_tree_generic.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...) \
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
		     START, LAST, static inline, raid1_rb);

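/*
 * Per-rdev interval tree of in-flight writes, used when write
 * serialization is in effect (serialize_policy, or write-behind with
 * CollisionCheck set).  check_and_add_serial() records a write range,
 * failing with -EBUSY while an overlapping write is still in flight.
 */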
static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
				struct serial_info *si, int idx)
{
	unsigned long flags;
	int ret = 0;
	sector_t lo = r1_bio->sector;
	sector_t hi = lo + r1_bio->sectors;
	struct serial_in_rdev *serial = &rdev->serial[idx];

	spin_lock_irqsave(&serial->serial_lock, flags);
	/* collision happened */
	if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
		ret = -EBUSY;
	else {
		si->start = lo;
		si->last = hi;
		raid1_rb_insert(si, &serial->serial_rb);
	}
	spin_unlock_irqrestore(&serial->serial_lock, flags);

	return ret;
}

static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
{
	struct mddev *mddev = rdev->mddev;
	struct serial_info *si;
	int idx = sector_to_idx(r1_bio->sector);
	struct serial_in_rdev *serial = &rdev->serial[idx];

	if (WARN_ON(!mddev->serial_info_pool))
		return;
	si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
	wait_event(serial->serial_io_wait,
		   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
}

static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
	struct serial_info *si;
	unsigned long flags;
	int found = 0;
	struct mddev *mddev = rdev->mddev;
	int idx = sector_to_idx(lo);
	struct serial_in_rdev *serial = &rdev->serial[idx];

	spin_lock_irqsave(&serial->serial_lock, flags);
	for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
	     si; si = raid1_rb_iter_next(si, lo, hi)) {
		if (si->start == lo && si->last == hi) {
			raid1_rb_remove(si, &serial->serial_rb);
			mempool_free(si, mddev->serial_info_pool);
			found = 1;
			break;
		}
	}
	if (!found)
		WARN(1, "The write IO is not recorded for serialization\n");
	spin_unlock_irqrestore(&serial->serial_lock, flags);
	wake_up(&serial->serial_io_wait);
}

/*
 * for resync bio, r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void *r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)

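/*
 * Allocate everything needed for one resync/recovery request: an r1bio
 * plus a bio and a resync_pages set per disk.  Only a user-requested
 * check/repair needs real data pages on every bio; otherwise the first
 * bio's pages are shared (see the comments in the function body).
 */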
static void *r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int j;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
			    gfp_flags);
	if (!rps)
		goto out_free_r1bio;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < pi->raid_disks; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	kfree(rps);

out_free_r1bio:
	rbio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = pi->raid_disks; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_put(r1bio->bios[i]);
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	rbio_pool_free(r1bio, data);
}

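/*
 * Teardown helpers for regular (non-resync) r1bios: drop every
 * per-mirror bio reference and return the r1bio to its mempool.
 */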
static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, &conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, &conf->r1buf_pool);

	lower_barrier(conf, sect);
}

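/*
 * Hand an r1bio that needs attention (read error, badblock update)
 * over to raid1d, accounting it in nr_queued so that freeze_array()
 * can tell queued I/O apart from I/O that is still in flight.
 */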
static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
		bio_end_io_acct(bio, r1_bio->start_time);
	bio_endio(bio);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.  All I/Os, even write-behind writes, are done.
	 */
	allow_barrier(conf, r1_bio->sector);

	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}
411
412static void close_write(struct r1bio *r1_bio)
413{
414
415 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
416 bio_free_pages(r1_bio->behind_master_bio);
417 bio_put(r1_bio->behind_master_bio);
418 r1_bio->behind_master_bio = NULL;
419 }
420
421 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
422 r1_bio->sectors,
423 !test_bit(R1BIO_Degraded, &r1_bio->state),
424 test_bit(R1BIO_BehindIO, &r1_bio->state));
425 md_write_end(r1_bio->mddev);
426}
427
428static void r1_bio_write_done(struct r1bio *r1_bio)
429{
430 if (!atomic_dec_and_test(&r1_bio->remaining))
431 return;
432
433 if (test_bit(R1BIO_WriteError, &r1_bio->state))
434 reschedule_retry(r1_bio);
435 else {
436 close_write(r1_bio);
437 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
438 reschedule_retry(r1_bio);
439 else
440 raid_end_bio_io(r1_bio);
441 }
442}

static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	bool discard_error;
	sector_t lo = r1_bio->sector;
	sector_t hi = r1_bio->sector + r1_bio->sectors;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
		}

		/*
		 * When the device is faulty, it is not necessary to
		 * handle write error.
		 */
		if (!test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_WriteError, &r1_bio->state);
		else {
			/* Fail the request */
			set_bit(R1BIO_Degraded, &r1_bio->state);
			/* Finished with this branch */
			r1_bio->bios[mirror] = NULL;
			to_put = bio;
		}
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code for to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-space so we do not care if it was not the
		 * original failing device, in that case the report of
		 * uptodate is meaningless.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back, so
		 * only a write to an In_sync, non-Faulty device counts
		 * towards a successful result.
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(CollisionCheck, &rdev->flags))
			remove_serial(rdev, lo, hi);
		if (test_bit(WriteMostly, &rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	} else if (rdev->mddev->serialize_policy)
		remove_serial(rdev, lo, hi);
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

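/*
 * Requests are never allowed to cross a barrier-bucket boundary:
 * clamp 'sectors' so [start_sector, start_sector + len) stays inside
 * one BARRIER_UNIT_SECTOR_SIZE unit.
 */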
static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;
	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
	    (mddev_is_clustered(conf->mddev) &&
	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
					    this_sector + sectors)))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_dist_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad <= this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_dist_disk = disk;
				best_pending_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * first_bad
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else {
			if ((sectors > best_good_sectors) && (best_disk >= 0))
				best_disk = -1;
			best_good_sectors = sectors;
		}

		if (best_disk >= 0)
			/* At least two disks to choose from so failfast is OK */
			set_bit(R1BIO_FailFast, &r1_bio->state);

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If buffered sequential IO size exceeds optimal
			 * iosize, check if there is idle disk. If yes, choose
			 * the idle disk. read_balance could already choose an
			 * idle disk before noticing it's a sequential IO in
			 * this disk. This doesn't matter because this disk
			 * will idle, next time it will be utilized after the
			 * first disk has IO size exceeds optimal iosize. In
			 * this way, iosize of the first disk will be optimal
			 * iosize at least. iosize of the second disk might be
			 * small, but not a big deal since when the second disk
			 * starts IO, the first disk is likely still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with less pending request even the
	 * disk is rotational, which might/might not be optimal for raids with
	 * mixed rotational and non-rotational disks depending on workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk || min_pending == 0)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}

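/*
 * Submit a chain of queued write bios.  The bitmap must be flushed
 * first so that a crash cannot leave bitmap bits clear for writes
 * that have not reached the member disks yet.
 */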
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
	md_bitmap_unplug(conf->mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) {
		struct bio *next = bio->bi_next;
		struct md_rdev *rdev = (void *)bio->bi_bdev;
		bio->bi_next = NULL;
		bio_set_dev(bio, rdev->bdev);
		if (test_bit(Faulty, &rdev->flags)) {
			bio_io_error(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
			/* Just ignore it */
			bio_endio(bio);
		else
			submit_bio_noacct(bio);
		bio = next;
		cond_resched();
	}
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct blk_plug plug;
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);

		/*
		 * As this is called in a wait_event() loop (see freeze_array),
		 * current->state might be TASK_UNINTERRUPTIBLE which will
		 * cause a warning when we prepare to wait again.  As it is
		 * rare that this path is taken, it is perhaps safer to simply
		 * ignore the warning and make sure that no code relies on
		 * sleeping in this context unnecessarily.
		 */
		__set_current_state(TASK_RUNNING);
		blk_start_plug(&plug);
		flush_bio_list(conf, bio);
		blk_finish_plug(&plug);
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO,
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls wait_barrier().  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier() when it has finished its IO.
 * Background IO calls must call raise_barrier().  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier() when the particular background IO completes.
 *
 * If resync/recovery is interrupted, returns -EINTR;
 * Otherwise, returns 0.
 */
static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_waiting[idx]),
			    conf->resync_lock);

	/* block any new IO from starting */
	atomic_inc(&conf->barrier[idx]);
	/*
	 * In raise_barrier() we firstly increase conf->barrier[idx], then
	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
	 * increase conf->nr_pending[idx], then check conf->barrier[idx].
	 * A memory barrier here makes sure conf->nr_pending[idx] won't
	 * be fetched before conf->barrier[idx] is increased. Otherwise
	 * there will be a race between raise_barrier() and _wait_barrier().
	 */
	smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    exists in the corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning we have
	 *    reached the max resync count allowed on this barrier bucket.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    (!conf->array_frozen &&
			     !atomic_read(&conf->nr_pending[idx]) &&
			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
			    test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
			    conf->resync_lock);

	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		atomic_dec(&conf->barrier[idx]);
		spin_unlock_irq(&conf->resync_lock);
		wake_up(&conf->wait_barrier);
		return -EINTR;
	}

	atomic_inc(&conf->nr_sync_pending);
	spin_unlock_irq(&conf->resync_lock);

	return 0;
}

static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

	atomic_dec(&conf->barrier[idx]);
	atomic_dec(&conf->nr_sync_pending);
	wake_up(&conf->wait_barrier);
}

static void _wait_barrier(struct r1conf *conf, int idx)
{
	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * then raise_barrier() can be blocked when it waits for
	 * conf->nr_pending[idx] to be 0. Then we can avoid taking
	 * conf->resync_lock when there is no barrier raised in the same
	 * barrier unit bucket. Also if the array is frozen, I/O
	 * should be blocked until the array is unfrozen.
	 */
	atomic_inc(&conf->nr_pending[idx]);
	/*
	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
	 * barrier here makes sure conf->barrier[idx] won't be fetched
	 * before conf->nr_pending[idx] is increased. Otherwise there will
	 * be a race between _wait_barrier() and raise_barrier().
	 */
	smp_mb__after_atomic();
	/*
	 * Don't worry about checking the two atomic_t variables at the
	 * same time here. If while we check conf->barrier[idx] the array
	 * is frozen (conf->array_frozen is 1) and conf->barrier[idx] is
	 * 0, it is safe to return and let the I/O continue. Because the
	 * array is frozen, all I/O returned here will eventually complete
	 * or be queued; no race will happen. See the code comment in
	 * freeze_array().
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;

	/*
	 * After taking conf->resync_lock, conf->nr_pending[idx]
	 * should be decreased before waiting for the barrier to drop.
	 * Otherwise, we may encounter a race condition because
	 * raise_barrier() might be waiting for conf->nr_pending[idx]
	 * to be 0 at the same time.
	 */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for the barrier in the same barrier unit bucket to drop. */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    !atomic_read(&conf->barrier[idx]),
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	/*
	 * Very similar to _wait_barrier(). The difference is, for read
	 * I/O we don't need to wait for sync I/O, but if the whole array
	 * is frozen, the read I/O still has to wait until the array is
	 * unfrozen. Since there is no ordering requirement with
	 * conf->barrier[idx] here, a memory barrier is unnecessary as well.
	 */
	atomic_inc(&conf->nr_pending[idx]);

	if (!READ_ONCE(conf->array_frozen))
		return;

	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for the array to be unfrozen */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen,
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_wait_barrier(conf, idx);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_allow_barrier(conf, idx);
}

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
	int idx, ret;

	ret = atomic_read(&conf->nr_sync_pending);
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_read(&conf->nr_pending[idx]) -
			atomic_read(&conf->nr_queued[idx]);

	return ret;
}

static void freeze_array(struct r1conf *conf, int extra)
{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * go quiet.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync IO will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at
	 * _wait_barrier() or wait_read_barrier(). The flying I/Os will
	 * either complete or be queued. When everything goes quiet, only
	 * queued I/Os are left.
	 *
	 * Every flying I/O contributes to a conf->nr_pending[idx], where
	 * idx is the barrier bucket index which this I/O request hits.
	 * When all sync and normal I/O are queued, the sum of all
	 * conf->nr_pending[] will match the sum of all conf->nr_queued[].
	 * But normal I/O failure is an exception: in handle_read_error()
	 * we may call freeze_array() before trying to fix the read error,
	 * in which case the failed read I/O is not queued, so
	 * get_unqueued_pending() == 1.
	 *
	 * Therefore under conf->resync_lock we wait until the sum of
	 * conf->nr_pending[] reaches the sum of conf->nr_queued[] plus
	 * 'extra'.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	raid1_log(conf->mddev, "wait freeze");
	wait_event_lock_irq_cmd(
		conf->wait_barrier,
		get_unqueued_pending(conf) == extra,
		conf->resync_lock,
		flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	spin_unlock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
}
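/*
 * For write-behind: build a private copy of the write payload so the
 * master bio can be completed before the slow (write-mostly) mirrors
 * have finished.  On allocation failure we silently fall back to
 * ordinary synchronous writes.
 */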
static void alloc_behind_master_bio(struct r1bio *r1_bio,
				    struct bio *bio)
{
	int size = bio->bi_iter.bi_size;
	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int i = 0;
	struct bio *behind_bio = NULL;

	behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
	if (!behind_bio)
		return;

	/* discard op, we don't support writezero/writesame yet */
	if (!bio_has_data(bio)) {
		behind_bio->bi_iter.bi_size = size;
		goto skip_copy;
	}

	behind_bio->bi_write_hint = bio->bi_write_hint;

	while (i < vcnt && size) {
		struct page *page;
		int len = min_t(int, PAGE_SIZE, size);

		page = alloc_page(GFP_NOIO);
		if (unlikely(!page))
			goto free_pages;

		bio_add_page(behind_bio, page, len, 0);

		size -= len;
		i++;
	}

	bio_copy_data(behind_bio, bio);
skip_copy:
	r1_bio->behind_master_bio = behind_bio;
	set_bit(R1BIO_BehindIO, &r1_bio->state);

	return;

free_pages:
	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
	bio_free_pages(behind_bio);
	bio_put(behind_bio);
}

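/*
 * Context for blk_check_plugged(): writes issued while the task is
 * plugged are gathered here and submitted in one batch from
 * raid1_unplug(), either inline or via raid1d.
 */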
struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	flush_bio_list(conf, bio);
	kfree(plug);
}

static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
{
	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;
}

static inline struct r1bio *
alloc_r1bio(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;

	r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
	/* Ensure no bio records IO_BLOCKED */
	memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
	init_r1bio(r1_bio, mddev, bio);
	return r1_bio;
}

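/*
 * Service a READ.  Called both for fresh bios from raid1_make_request()
 * and, with a pre-existing r1_bio, from handle_read_error() when a read
 * is being retried on another mirror.
 */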
static void raid1_read_request(struct mddev *mddev, struct bio *bio,
			       int max_read_sectors, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct bio *read_bio;
	struct bitmap *bitmap = mddev->bitmap;
	const int op = bio_op(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	int max_sectors;
	int rdisk;
	bool r1bio_existed = !!r1_bio;
	char b[BDEVNAME_SIZE];
	/*
	 * If r1_bio is set, this is a retry issued from raid1d, which may
	 * itself be what frees up the sector we are waiting on; use
	 * __GFP_HIGH so the allocations below can dip into reserves
	 * instead of blocking indefinitely.
	 */
	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;

	if (r1bio_existed) {
		/* Need to get the block device name carefully */
		struct md_rdev *rdev;
		rcu_read_lock();
		rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
		if (rdev)
			bdevname(rdev->bdev, b);
		else
			strcpy(b, "???");
		rcu_read_unlock();
	}

	/*
	 * Still need barrier for READ in case that whole
	 * array is frozen.
	 */
	wait_read_barrier(conf, bio->bi_iter.bi_sector);

	if (!r1_bio)
		r1_bio = alloc_r1bio(mddev, bio);
	else
		init_r1bio(r1_bio, mddev, bio);
	r1_bio->sectors = max_read_sectors;

	/*
	 * make_request() can abort the operation when read-ahead is being
	 * used and no empty request is available.
	 */
	rdisk = read_balance(conf, r1_bio, &max_sectors);

	if (rdisk < 0) {
		/* couldn't find anywhere to read from */
		if (r1bio_existed) {
			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev),
					    b,
					    (unsigned long long)r1_bio->sector);
		}
		raid_end_bio_io(r1_bio);
		return;
	}
	mirror = conf->mirrors + rdisk;

	if (r1bio_existed)
		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
				    mdname(mddev),
				    (unsigned long long)r1_bio->sector,
				    bdevname(mirror->rdev->bdev, b));

	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
	    bitmap) {
		/*
		 * Reading from a write-mostly device must take care not to
		 * over-take any writes that are 'behind'
		 */
		raid1_log(mddev, "wait behind writes");
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}

	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      gfp, &conf->bio_split);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	r1_bio->read_disk = rdisk;

	if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
		r1_bio->start_time = bio_start_io_acct(bio);

	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);

	r1_bio->bios[rdisk] = read_bio;

	read_bio->bi_iter.bi_sector = r1_bio->sector +
		mirror->rdev->data_offset;
	bio_set_dev(read_bio, mirror->rdev->bdev);
	read_bio->bi_end_io = raid1_end_read_request;
	bio_set_op_attrs(read_bio, op, do_sync);
	if (test_bit(FailFast, &mirror->rdev->flags) &&
	    test_bit(R1BIO_FailFast, &r1_bio->state))
		read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r1_bio;

	if (mddev->gendisk)
		trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
				      r1_bio->sector);

	submit_bio_noacct(read_bio);
}

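/*
 * Service a WRITE.  The request is cloned to every usable mirror;
 * known bad blocks may force the write to be narrowed or split, and a
 * Blocked rdev makes us back out and wait before retrying.
 */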
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
				int max_write_sectors)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	int i, disks;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	int first_clone;
	int max_sectors;
	bool write_behind = false;

	if (mddev_is_clustered(mddev) &&
	    md_cluster_ops->area_resyncing(mddev, WRITE,
					   bio->bi_iter.bi_sector, bio_end_sector(bio))) {

		DEFINE_WAIT(w);
		for (;;) {
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_IDLE);
			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
							    bio->bi_iter.bi_sector,
							    bio_end_sector(bio)))
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	wait_barrier(conf, bio->bi_iter.bi_sector);

	r1_bio = alloc_r1bio(mddev, bio);
	r1_bio->sectors = max_write_sectors;

	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		raid1_log(mddev, "wait queued");
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * r1_bio->bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * blocks.
	 * This potentially requires several writes to avoid any mistakes.
	 */
	disks = conf->raid_disks * 2;
retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r1_bio->sectors;
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);

		/*
		 * The write-behind io is only attempted on drives marked as
		 * write-mostly, which means we could allocate write behind
		 * bio later.
		 */
		if (rdev && test_bit(WriteMostly, &rdev->flags))
			write_behind = true;

		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			continue;
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is
				 * acknowledged */
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf, bio->bi_iter.bi_sector);
		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf, bio->bi_iter.bi_sector);
		goto retry_write;
	}

	/*
	 * When using a bitmap, we may call alloc_behind_master_bio below.
	 * alloc_behind_master_bio allocates a copy of the data payload a page
	 * at a time and thus needs a new bio that can fit the whole payload
	 * this bio in page sized chunks.
	 */
	if (write_behind && bitmap)
		max_sectors = min_t(int, max_sectors,
				    BIO_MAX_VECS * (PAGE_SIZE >> 9));
	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      GFP_NOIO, &conf->bio_split);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
		r1_bio->start_time = bio_start_io_acct(bio);
	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;

	for (i = 0; i < disks; i++) {
		struct bio *mbio = NULL;
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		if (!r1_bio->bios[i])
			continue;

		if (first_clone) {
			/* do behind I/O ?
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
			if (bitmap &&
			    (atomic_read(&bitmap->behind_writes)
			     < mddev->bitmap_info.max_write_behind) &&
			    !waitqueue_active(&bitmap->behind_wait)) {
				alloc_behind_master_bio(r1_bio, bio);
			}

			md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
					     test_bit(R1BIO_BehindIO, &r1_bio->state));
			first_clone = 0;
		}

		if (r1_bio->behind_master_bio)
			mbio = bio_clone_fast(r1_bio->behind_master_bio,
					      GFP_NOIO, &mddev->bio_set);
		else
			mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);

		if (r1_bio->behind_master_bio) {
			if (test_bit(CollisionCheck, &rdev->flags))
				wait_for_serialization(rdev, r1_bio);
			if (test_bit(WriteMostly, &rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		} else if (mddev->serialize_policy)
			wait_for_serialization(rdev, r1_bio);

		r1_bio->bios[i] = mbio;

		mbio->bi_iter.bi_sector	= (r1_bio->sector +
					   conf->mirrors[i].rdev->data_offset);
		bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
		mbio->bi_end_io	= raid1_end_write_request;
		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
		    conf->raid_disks - mddev->degraded > 1)
			mbio->bi_opf |= MD_FAILFAST;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);

		if (mddev->gendisk)
			trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
					      r1_bio->sector);
		/* flush_pending_writes() needs access to the rdev */
		mbio->bi_bdev = (void *)conf->mirrors[i].rdev;

		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
		if (cb)
			plug = container_of(cb, struct raid1_plug_cb, cb);
		else
			plug = NULL;
		if (plug) {
			bio_list_add(&plug->pending, mbio);
			plug->pending_cnt++;
		} else {
			spin_lock_irqsave(&conf->device_lock, flags);
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			md_wakeup_thread(mddev->thread);
		}
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

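/*
 * Main entry point from the md core.  Returns false only when the bio
 * could not be accepted yet (e.g. md_write_start() backed out); the
 * caller will resubmit it later.
 */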
static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
{
	sector_t sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	/*
	 * There is a limit to the maximum size, but
	 * the read/write handler might find a lower limit
	 * due to bad blocks.  To avoid multiple splits,
	 * we pass the maximum number of sectors down
	 * and let the lower level perform the split.
	 */
	sectors = align_to_barrier_unit_end(
		bio->bi_iter.bi_sector, bio_sectors(bio));

	if (bio_data_dir(bio) == READ)
		raid1_read_request(mddev, bio, sectors, NULL);
	else {
		if (!md_write_start(mddev, bio))
			return false;
		raid1_write_request(mddev, bio, sectors);
	}
	return true;
}

static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}

static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r1conf *conf = mddev->private;
	unsigned long flags;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk with "fail_last_dev == false",
	 * ignore the error, let the next level up know.
	 * else mark the drive as failed
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		conf->recovery_disabled = mddev->recovery_disabled;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		return;
	}
	set_bit(Blocked, &rdev->flags);
	if (test_and_clear_bit(In_sync, &rdev->flags))
		mddev->degraded++;
	set_bit(Faulty, &rdev->flags);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	/*
	 * if recovery is running, make sure it aborts.
	 */
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
		"md/raid1:%s: Operation continuing on %d devices.\n",
		mdname(mddev), bdevname(rdev->bdev, b),
		mdname(mddev), conf->raid_disks - mddev->degraded);
}

static void print_conf(struct r1conf *conf)
{
	int i;

	pr_debug("RAID1 conf printout:\n");
	if (!conf) {
		pr_debug("(!conf)\n");
		return;
	}
	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		 conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
				 i, !test_bit(In_sync, &rdev->flags),
				 !test_bit(Faulty, &rdev->flags),
				 bdevname(rdev->bdev, b));
	}
	rcu_read_unlock();
}

static void close_sync(struct r1conf *conf)
{
	int idx;

	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
		_wait_barrier(conf, idx);
		_allow_barrier(conf, idx);
	}

	mempool_exit(&conf->r1buf_pool);
}

static int raid1_spare_active(struct mddev *mddev)
{
	int i;
	struct r1conf *conf = mddev->private;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 * device_lock used to avoid races with raid1_end_read_request
	 * which expects 'In_sync' flags and ->degraded to be consistent.
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
		if (repl
		    && !test_bit(Candidate, &repl->flags)
		    && repl->recovery_offset == MaxSector
		    && !test_bit(Faulty, &repl->flags)
		    && !test_and_set_bit(In_sync, &repl->flags)) {
			/* replacement has just become active */
			if (!rdev ||
			    !test_and_clear_bit(In_sync, &rdev->flags))
				count++;
			if (rdev) {
				/* Replaced device not technically
				 * faulty, but we need to be sure
				 * it gets removed and never re-added
				 */
				set_bit(Faulty, &rdev->flags);
				sysfs_notify_dirent_safe(
					rdev->sysfs_state);
			}
		}
		if (rdev
		    && rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		}
	}
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}

static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	struct raid1_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (md_integrity_add_rdev(rdev, mddev))
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    rdev->saved_raid_disk < conf->raid_disks &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		first = last = rdev->saved_raid_disk;

	for (mirror = first; mirror <= last; mirror++) {
		p = conf->mirrors + mirror;
		if (!p->rdev) {
			if (mddev->gendisk)
				disk_stack_limits(mddev->gendisk, rdev->bdev,
						  rdev->data_offset << 9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p[conf->raid_disks].rdev == NULL) {
			/* Add this device as a replacement */
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;
			err = 0;
			conf->fullsync = 1;
			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
			break;
		}
	}
	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	print_conf(conf);
	return err;
}

static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct raid1_info *p = conf->mirrors + number;

	if (rdev != p->rdev)
		p = conf->mirrors + conf->raid_disks + number;

	print_conf(conf);
	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->recovery_disabled != conf->recovery_disabled &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		if (!test_bit(RemoveSynchronized, &rdev->flags)) {
			synchronize_rcu();
			if (atomic_read(&rdev->nr_pending)) {
				/* lost the race, try later */
				err = -EBUSY;
				p->rdev = rdev;
				goto abort;
			}
		}
		if (conf->mirrors[conf->raid_disks + number].rdev) {
			/* We just removed a device that is being replaced.
			 * Move down the replacement.  We drain all IO before
			 * doing this to avoid confusion.
			 */
			struct md_rdev *repl =
				conf->mirrors[conf->raid_disks + number].rdev;
			freeze_array(conf, 0);
			if (atomic_read(&repl->nr_pending)) {
				/* It means that some queued IO of retry_list
				 * hold repl. Thus, we cannot set replacement
				 * as NULL, avoiding rdev NULL pointer
				 * dereference in sync_request_write and
				 * handle_write_finished.
				 */
				err = -EBUSY;
				unfreeze_array(conf);
				goto abort;
			}
			clear_bit(Replacement, &repl->flags);
			p->rdev = repl;
			conf->mirrors[conf->raid_disks + number].rdev = NULL;
			unfreeze_array(conf);
		}

		clear_bit(WantReplacement, &rdev->flags);
		err = md_integrity_register(mddev);
	}
abort:

	print_conf(conf);
	return err;
}

static void end_sync_read(struct bio *bio)
{
	struct r1bio *r1_bio = get_resync_r1bio(bio);

	update_head_pos(r1_bio->read_disk, r1_bio);

	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (!bio->bi_status)
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}

static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	sector_t sync_blocks = 0;
	sector_t s = r1_bio->sector;
	long sectors_to_go = r1_bio->sectors;

	/* make sure these bits don't get cleared. */
	do {
		md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
		s += sync_blocks;
		sectors_to_go -= sync_blocks;
	} while (sectors_to_go > 0);
}

static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
{
	if (atomic_dec_and_test(&r1_bio->remaining)) {
		struct mddev *mddev = r1_bio->mddev;
		int s = r1_bio->sectors;

		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, uptodate);
		}
	}
}

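/*
 * Completion handler for resync/recovery writes: record write errors
 * (or newly fixed bad ranges) against the target rdev, then finish
 * the sync r1bio once all writes are down.
 */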
static void end_sync_write(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = get_resync_r1bio(bio);
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	sector_t first_bad;
	int bad_sectors;
	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;

	if (!uptodate) {
		abort_sync_write(mddev, r1_bio);
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				mddev->recovery);
		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
			       &first_bad, &bad_sectors) &&
		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
				r1_bio->sector,
				r1_bio->sectors,
				&first_bad, &bad_sectors)
		)
		set_bit(R1BIO_MadeGood, &r1_bio->state);

	put_sync_write_buf(r1_bio, uptodate);
}

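/*
 * Synchronously read or write one chunk of up to a page during
 * resync/recovery error handling.  On failure the affected range is
 * recorded as bad, and the device is failed if that record cannot be
 * made.  Returns 1 on success, 0 on failure.
 */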
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
			   int sectors, struct page *page, int rw)
{
	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
		/* success */
		return 1;
	if (rw == WRITE) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				rdev->mddev->recovery);
	}
	/* need to record an error - either for the block or the device */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
	return 0;
}

static int fix_sync_read_error(struct r1bio *r1_bio)
{
	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors.  Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
	 * We don't need to check is_badblock() again as we
	 * made sure that anything with a bad block in range
	 * will have bi_end_io clear.
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
	struct page **pages = get_resync_pages(bio)->pages;
	sector_t sect = r1_bio->sector;
	int sectors = r1_bio->sectors;
	int idx = 0;
	struct md_rdev *rdev;

	rdev = conf->mirrors[r1_bio->read_disk].rdev;
	if (test_bit(FailFast, &rdev->flags)) {
		/* Don't try recovering from here - just fail it
		 * ... unless it is the last working device of course */
		md_error(mddev, rdev);
		if (test_bit(Faulty, &rdev->flags))
			/* Don't try to read from here, but make sure
			 * put_buf does its thing
			 */
			bio->bi_end_io = end_sync_write;
	}

	while (sectors) {
		int s = sectors;
		int d = r1_bio->read_disk;
		int success = 0;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;
		do {
			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
				/* No rcu protection needed here devices
				 * can only be removed when no resync is
				 * active, and resync is currently active
				 */
				rdev = conf->mirrors[d].rdev;
				if (sync_page_io(rdev, sect, s<<9,
						 pages[idx],
						 REQ_OP_READ, 0, false)) {
					success = 1;
					break;
				}
			}
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != r1_bio->read_disk);

		if (!success) {
			char b[BDEVNAME_SIZE];
			int abort = 0;
			/* Cannot read from anywhere, this block is lost.
			 * Record a bad block on each device.  If that doesn't
			 * work just disable and interrupt the recovery.
			 * Don't fail devices as that won't really help.
			 */
			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev), bio_devname(bio, b),
					    (unsigned long long)r1_bio->sector);
			for (d = 0; d < conf->raid_disks * 2; d++) {
				rdev = conf->mirrors[d].rdev;
				if (!rdev || test_bit(Faulty, &rdev->flags))
					continue;
				if (!rdev_set_badblocks(rdev, sect, s, 0))
					abort = 1;
			}
			if (abort) {
				conf->recovery_disabled =
					mddev->recovery_disabled;
				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return 0;
			}
			/* Try next page */
			sectors -= s;
			sect += s;
			idx++;
			continue;
		}

		start = d;
		/* write it back and re-read */
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    pages[idx],
					    WRITE) == 0) {
				r1_bio->bios[d]->bi_end_io = NULL;
				rdev_dec_pending(rdev, mddev);
			}
		}
		d = start;
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    pages[idx],
					    READ) != 0)
				atomic_add(s, &rdev->corrected_errors);
		}
		sectors -= s;
		sect += s;
		idx++;
	}
	set_bit(R1BIO_Uptodate, &r1_bio->state);
	bio->bi_status = 0;
	return 1;
}

static void process_checks(struct r1bio *r1_bio)
{
	/* We have read all readable devices.  If we haven't
	 * got the block, then there is no hope left.
	 * If we have, then we want to do a comparison
	 * and skip the write if everything is the same.
	 * If any blocks failed to read, then we need to
	 * attempt an over-write
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int primary;
	int i;
	int vcnt;

	/* Fix variable parts of all bios */
	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
	for (i = 0; i < conf->raid_disks * 2; i++) {
		blk_status_t status;
		struct bio *b = r1_bio->bios[i];
		struct resync_pages *rp = get_resync_pages(b);
		if (b->bi_end_io != end_sync_read)
			continue;
		/* fixup the bio for reuse, but preserve errno */
		status = b->bi_status;
		bio_reset(b);
		b->bi_status = status;
		b->bi_iter.bi_sector = r1_bio->sector +
			conf->mirrors[i].rdev->data_offset;
		bio_set_dev(b, conf->mirrors[i].rdev->bdev);
		b->bi_end_io = end_sync_read;
		rp->raid_bio = r1_bio;
		b->bi_private = rp;

		/* initialize bvec table again */
		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
	}
	for (primary = 0; primary < conf->raid_disks * 2; primary++)
		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
		    !r1_bio->bios[primary]->bi_status) {
			r1_bio->bios[primary]->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
			break;
		}
	r1_bio->read_disk = primary;
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j = 0;
		struct bio *pbio = r1_bio->bios[primary];
		struct bio *sbio = r1_bio->bios[i];
		blk_status_t status = sbio->bi_status;
		struct page **ppages = get_resync_pages(pbio)->pages;
		struct page **spages = get_resync_pages(sbio)->pages;
		struct bio_vec *bi;
		int page_len[RESYNC_PAGES] = { 0 };
		struct bvec_iter_all iter_all;

		if (sbio->bi_end_io != end_sync_read)
			continue;
		/* Now we can 'fixup' the error value */
		sbio->bi_status = 0;

		bio_for_each_segment_all(bi, sbio, iter_all)
			page_len[j++] = bi->bv_len;

		if (!status) {
			for (j = vcnt; j-- ; ) {
				if (memcmp(page_address(ppages[j]),
					   page_address(spages[j]),
					   page_len[j]))
					break;
			}
		} else
			j = 0;
		if (j >= 0)
			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && !status)) {
			/* No need to write to this device. */
			sbio->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
			continue;
		}

		bio_copy_data(sbio, pbio);
	}
}

static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	int i;
	int disks = conf->raid_disks * 2;
	struct bio *wbio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		/* ouch - failed to read all of that. */
		if (!fix_sync_read_error(r1_bio))
			return;

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		process_checks(r1_bio);

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;
		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
			abort_sync_write(mddev, r1_bio);
			continue;
		}

		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
			wbio->bi_opf |= MD_FAILFAST;

		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));

		submit_bio_noacct(wbio);
	}

	put_sync_write_buf(r1_bio, 1);
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems encounter.
 *	3.	Performs writes following reads for array synchronising.
 */
static void fix_read_error(struct r1conf *conf, int read_disk,
			   sector_t sect, int sectors)
{
	struct mddev *mddev = conf->mddev;
	while (sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		struct md_rdev *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			sector_t first_bad;
			int bad_sectors;

			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    (test_bit(In_sync, &rdev->flags) ||
			     (!test_bit(Faulty, &rdev->flags) &&
			      rdev->recovery_offset >= sect + s)) &&
			    is_badblock(rdev, sect, s,
					&first_bad, &bad_sectors) == 0) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				if (sync_page_io(rdev, sect, s<<9,
						 conf->tmppage, REQ_OP_READ, 0, false))
					success = 1;
				rdev_dec_pending(rdev, mddev);
				if (success)
					break;
			} else
				rcu_read_unlock();
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere - mark it bad */
			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
			if (!rdev_set_badblocks(rdev, sect, s, 0))
				md_error(mddev, rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				r1_sync_page_io(rdev, sect, s,
						conf->tmppage, WRITE);
				rdev_dec_pending(rdev, mddev);
			} else
				rcu_read_unlock();
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				if (r1_sync_page_io(rdev, sect, s,
						    conf->tmppage, READ)) {
					atomic_add(s, &rdev->corrected_errors);
					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
						mdname(mddev), s,
						(unsigned long long)(sect +
								     rdev->data_offset),
						bdevname(rdev->bdev, b));
				}
				rdev_dec_pending(rdev, mddev);
			} else
				rcu_read_unlock();
		}
		sectors -= s;
		sect += s;
	}
}

static int narrow_write_error(struct r1bio *r1_bio, int i)
{
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[i].rdev;

	/* bio has the data to be written to device 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks, so each write is rounded to block_sectors granularity
	 * (derived from the bad-block shift and the device's logical
	 * block size), and whole blocks are recorded as bad where a
	 * trimmed write fails.
	 */
	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r1_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = roundup(1 << rdev->badblocks.shift,
				bdev_logical_block_size(rdev->bdev) >> 9);
	sector = r1_bio->sector;
	sectors = ((sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		  - sector;

	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors' */

		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			wbio = bio_clone_fast(r1_bio->behind_master_bio,
					      GFP_NOIO,
					      &mddev->bio_set);
		} else {
			wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
					      &mddev->bio_set);
		}

		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
		wbio->bi_iter.bi_sector = r1_bio->sector;
		wbio->bi_iter.bi_size = r1_bio->sectors << 9;

		bio_trim(wbio, sector - r1_bio->sector, sectors);
		wbio->bi_iter.bi_sector += rdev->data_offset;
		bio_set_dev(wbio, rdev->bdev);

		if (submit_bio_wait(wbio) < 0)
			/* failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}

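/*
 * raid1d helpers: complete r1bios whose writes have finished.  The
 * sync variant updates bad-block records and closes out the resync
 * buffer; the regular variant narrows write errors and, if needed,
 * parks the r1bio on bio_end_io_list until the superblock is clean.
 */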
static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	int s = r1_bio->sectors;
	for (m = 0; m < conf->raid_disks * 2 ; m++) {
		struct md_rdev *rdev = conf->mirrors[m].rdev;
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
		if (!bio->bi_status &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
		}
		if (bio->bi_status &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
		}
	}
	put_buf(r1_bio);
	md_done_sync(conf->mddev, s, 1);
}

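/*
 * Finish a normal write that needed follow-up: clear bad blocks that an
 * IO_MADE_GOOD write repaired, narrow down and record precise write
 * errors, and either queue the r1bio for raid1d or complete it here.
 */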
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m, idx;
	bool fail = false;

	for (m = 0; m < conf->raid_disks * 2 ; m++)
		if (r1_bio->bios[m] == IO_MADE_GOOD) {
			struct md_rdev *rdev = conf->mirrors[m].rdev;
			rdev_clear_badblocks(rdev,
					     r1_bio->sector,
					     r1_bio->sectors, 0);
			rdev_dec_pending(rdev, conf->mddev);
		} else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
			fail = true;
			if (!narrow_write_error(r1_bio, m)) {
				md_error(conf->mddev,
					 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
				set_bit(R1BIO_Degraded, &r1_bio->state);
			}
			rdev_dec_pending(conf->mirrors[m].rdev,
					 conf->mddev);
		}
	if (fail) {
		spin_lock_irq(&conf->device_lock);
		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
		idx = sector_to_idx(r1_bio->sector);
		atomic_inc(&conf->nr_queued[idx]);
		spin_unlock_irq(&conf->device_lock);
		/*
		 * In case freeze_array() is waiting for condition
		 * get_unqueued_pending() == extra to be true.
		 */
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(conf->mddev->thread);
	} else {
		if (test_bit(R1BIO_WriteError, &r1_bio->state))
			close_write(r1_bio);
		raid_end_bio_io(r1_bio);
	}
}

static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
	struct mddev *mddev = conf->mddev;
	struct bio *bio;
	struct md_rdev *rdev;

	clear_bit(R1BIO_ReadError, &r1_bio->state);
	/* we got a read error. Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write
	 * and check it that fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen
	 */

	bio = r1_bio->bios[r1_bio->read_disk];
	bio_put(bio);
	r1_bio->bios[r1_bio->read_disk] = NULL;

	rdev = conf->mirrors[r1_bio->read_disk].rdev;
	if (mddev->ro == 0
	    && !test_bit(FailFast, &rdev->flags)) {
		freeze_array(conf, 1);
		fix_read_error(conf, r1_bio->read_disk,
			       r1_bio->sector, r1_bio->sectors);
		unfreeze_array(conf);
	} else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
		md_error(mddev, rdev);
	} else {
		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
	}

	rdev_dec_pending(rdev, conf->mddev);
	allow_barrier(conf, r1_bio->sector);
	bio = r1_bio->master_bio;

	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
	r1_bio->state = 0;
	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
}

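/*
 * This is our raid1 kernel thread.  It retires writes queued on
 * bio_end_io_list, flushes pending writes, and retries r1bios that need
 * further handling: sync writes, write errors and read errors.
 */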
static void raid1d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;
	int idx;

	md_check_recovery(mddev);

	if (!list_empty_careful(&conf->bio_end_io_list) &&
	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
		LIST_HEAD(tmp);
		spin_lock_irqsave(&conf->device_lock, flags);
		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
			list_splice_init(&conf->bio_end_io_list, &tmp);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		while (!list_empty(&tmp)) {
			r1_bio = list_first_entry(&tmp, struct r1bio,
						  retry_list);
			list_del(&r1_bio->retry_list);
			idx = sector_to_idx(r1_bio->sector);
			atomic_dec(&conf->nr_queued[idx]);
			if (mddev->degraded)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			if (test_bit(R1BIO_WriteError, &r1_bio->state))
				close_write(r1_bio);
			raid_end_bio_io(r1_bio);
		}
	}

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
		list_del(head->prev);
		idx = sector_to_idx(r1_bio->sector);
		atomic_dec(&conf->nr_queued[idx]);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);
		else
			WARN_ON_ONCE(1);

		cond_resched();
		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}

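/*
 * Allocate the pool of resync buffers (RESYNC_DEPTH r1bios) used by
 * raid1_sync_request().
 */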
static int init_resync(struct r1conf *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(mempool_initialized(&conf->r1buf_pool));

	return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
			    r1buf_pool_free, conf->poolinfo);
}

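/*
 * Fetch an r1bio from the resync buffer pool and reset each of its bios
 * for reuse, preserving the resync_pages attached via bi_private.
 */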
static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
{
	struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
	struct resync_pages *rps;
	struct bio *bio;
	int i;

	for (i = conf->poolinfo->raid_disks; i--; ) {
		bio = r1bio->bios[i];
		rps = bio->bi_private;
		bio_reset(bio);
		bio->bi_private = rps;
	}
	r1bio->master_bio = NULL;
	return r1bio;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */
static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
				   int *skipped)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	sector_t sync_blocks;
	int still_degraded = 0;
	int good_sectors = RESYNC_SECTORS;
	int min_bad = 0;
	int idx = sector_to_idx(sector_nr);
	int page_idx = 0;

	if (!mempool_initialized(&conf->r1buf_pool))
		if (init_resync(conf))
			return 0;

	max_sector = mddev->dev_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync
		 */
		if (mddev->curr_resync < max_sector)
			md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					   &sync_blocks, 1);
		else
			conf->fullsync = 0;

		md_bitmap_close_sync(mddev->bitmap);
		close_sync(conf);

		if (mddev_is_clustered(mddev)) {
			conf->cluster_sync_low = 0;
			conf->cluster_sync_high = 0;
		}
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}

	/* before building a request, check if we can skip these blocks..
	 * This call to md_bitmap_start_sync doesn't actually record anything
	 */
	if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}

	/*
	 * If there is non-resync activity waiting for a turn, then let it
	 * through before starting on this new sync request.
	 */
	if (atomic_read(&conf->nr_waiting[idx]))
		schedule_timeout_uninterruptible(1);

	/* we are incrementing sector_nr below. To be safe, we check against
	 * sector_nr + two times RESYNC_SECTORS
	 */
	md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));

	if (raise_barrier(conf, sector_nr))
		return 0;

	r1_bio = raid1_alloc_init_r1buf(conf);

	rcu_read_lock();
	/*
	 * If we get a correctably read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we need to read from the same device again,
	 * or from a different device.  This will take a little longer
	 * ... maybe...
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);
	/* make sure good_sectors won't go across barrier unit boundary */
	good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev;
		bio = r1_bio->bios[i];

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				still_degraded = 1;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			sector_t first_bad = MaxSector;
			int bad_sectors;

			if (is_badblock(rdev, sector_nr, good_sectors,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector_nr)
					good_sectors = first_bad - sector_nr;
				else {
					bad_sectors -= (sector_nr - first_bad);
					if (min_bad == 0 ||
					    min_bad > bad_sectors)
						min_bad = bad_sectors;
				}
			}
			if (sector_nr < first_bad) {
				if (test_bit(WriteMostly, &rdev->flags)) {
					if (wonly < 0)
						wonly = i;
				} else {
					if (disk < 0)
						disk = i;
				}
				bio_set_op_attrs(bio, REQ_OP_READ, 0);
				bio->bi_end_io = end_sync_read;
				read_targets++;
			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
				   test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
				   !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is suitable for reading (In_sync),
				 * but has bad block(s) here. Let's try to correct them,
				 * if we are doing resync or repair. Otherwise, leave
				 * this device alone for this sync request.
				 */
				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
				bio->bi_end_io = end_sync_write;
				write_targets++;
			}
		}
		if (rdev && bio->bi_end_io) {
			atomic_inc(&rdev->nr_pending);
			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
			bio_set_dev(bio, rdev->bdev);
			if (test_bit(FailFast, &rdev->flags))
				bio->bi_opf |= MD_FAILFAST;
		}
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets
		 */
		int ok = 1;
		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
				struct md_rdev *rdev = conf->mirrors[i].rdev;
				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0
					) && ok;
			}
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		*skipped = 1;
		put_buf(r1_bio);

		if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * fail the really bad ones ???
			 */
			conf->recovery_disabled = mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			return 0;
		} else
			return min_bad;

	}
	if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough sectors to reach the next
		 * bad->clean boundary */
		good_sectors = min_bad;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets - 1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv;
		if (min_bad > 0)
			max_sector = sector_nr + min_bad;
		rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
						  &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			if ((len >> 9) > sync_blocks)
				len = sync_blocks<<9;
		}

		for (i = 0 ; i < conf->raid_disks * 2; i++) {
			struct resync_pages *rp;

			bio = r1_bio->bios[i];
			rp = get_resync_pages(bio);
			if (bio->bi_end_io) {
				page = resync_fetch_page(rp, page_idx);

				/*
				 * won't fail because the vec table is big
				 * enough to hold all these pages
				 */
				bio_add_page(bio, page, len, 0);
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (++page_idx < RESYNC_PAGES);

	r1_bio->sectors = nr_sectors;

	if (mddev_is_clustered(mddev) &&
	    conf->cluster_sync_high < sector_nr + nr_sectors) {
		conf->cluster_sync_low = mddev->curr_resync_completed;
		conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
		/* Send resync message */
		md_cluster_ops->resync_info_update(mddev,
						   conf->cluster_sync_low,
						   conf->cluster_sync_high);
	}

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				read_targets--;
				md_sync_acct_bio(bio, nr_sectors);
				if (read_targets == 1)
					bio->bi_opf &= ~MD_FAILFAST;
				submit_bio_noacct(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct_bio(bio, nr_sectors);
		if (read_targets == 1)
			bio->bi_opf &= ~MD_FAILFAST;
		submit_bio_noacct(bio);
	}
	return nr_sectors;
}

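/*
 * Report the array size: the explicitly requested size if one is given,
 * otherwise the per-device size (raid1 mirrors, so it does not scale with
 * the number of disks).
 */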
static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}

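/*
 * Allocate and initialise the r1conf for this array: barrier buckets,
 * mirror slots (including replacement slots), mempools and the raid1d
 * thread.  Returns an ERR_PTR on failure.
 */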
static struct r1conf *setup_conf(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct raid1_info *disk;
	struct md_rdev *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
	if (!conf)
		goto abort;

	conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_pending)
		goto abort;

	conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_waiting)
		goto abort;

	conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
				  sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_queued)
		goto abort;

	conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
				sizeof(atomic_t), GFP_KERNEL);
	if (!conf->barrier)
		goto abort;

	conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
					    mddev->raid_disks, 2),
				GFP_KERNEL);
	if (!conf->mirrors)
		goto abort;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto abort;

	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto abort;
	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
	err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
			   rbio_pool_free, conf->poolinfo);
	if (err)
		goto abort;

	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
	if (err)
		goto abort;

	conf->poolinfo->mddev = mddev;

	err = -EINVAL;
	spin_lock_init(&conf->device_lock);
	rdev_for_each(rdev, mddev) {
		int disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		if (test_bit(Replacement, &rdev->flags))
			disk = conf->mirrors + mddev->raid_disks + disk_idx;
		else
			disk = conf->mirrors + disk_idx;

		if (disk->rdev)
			goto abort;
		disk->rdev = rdev;
		disk->head_position = 0;
		disk->seq_start = MaxSector;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	INIT_LIST_HEAD(&conf->retry_list);
	INIT_LIST_HEAD(&conf->bio_end_io_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	conf->pending_count = 0;
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	err = -EIO;
	for (i = 0; i < conf->raid_disks * 2; i++) {

		disk = conf->mirrors + i;

		if (i < conf->raid_disks &&
		    disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
			if (!disk->rdev) {
				/* No original, just make the replacement
				 * a recovering spare
				 */
				disk->rdev =
					disk[conf->raid_disks].rdev;
				disk[conf->raid_disks].rdev = NULL;
			} else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
				goto abort;
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			if (disk->rdev &&
			    (disk->rdev->saved_raid_disk < 0))
				conf->fullsync = 1;
		}
	}

	err = -ENOMEM;
	conf->thread = md_register_thread(raid1d, mddev, "raid1");
	if (!conf->thread)
		goto abort;

	return conf;

 abort:
	if (conf) {
		mempool_exit(&conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf->nr_pending);
		kfree(conf->nr_waiting);
		kfree(conf->nr_queued);
		kfree(conf->barrier);
		bioset_exit(&conf->bio_split);
		kfree(conf);
	}
	return ERR_PTR(err);
}

static void raid1_free(struct mddev *mddev, void *priv);
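/*
 * Start the array: validate the level and reshape state, build (or reuse)
 * the r1conf, stack queue limits from the member devices, count degraded
 * slots, and register the md integrity profile.
 */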
static int raid1_run(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct md_rdev *rdev;
	int ret;
	bool discard_supported = false;

	if (mddev->level != 1) {
		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
			mdname(mddev), mddev->level);
		return -EIO;
	}
	if (mddev->reshape_position != MaxSector) {
		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
			mdname(mddev));
		return -EIO;
	}
	if (mddev_init_writes_pending(mddev) < 0)
		return -ENOMEM;

	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in raid1_free()]
	 */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (mddev->queue) {
		blk_queue_max_write_same_sectors(mddev->queue, 0);
		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
	}

	rdev_for_each(rdev, mddev) {
		if (!mddev->gendisk)
			continue;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
			discard_supported = true;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->mirrors[i].rdev == NULL ||
		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			mddev->degraded++;
	/*
	 * RAID1 needs at least one active disk
	 */
	if (conf->raid_disks - mddev->degraded < 1) {
		ret = -EINVAL;
		goto abort;
	}

	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	if (mddev->recovery_cp != MaxSector)
		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
			mdname(mddev));
	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
		mdname(mddev), mddev->raid_disks - mddev->degraded,
		mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;
	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);

	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

	if (mddev->queue) {
		if (discard_supported)
			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
					   mddev->queue);
		else
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
					     mddev->queue);
	}

	ret = md_integrity_register(mddev);
	if (ret) {
		md_unregister_thread(&mddev->thread);
		goto abort;
	}
	return 0;

abort:
	raid1_free(mddev, conf);
	return ret;
}

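/* Tear down everything that setup_conf() allocated. */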
static void raid1_free(struct mddev *mddev, void *priv)
{
	struct r1conf *conf = priv;

	mempool_exit(&conf->r1bio_pool);
	kfree(conf->mirrors);
	safe_put_page(conf->tmppage);
	kfree(conf->poolinfo);
	kfree(conf->nr_pending);
	kfree(conf->nr_waiting);
	kfree(conf->nr_queued);
	kfree(conf->barrier);
	bioset_exit(&conf->bio_split);
	kfree(conf);
}

static int raid1_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should certainly need to
	 * update all the space allocation - but only if it is shared
	 * with anything
	 */
	sector_t newsize = raid1_size(mddev, sectors, 0);
	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static int raid1_reshape(struct mddev *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if this fails - we really don't
	 * care.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices get moved to one end of the array.
	 */
	mempool_t newpool, oldpool;
	struct pool_info *newpoolinfo;
	struct raid1_info *newmirrors;
	struct r1conf *conf = mddev->private;
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2;
	int ret;

	memset(&newpool, 0, sizeof(newpool));
	memset(&oldpool, 0, sizeof(oldpool));

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	if (!mddev_is_clustered(mddev))
		md_allow_write(mddev);

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks * 2;

	ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
			   rbio_pool_free, newpoolinfo);
	if (ret) {
		kfree(newpoolinfo);
		return ret;
	}
	newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
					 raid_disks, 2),
			     GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_exit(&newpool);
		return -ENOMEM;
	}

	freeze_array(conf, 0);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++) {
		struct md_rdev *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			sysfs_unlink_rdev(mddev, rdev);
			rdev->raid_disk = d2;
			sysfs_unlink_rdev(mddev, rdev);
			if (sysfs_link_rdev(mddev, rdev))
				pr_warn("md/raid1:%s: cannot register rd%d\n",
					mdname(mddev), rdev->raid_disk);
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	unfreeze_array(conf);

	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_exit(&oldpool);
	return 0;
}

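/*
 * Freeze or resume IO on the array; the md core calls this around
 * suspend/resume and other operations that must not race with IO.
 */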
static void raid1_quiesce(struct mddev *mddev, int quiesce)
{
	struct r1conf *conf = mddev->private;

	if (quiesce)
		freeze_array(conf, 0);
	else
		unfreeze_array(conf);
}

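/*
 * Takeover: convert a compatible array (currently only raid5 with exactly
 * two devices) to raid1 in place.
 */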
static void *raid1_takeover(struct mddev *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		struct r1conf *conf;
		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf)) {
			/* Array must appear to be quiesced */
			conf->array_frozen = 1;
			mddev_clear_unsupported_flags(mddev,
				UNSUPPORTED_MDDEV_FLAGS);
		}
		return conf;
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= raid1_make_request,
	.run		= raid1_run,
	.free		= raid1_free,
	.status		= raid1_status,
	.error_handler	= raid1_error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk = raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= raid1_sync_request,
	.resize		= raid1_resize,
	.size		= raid1_size,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3");
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");

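/*
 * max_queued_requests comes from the shared raid1-10.c included above; it
 * bounds the number of plugged write requests before writers must wait.
 */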
module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);