/*
 * raid1.c : Multiple Devices driver for Linux (RAID-1 personality).
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/interval_tree_generic.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...) \
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
		     START, LAST, static inline, raid1_rb);

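/*
 * Per-rdev tracking of in-flight write ranges, used when writes to the same
 * sectors must be serialized (write-behind devices and serialize_policy).
 * Ranges live in an interval tree protected by serial_lock; overlapping
 * writers sleep on serial_io_wait until the earlier range is removed.
 */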
static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
				struct serial_info *si, int idx)
{
	unsigned long flags;
	int ret = 0;
	sector_t lo = r1_bio->sector;
	sector_t hi = lo + r1_bio->sectors;
	struct serial_in_rdev *serial = &rdev->serial[idx];

	spin_lock_irqsave(&serial->serial_lock, flags);

	if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
		ret = -EBUSY;
	else {
		si->start = lo;
		si->last = hi;
		raid1_rb_insert(si, &serial->serial_rb);
	}
	spin_unlock_irqrestore(&serial->serial_lock, flags);

	return ret;
}

static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
{
	struct mddev *mddev = rdev->mddev;
	struct serial_info *si;
	int idx = sector_to_idx(r1_bio->sector);
	struct serial_in_rdev *serial = &rdev->serial[idx];

	if (WARN_ON(!mddev->serial_info_pool))
		return;
	si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
	wait_event(serial->serial_io_wait,
		   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
}

static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
	struct serial_info *si;
	unsigned long flags;
	int found = 0;
	struct mddev *mddev = rdev->mddev;
	int idx = sector_to_idx(lo);
	struct serial_in_rdev *serial = &rdev->serial[idx];

	spin_lock_irqsave(&serial->serial_lock, flags);
	for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
	     si; si = raid1_rb_iter_next(si, lo, hi)) {
		if (si->start == lo && si->last == hi) {
			raid1_rb_remove(si, &serial->serial_rb);
			mempool_free(si, mddev->serial_info_pool);
			found = 1;
			break;
		}
	}
	if (!found)
		WARN(1, "The write IO is not recorded for serialization\n");
	spin_unlock_irqrestore(&serial->serial_lock, flags);
	wake_up(&serial->serial_io_wait);
}

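/* For a resync bio, the owning r1bio can be found via its resync_pages. */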
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	return kzalloc(size, gfp_flags);
}

#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)

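/*
 * Allocate the buffers used for resync/recovery.  Each r1bio gets one bio
 * per device plus a resync_pages descriptor.  For a user-requested
 * check/repair every device needs its own pages so the copies can be
 * compared; for a plain sync only one page set is allocated and shared.
 */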
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int j;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
			    gfp_flags);
	if (!rps)
		goto out_free_r1bio;

	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}

	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < pi->raid_disks; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	kfree(rps);

out_free_r1bio:
	rbio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = pi->raid_disks; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_put(r1bio->bios[i]);
	}

	/* the resync_pages array was allocated as one block, so one kfree() */
	kfree(rp);

	rbio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, &conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, &conf->r1buf_pool);

	lower_barrier(conf, sect);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

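/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and can return the final status to the original bio's owner.
 */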
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
		bio_end_io_acct(bio, r1_bio->start_time);
	bio_endio(bio);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;

	/* if nobody has completed the master bio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.  All I/Os, even write-behind writes, are done.
	 */
	allow_barrier(conf, r1_bio->sector);

	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio.
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

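/*
 * Completion handler for a per-device read bio.  On success the master bio
 * is completed; on failure the r1bio is handed to raid1d so the read can be
 * retried on another mirror (or, if this was the last working device,
 * completed anyway since retrying cannot help).
 */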
static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error - reschedule, keeping the rdev reference
		 * until the retry has picked another mirror
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	/* free the pages of a write-behind copy, if any */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			   r1_bio->sectors,
			   !test_bit(R1BIO_Degraded, &r1_bio->state),
			   test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

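/*
 * Completion handler for a per-device write bio.  Records write errors and
 * bad-block fixups, handles write-behind accounting and serialization, and
 * completes the master bio once all mirrors have finished.
 */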
444static void raid1_end_write_request(struct bio *bio)
445{
446 struct r1bio *r1_bio = bio->bi_private;
447 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
448 struct r1conf *conf = r1_bio->mddev->private;
449 struct bio *to_put = NULL;
450 int mirror = find_bio_disk(r1_bio, bio);
451 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
452 bool discard_error;
453 sector_t lo = r1_bio->sector;
454 sector_t hi = r1_bio->sector + r1_bio->sectors;
455
456 discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
457
458
459
460
461 if (bio->bi_status && !discard_error) {
462 set_bit(WriteErrorSeen, &rdev->flags);
463 if (!test_and_set_bit(WantReplacement, &rdev->flags))
464 set_bit(MD_RECOVERY_NEEDED, &
465 conf->mddev->recovery);
466
467 if (test_bit(FailFast, &rdev->flags) &&
468 (bio->bi_opf & MD_FAILFAST) &&
469
470 !test_bit(WriteMostly, &rdev->flags)) {
471 md_error(r1_bio->mddev, rdev);
472 }
473
474
475
476
477
478 if (!test_bit(Faulty, &rdev->flags))
479 set_bit(R1BIO_WriteError, &r1_bio->state);
480 else {
481
482 set_bit(R1BIO_Degraded, &r1_bio->state);
483
484 r1_bio->bios[mirror] = NULL;
485 to_put = bio;
486 }
487 } else {
488
489
490
491
492
493
494
495
496
497
498 sector_t first_bad;
499 int bad_sectors;
500
501 r1_bio->bios[mirror] = NULL;
502 to_put = bio;
503
504
505
506
507
508
509
510
511 if (test_bit(In_sync, &rdev->flags) &&
512 !test_bit(Faulty, &rdev->flags))
513 set_bit(R1BIO_Uptodate, &r1_bio->state);
514
515
516 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
517 &first_bad, &bad_sectors) && !discard_error) {
518 r1_bio->bios[mirror] = IO_MADE_GOOD;
519 set_bit(R1BIO_MadeGood, &r1_bio->state);
520 }
521 }
522
523 if (behind) {
524 if (test_bit(CollisionCheck, &rdev->flags))
525 remove_serial(rdev, lo, hi);
526 if (test_bit(WriteMostly, &rdev->flags))
527 atomic_dec(&r1_bio->behind_remaining);
528
529
530
531
532
533
534
535
536 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
537 test_bit(R1BIO_Uptodate, &r1_bio->state)) {
538
539 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
540 struct bio *mbio = r1_bio->master_bio;
541 pr_debug("raid1: behind end write sectors"
542 " %llu-%llu\n",
543 (unsigned long long) mbio->bi_iter.bi_sector,
544 (unsigned long long) bio_end_sector(mbio) - 1);
545 call_bio_endio(r1_bio);
546 }
547 }
548 } else if (rdev->mddev->serialize_policy)
549 remove_serial(rdev, lo, hi);
550 if (r1_bio->bios[mirror] == NULL)
551 rdev_dec_pending(rdev, conf->mddev);
552
553
554
555
556
557 r1_bio_write_done(r1_bio);
558
559 if (to_put)
560 bio_put(to_put);
561}
562
static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}

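/*
 * This routine returns the disk from which the requested read should be done.
 * If the array is degraded or a resync covers the range, the first usable
 * disk is chosen; otherwise the code prefers the disk whose head is closest
 * to the requested sector, taking pending I/O, bad blocks, write-mostly
 * flags and sequential-read affinity into account.
 *
 * Returns the disk number, or -1 if no suitable disk is available, and sets
 * *max_sectors to the number of sectors that can safely be read from it.
 */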
596static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
597{
598 const sector_t this_sector = r1_bio->sector;
599 int sectors;
600 int best_good_sectors;
601 int best_disk, best_dist_disk, best_pending_disk;
602 int has_nonrot_disk;
603 int disk;
604 sector_t best_dist;
605 unsigned int min_pending;
606 struct md_rdev *rdev;
607 int choose_first;
608 int choose_next_idle;
609
610 rcu_read_lock();
611
612
613
614
615
616 retry:
617 sectors = r1_bio->sectors;
618 best_disk = -1;
619 best_dist_disk = -1;
620 best_dist = MaxSector;
621 best_pending_disk = -1;
622 min_pending = UINT_MAX;
623 best_good_sectors = 0;
624 has_nonrot_disk = 0;
625 choose_next_idle = 0;
626 clear_bit(R1BIO_FailFast, &r1_bio->state);
627
628 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
629 (mddev_is_clustered(conf->mddev) &&
630 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
631 this_sector + sectors)))
632 choose_first = 1;
633 else
634 choose_first = 0;
635
636 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
637 sector_t dist;
638 sector_t first_bad;
639 int bad_sectors;
640 unsigned int pending;
641 bool nonrot;
642
643 rdev = rcu_dereference(conf->mirrors[disk].rdev);
644 if (r1_bio->bios[disk] == IO_BLOCKED
645 || rdev == NULL
646 || test_bit(Faulty, &rdev->flags))
647 continue;
648 if (!test_bit(In_sync, &rdev->flags) &&
649 rdev->recovery_offset < this_sector + sectors)
650 continue;
651 if (test_bit(WriteMostly, &rdev->flags)) {
652
653
654 if (best_dist_disk < 0) {
655 if (is_badblock(rdev, this_sector, sectors,
656 &first_bad, &bad_sectors)) {
657 if (first_bad <= this_sector)
658
659 continue;
660 best_good_sectors = first_bad - this_sector;
661 } else
662 best_good_sectors = sectors;
663 best_dist_disk = disk;
664 best_pending_disk = disk;
665 }
666 continue;
667 }
668
669
670
671 if (is_badblock(rdev, this_sector, sectors,
672 &first_bad, &bad_sectors)) {
673 if (best_dist < MaxSector)
674
675 continue;
676 if (first_bad <= this_sector) {
677
678
679
680
681 bad_sectors -= (this_sector - first_bad);
682 if (choose_first && sectors > bad_sectors)
683 sectors = bad_sectors;
684 if (best_good_sectors > sectors)
685 best_good_sectors = sectors;
686
687 } else {
688 sector_t good_sectors = first_bad - this_sector;
689 if (good_sectors > best_good_sectors) {
690 best_good_sectors = good_sectors;
691 best_disk = disk;
692 }
693 if (choose_first)
694 break;
695 }
696 continue;
697 } else {
698 if ((sectors > best_good_sectors) && (best_disk >= 0))
699 best_disk = -1;
700 best_good_sectors = sectors;
701 }
702
703 if (best_disk >= 0)
704
705 set_bit(R1BIO_FailFast, &r1_bio->state);
706
707 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
708 has_nonrot_disk |= nonrot;
709 pending = atomic_read(&rdev->nr_pending);
710 dist = abs(this_sector - conf->mirrors[disk].head_position);
711 if (choose_first) {
712 best_disk = disk;
713 break;
714 }
715
716 if (conf->mirrors[disk].next_seq_sect == this_sector
717 || dist == 0) {
718 int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
719 struct raid1_info *mirror = &conf->mirrors[disk];
720
721 best_disk = disk;
722
723
724
725
726
727
728
729
730
731
732
733
734
735 if (nonrot && opt_iosize > 0 &&
736 mirror->seq_start != MaxSector &&
737 mirror->next_seq_sect > opt_iosize &&
738 mirror->next_seq_sect - opt_iosize >=
739 mirror->seq_start) {
740 choose_next_idle = 1;
741 continue;
742 }
743 break;
744 }
745
746 if (choose_next_idle)
747 continue;
748
749 if (min_pending > pending) {
750 min_pending = pending;
751 best_pending_disk = disk;
752 }
753
754 if (dist < best_dist) {
755 best_dist = dist;
756 best_dist_disk = disk;
757 }
758 }
759
760
761
762
763
764
765
766 if (best_disk == -1) {
767 if (has_nonrot_disk || min_pending == 0)
768 best_disk = best_pending_disk;
769 else
770 best_disk = best_dist_disk;
771 }
772
773 if (best_disk >= 0) {
774 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
775 if (!rdev)
776 goto retry;
777 atomic_inc(&rdev->nr_pending);
778 sectors = best_good_sectors;
779
780 if (conf->mirrors[best_disk].next_seq_sect != this_sector)
781 conf->mirrors[best_disk].seq_start = this_sector;
782
783 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
784 }
785 rcu_read_unlock();
786 *max_sectors = sectors;
787
788 return best_disk;
789}
790
791static void flush_bio_list(struct r1conf *conf, struct bio *bio)
792{
793
794 md_bitmap_unplug(conf->mddev->bitmap);
795 wake_up(&conf->wait_barrier);
796
797 while (bio) {
798 struct bio *next = bio->bi_next;
799 struct md_rdev *rdev = (void *)bio->bi_bdev;
800 bio->bi_next = NULL;
801 bio_set_dev(bio, rdev->bdev);
802 if (test_bit(Faulty, &rdev->flags)) {
803 bio_io_error(bio);
804 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
805 !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
806
807 bio_endio(bio);
808 else
809 submit_bio_noacct(bio);
810 bio = next;
811 cond_resched();
812 }
813}
814
815static void flush_pending_writes(struct r1conf *conf)
816{
817
818
819
820 spin_lock_irq(&conf->device_lock);
821
822 if (conf->pending_bio_list.head) {
823 struct blk_plug plug;
824 struct bio *bio;
825
826 bio = bio_list_get(&conf->pending_bio_list);
827 conf->pending_count = 0;
828 spin_unlock_irq(&conf->device_lock);
829
830
831
832
833
834
835
836
837
838
839 __set_current_state(TASK_RUNNING);
840 blk_start_plug(&plug);
841 flush_bio_list(conf, bio);
842 blk_finish_plug(&plug);
843 } else
844 spin_unlock_irq(&conf->device_lock);
845}
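
/*
 * Barriers between normal I/O and resync/recovery, bucketed by
 * BARRIER_UNIT_SECTOR_SIZE regions:
 *
 *  - raise_barrier() is called by the resync thread before it touches a
 *    barrier unit; it waits until no normal I/O is pending in that bucket
 *    and at most RESYNC_DEPTH barriers are active.
 *  - lower_barrier() drops the barrier when the resync I/O for the unit
 *    has completed.
 *  - _wait_barrier()/wait_read_barrier() are used by normal I/O; they bump
 *    nr_pending for the bucket and only take the slow path (resync_lock)
 *    when a barrier is raised or the array is frozen.
 *  - allow_barrier() drops nr_pending and wakes up any waiters.
 *
 * freeze_array()/unfreeze_array() stop all new I/O across every bucket so
 * that error handling can run with the array quiescent.
 */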
871static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
872{
873 int idx = sector_to_idx(sector_nr);
874
875 spin_lock_irq(&conf->resync_lock);
876
877
878 wait_event_lock_irq(conf->wait_barrier,
879 !atomic_read(&conf->nr_waiting[idx]),
880 conf->resync_lock);
881
882
883 atomic_inc(&conf->barrier[idx]);
884
885
886
887
888
889
890
891
892 smp_mb__after_atomic();
893
894
895
896
897
898
899
900
901 wait_event_lock_irq(conf->wait_barrier,
902 (!conf->array_frozen &&
903 !atomic_read(&conf->nr_pending[idx]) &&
904 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
905 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
906 conf->resync_lock);
907
908 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
909 atomic_dec(&conf->barrier[idx]);
910 spin_unlock_irq(&conf->resync_lock);
911 wake_up(&conf->wait_barrier);
912 return -EINTR;
913 }
914
915 atomic_inc(&conf->nr_sync_pending);
916 spin_unlock_irq(&conf->resync_lock);
917
918 return 0;
919}
920
921static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
922{
923 int idx = sector_to_idx(sector_nr);
924
925 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
926
927 atomic_dec(&conf->barrier[idx]);
928 atomic_dec(&conf->nr_sync_pending);
929 wake_up(&conf->wait_barrier);
930}
931
932static void _wait_barrier(struct r1conf *conf, int idx)
933{
934
935
936
937
938
939
940
941
942 atomic_inc(&conf->nr_pending[idx]);
943
944
945
946
947
948
949
950
951 smp_mb__after_atomic();
952
953
954
955
956
957
958
959
960
961
962 if (!READ_ONCE(conf->array_frozen) &&
963 !atomic_read(&conf->barrier[idx]))
964 return;
965
966
967
968
969
970
971
972
973 spin_lock_irq(&conf->resync_lock);
974 atomic_inc(&conf->nr_waiting[idx]);
975 atomic_dec(&conf->nr_pending[idx]);
976
977
978
979
980 wake_up(&conf->wait_barrier);
981
982 wait_event_lock_irq(conf->wait_barrier,
983 !conf->array_frozen &&
984 !atomic_read(&conf->barrier[idx]),
985 conf->resync_lock);
986 atomic_inc(&conf->nr_pending[idx]);
987 atomic_dec(&conf->nr_waiting[idx]);
988 spin_unlock_irq(&conf->resync_lock);
989}
990
991static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
992{
993 int idx = sector_to_idx(sector_nr);
994
995
996
997
998
999
1000
1001
1002 atomic_inc(&conf->nr_pending[idx]);
1003
1004 if (!READ_ONCE(conf->array_frozen))
1005 return;
1006
1007 spin_lock_irq(&conf->resync_lock);
1008 atomic_inc(&conf->nr_waiting[idx]);
1009 atomic_dec(&conf->nr_pending[idx]);
1010
1011
1012
1013
1014 wake_up(&conf->wait_barrier);
1015
1016 wait_event_lock_irq(conf->wait_barrier,
1017 !conf->array_frozen,
1018 conf->resync_lock);
1019 atomic_inc(&conf->nr_pending[idx]);
1020 atomic_dec(&conf->nr_waiting[idx]);
1021 spin_unlock_irq(&conf->resync_lock);
1022}
1023
1024static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
1025{
1026 int idx = sector_to_idx(sector_nr);
1027
1028 _wait_barrier(conf, idx);
1029}
1030
1031static void _allow_barrier(struct r1conf *conf, int idx)
1032{
1033 atomic_dec(&conf->nr_pending[idx]);
1034 wake_up(&conf->wait_barrier);
1035}
1036
1037static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1038{
1039 int idx = sector_to_idx(sector_nr);
1040
1041 _allow_barrier(conf, idx);
1042}
1043
1044
1045static int get_unqueued_pending(struct r1conf *conf)
1046{
1047 int idx, ret;
1048
1049 ret = atomic_read(&conf->nr_sync_pending);
1050 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1051 ret += atomic_read(&conf->nr_pending[idx]) -
1052 atomic_read(&conf->nr_queued[idx]);
1053
1054 return ret;
1055}
1056
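/*
 * freeze_array() waits until the number of pending-but-unqueued requests
 * drops to 'extra' (the caller's own in-flight requests) and blocks all new
 * normal I/O until unfreeze_array() is called.  Requests already queued on
 * conf->retry_list / bio_end_io_list are not waited for, since raid1d may
 * itself be the caller.
 */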
1057static void freeze_array(struct r1conf *conf, int extra)
1058{
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082 spin_lock_irq(&conf->resync_lock);
1083 conf->array_frozen = 1;
1084 raid1_log(conf->mddev, "wait freeze");
1085 wait_event_lock_irq_cmd(
1086 conf->wait_barrier,
1087 get_unqueued_pending(conf) == extra,
1088 conf->resync_lock,
1089 flush_pending_writes(conf));
1090 spin_unlock_irq(&conf->resync_lock);
1091}
1092static void unfreeze_array(struct r1conf *conf)
1093{
1094
1095 spin_lock_irq(&conf->resync_lock);
1096 conf->array_frozen = 0;
1097 spin_unlock_irq(&conf->resync_lock);
1098 wake_up(&conf->wait_barrier);
1099}
1100
1101static void alloc_behind_master_bio(struct r1bio *r1_bio,
1102 struct bio *bio)
1103{
1104 int size = bio->bi_iter.bi_size;
1105 unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1106 int i = 0;
1107 struct bio *behind_bio = NULL;
1108
1109 behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
1110 if (!behind_bio)
1111 return;
1112
1113
1114 if (!bio_has_data(bio)) {
1115 behind_bio->bi_iter.bi_size = size;
1116 goto skip_copy;
1117 }
1118
1119 behind_bio->bi_write_hint = bio->bi_write_hint;
1120
1121 while (i < vcnt && size) {
1122 struct page *page;
1123 int len = min_t(int, PAGE_SIZE, size);
1124
1125 page = alloc_page(GFP_NOIO);
1126 if (unlikely(!page))
1127 goto free_pages;
1128
1129 bio_add_page(behind_bio, page, len, 0);
1130
1131 size -= len;
1132 i++;
1133 }
1134
1135 bio_copy_data(behind_bio, bio);
1136skip_copy:
1137 r1_bio->behind_master_bio = behind_bio;
1138 set_bit(R1BIO_BehindIO, &r1_bio->state);
1139
1140 return;
1141
1142free_pages:
1143 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1144 bio->bi_iter.bi_size);
1145 bio_free_pages(behind_bio);
1146 bio_put(behind_bio);
1147}
1148
1149struct raid1_plug_cb {
1150 struct blk_plug_cb cb;
1151 struct bio_list pending;
1152 int pending_cnt;
1153};
1154
1155static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1156{
1157 struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1158 cb);
1159 struct mddev *mddev = plug->cb.data;
1160 struct r1conf *conf = mddev->private;
1161 struct bio *bio;
1162
1163 if (from_schedule || current->bio_list) {
1164 spin_lock_irq(&conf->device_lock);
1165 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1166 conf->pending_count += plug->pending_cnt;
1167 spin_unlock_irq(&conf->device_lock);
1168 wake_up(&conf->wait_barrier);
1169 md_wakeup_thread(mddev->thread);
1170 kfree(plug);
1171 return;
1172 }
1173
1174
1175 bio = bio_list_get(&plug->pending);
1176 flush_bio_list(conf, bio);
1177 kfree(plug);
1178}
1179
1180static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1181{
1182 r1_bio->master_bio = bio;
1183 r1_bio->sectors = bio_sectors(bio);
1184 r1_bio->state = 0;
1185 r1_bio->mddev = mddev;
1186 r1_bio->sector = bio->bi_iter.bi_sector;
1187}
1188
1189static inline struct r1bio *
1190alloc_r1bio(struct mddev *mddev, struct bio *bio)
1191{
1192 struct r1conf *conf = mddev->private;
1193 struct r1bio *r1_bio;
1194
1195 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
1196
1197 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1198 init_r1bio(r1_bio, mddev, bio);
1199 return r1_bio;
1200}
1201
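/*
 * raid1_read_request() services one read: wait for any barrier covering the
 * sector, pick a mirror with read_balance(), split the bio if the chosen
 * mirror can only serve part of it, then clone and submit the read.
 * A non-NULL r1_bio means this is a retry after a read error.
 */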
1202static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1203 int max_read_sectors, struct r1bio *r1_bio)
1204{
1205 struct r1conf *conf = mddev->private;
1206 struct raid1_info *mirror;
1207 struct bio *read_bio;
1208 struct bitmap *bitmap = mddev->bitmap;
1209 const int op = bio_op(bio);
1210 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1211 int max_sectors;
1212 int rdisk;
1213 bool r1bio_existed = !!r1_bio;
1214 char b[BDEVNAME_SIZE];
1215
1216
1217
1218
1219
1220
1221 gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1222
1223 if (r1bio_existed) {
1224
1225 struct md_rdev *rdev;
1226 rcu_read_lock();
1227 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1228 if (rdev)
1229 bdevname(rdev->bdev, b);
1230 else
1231 strcpy(b, "???");
1232 rcu_read_unlock();
1233 }
1234
1235
1236
1237
1238
1239 wait_read_barrier(conf, bio->bi_iter.bi_sector);
1240
1241 if (!r1_bio)
1242 r1_bio = alloc_r1bio(mddev, bio);
1243 else
1244 init_r1bio(r1_bio, mddev, bio);
1245 r1_bio->sectors = max_read_sectors;
1246
1247
1248
1249
1250
1251 rdisk = read_balance(conf, r1_bio, &max_sectors);
1252
1253 if (rdisk < 0) {
1254
1255 if (r1bio_existed) {
1256 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1257 mdname(mddev),
1258 b,
1259 (unsigned long long)r1_bio->sector);
1260 }
1261 raid_end_bio_io(r1_bio);
1262 return;
1263 }
1264 mirror = conf->mirrors + rdisk;
1265
1266 if (r1bio_existed)
1267 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
1268 mdname(mddev),
1269 (unsigned long long)r1_bio->sector,
1270 bdevname(mirror->rdev->bdev, b));
1271
1272 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1273 bitmap) {
1274
1275
1276
1277
1278 raid1_log(mddev, "wait behind writes");
1279 wait_event(bitmap->behind_wait,
1280 atomic_read(&bitmap->behind_writes) == 0);
1281 }
1282
1283 if (max_sectors < bio_sectors(bio)) {
1284 struct bio *split = bio_split(bio, max_sectors,
1285 gfp, &conf->bio_split);
1286 bio_chain(split, bio);
1287 submit_bio_noacct(bio);
1288 bio = split;
1289 r1_bio->master_bio = bio;
1290 r1_bio->sectors = max_sectors;
1291 }
1292
1293 r1_bio->read_disk = rdisk;
1294
1295 if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
1296 r1_bio->start_time = bio_start_io_acct(bio);
1297
1298 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
1299
1300 r1_bio->bios[rdisk] = read_bio;
1301
1302 read_bio->bi_iter.bi_sector = r1_bio->sector +
1303 mirror->rdev->data_offset;
1304 bio_set_dev(read_bio, mirror->rdev->bdev);
1305 read_bio->bi_end_io = raid1_end_read_request;
1306 bio_set_op_attrs(read_bio, op, do_sync);
1307 if (test_bit(FailFast, &mirror->rdev->flags) &&
1308 test_bit(R1BIO_FailFast, &r1_bio->state))
1309 read_bio->bi_opf |= MD_FAILFAST;
1310 read_bio->bi_private = r1_bio;
1311
1312 if (mddev->gendisk)
1313 trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
1314 r1_bio->sector);
1315
1316 submit_bio_noacct(read_bio);
1317}
1318
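/*
 * raid1_write_request() services one write: wait for barriers and any
 * cluster resync covering the range, select every non-faulty mirror
 * (shrinking the request around known bad blocks and retrying if a device
 * is Blocked), optionally set up a write-behind copy, then clone one bio
 * per mirror and queue the clones for the plug/flush machinery to submit.
 */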
1319static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1320 int max_write_sectors)
1321{
1322 struct r1conf *conf = mddev->private;
1323 struct r1bio *r1_bio;
1324 int i, disks;
1325 struct bitmap *bitmap = mddev->bitmap;
1326 unsigned long flags;
1327 struct md_rdev *blocked_rdev;
1328 struct blk_plug_cb *cb;
1329 struct raid1_plug_cb *plug = NULL;
1330 int first_clone;
1331 int max_sectors;
1332
1333 if (mddev_is_clustered(mddev) &&
1334 md_cluster_ops->area_resyncing(mddev, WRITE,
1335 bio->bi_iter.bi_sector, bio_end_sector(bio))) {
1336
1337 DEFINE_WAIT(w);
1338 for (;;) {
1339 prepare_to_wait(&conf->wait_barrier,
1340 &w, TASK_IDLE);
1341 if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1342 bio->bi_iter.bi_sector,
1343 bio_end_sector(bio)))
1344 break;
1345 schedule();
1346 }
1347 finish_wait(&conf->wait_barrier, &w);
1348 }
1349
1350
1351
1352
1353
1354
1355 wait_barrier(conf, bio->bi_iter.bi_sector);
1356
1357 r1_bio = alloc_r1bio(mddev, bio);
1358 r1_bio->sectors = max_write_sectors;
1359
1360 if (conf->pending_count >= max_queued_requests) {
1361 md_wakeup_thread(mddev->thread);
1362 raid1_log(mddev, "wait queued");
1363 wait_event(conf->wait_barrier,
1364 conf->pending_count < max_queued_requests);
1365 }
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377 disks = conf->raid_disks * 2;
1378 retry_write:
1379 blocked_rdev = NULL;
1380 rcu_read_lock();
1381 max_sectors = r1_bio->sectors;
1382 for (i = 0; i < disks; i++) {
1383 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1384 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1385 atomic_inc(&rdev->nr_pending);
1386 blocked_rdev = rdev;
1387 break;
1388 }
1389 r1_bio->bios[i] = NULL;
1390 if (!rdev || test_bit(Faulty, &rdev->flags)) {
1391 if (i < conf->raid_disks)
1392 set_bit(R1BIO_Degraded, &r1_bio->state);
1393 continue;
1394 }
1395
1396 atomic_inc(&rdev->nr_pending);
1397 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1398 sector_t first_bad;
1399 int bad_sectors;
1400 int is_bad;
1401
1402 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1403 &first_bad, &bad_sectors);
1404 if (is_bad < 0) {
1405
1406
1407 set_bit(BlockedBadBlocks, &rdev->flags);
1408 blocked_rdev = rdev;
1409 break;
1410 }
1411 if (is_bad && first_bad <= r1_bio->sector) {
1412
1413 bad_sectors -= (r1_bio->sector - first_bad);
1414 if (bad_sectors < max_sectors)
1415
1416
1417
1418 max_sectors = bad_sectors;
1419 rdev_dec_pending(rdev, mddev);
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430 continue;
1431 }
1432 if (is_bad) {
1433 int good_sectors = first_bad - r1_bio->sector;
1434 if (good_sectors < max_sectors)
1435 max_sectors = good_sectors;
1436 }
1437 }
1438 r1_bio->bios[i] = bio;
1439 }
1440 rcu_read_unlock();
1441
1442 if (unlikely(blocked_rdev)) {
1443
1444 int j;
1445
1446 for (j = 0; j < i; j++)
1447 if (r1_bio->bios[j])
1448 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1449 r1_bio->state = 0;
1450 allow_barrier(conf, bio->bi_iter.bi_sector);
1451 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1452 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1453 wait_barrier(conf, bio->bi_iter.bi_sector);
1454 goto retry_write;
1455 }
1456
1457 if (max_sectors < bio_sectors(bio)) {
1458 struct bio *split = bio_split(bio, max_sectors,
1459 GFP_NOIO, &conf->bio_split);
1460 bio_chain(split, bio);
1461 submit_bio_noacct(bio);
1462 bio = split;
1463 r1_bio->master_bio = bio;
1464 r1_bio->sectors = max_sectors;
1465 }
1466
1467 if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
1468 r1_bio->start_time = bio_start_io_acct(bio);
1469 atomic_set(&r1_bio->remaining, 1);
1470 atomic_set(&r1_bio->behind_remaining, 0);
1471
1472 first_clone = 1;
1473
1474 for (i = 0; i < disks; i++) {
1475 struct bio *mbio = NULL;
1476 struct md_rdev *rdev = conf->mirrors[i].rdev;
1477 if (!r1_bio->bios[i])
1478 continue;
1479
1480 if (first_clone) {
1481
1482
1483
1484
1485 if (bitmap &&
1486 (atomic_read(&bitmap->behind_writes)
1487 < mddev->bitmap_info.max_write_behind) &&
1488 !waitqueue_active(&bitmap->behind_wait)) {
1489 alloc_behind_master_bio(r1_bio, bio);
1490 }
1491
1492 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
1493 test_bit(R1BIO_BehindIO, &r1_bio->state));
1494 first_clone = 0;
1495 }
1496
1497 if (r1_bio->behind_master_bio)
1498 mbio = bio_clone_fast(r1_bio->behind_master_bio,
1499 GFP_NOIO, &mddev->bio_set);
1500 else
1501 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
1502
1503 if (r1_bio->behind_master_bio) {
1504 if (test_bit(CollisionCheck, &rdev->flags))
1505 wait_for_serialization(rdev, r1_bio);
1506 if (test_bit(WriteMostly, &rdev->flags))
1507 atomic_inc(&r1_bio->behind_remaining);
1508 } else if (mddev->serialize_policy)
1509 wait_for_serialization(rdev, r1_bio);
1510
1511 r1_bio->bios[i] = mbio;
1512
1513 mbio->bi_iter.bi_sector = (r1_bio->sector +
1514 conf->mirrors[i].rdev->data_offset);
1515 bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
1516 mbio->bi_end_io = raid1_end_write_request;
1517 mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
1518 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1519 !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1520 conf->raid_disks - mddev->degraded > 1)
1521 mbio->bi_opf |= MD_FAILFAST;
1522 mbio->bi_private = r1_bio;
1523
1524 atomic_inc(&r1_bio->remaining);
1525
1526 if (mddev->gendisk)
1527 trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
1528 r1_bio->sector);
1529
1530 mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
1531
1532 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1533 if (cb)
1534 plug = container_of(cb, struct raid1_plug_cb, cb);
1535 else
1536 plug = NULL;
1537 if (plug) {
1538 bio_list_add(&plug->pending, mbio);
1539 plug->pending_cnt++;
1540 } else {
1541 spin_lock_irqsave(&conf->device_lock, flags);
1542 bio_list_add(&conf->pending_bio_list, mbio);
1543 conf->pending_count++;
1544 spin_unlock_irqrestore(&conf->device_lock, flags);
1545 md_wakeup_thread(mddev->thread);
1546 }
1547 }
1548
1549 r1_bio_write_done(r1_bio);
1550
1551
1552 wake_up(&conf->wait_barrier);
1553}
1554
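/*
 * Entry point for all normal I/O.  Requests are clipped so they never cross
 * a barrier unit boundary, then handed to the read or write path.
 */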
1555static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1556{
1557 sector_t sectors;
1558
1559 if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1560 && md_flush_request(mddev, bio))
1561 return true;
1562
1563
1564
1565
1566
1567
1568
1569
1570 sectors = align_to_barrier_unit_end(
1571 bio->bi_iter.bi_sector, bio_sectors(bio));
1572
1573 if (bio_data_dir(bio) == READ)
1574 raid1_read_request(mddev, bio, sectors, NULL);
1575 else {
1576 if (!md_write_start(mddev,bio))
1577 return false;
1578 raid1_write_request(mddev, bio, sectors);
1579 }
1580 return true;
1581}
1582
1583static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1584{
1585 struct r1conf *conf = mddev->private;
1586 int i;
1587
1588 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1589 conf->raid_disks - mddev->degraded);
1590 rcu_read_lock();
1591 for (i = 0; i < conf->raid_disks; i++) {
1592 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1593 seq_printf(seq, "%s",
1594 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1595 }
1596 rcu_read_unlock();
1597 seq_printf(seq, "]");
1598}
1599
1600static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1601{
1602 char b[BDEVNAME_SIZE];
1603 struct r1conf *conf = mddev->private;
1604 unsigned long flags;
1605
1606
1607
1608
1609
1610
1611
1612 spin_lock_irqsave(&conf->device_lock, flags);
1613 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
1614 && (conf->raid_disks - mddev->degraded) == 1) {
1615
1616
1617
1618
1619
1620
1621 conf->recovery_disabled = mddev->recovery_disabled;
1622 spin_unlock_irqrestore(&conf->device_lock, flags);
1623 return;
1624 }
1625 set_bit(Blocked, &rdev->flags);
1626 if (test_and_clear_bit(In_sync, &rdev->flags))
1627 mddev->degraded++;
1628 set_bit(Faulty, &rdev->flags);
1629 spin_unlock_irqrestore(&conf->device_lock, flags);
1630
1631
1632
1633 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1634 set_mask_bits(&mddev->sb_flags, 0,
1635 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1636 pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
1637 "md/raid1:%s: Operation continuing on %d devices.\n",
1638 mdname(mddev), bdevname(rdev->bdev, b),
1639 mdname(mddev), conf->raid_disks - mddev->degraded);
1640}
1641
1642static void print_conf(struct r1conf *conf)
1643{
1644 int i;
1645
1646 pr_debug("RAID1 conf printout:\n");
1647 if (!conf) {
1648 pr_debug("(!conf)\n");
1649 return;
1650 }
1651 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1652 conf->raid_disks);
1653
1654 rcu_read_lock();
1655 for (i = 0; i < conf->raid_disks; i++) {
1656 char b[BDEVNAME_SIZE];
1657 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1658 if (rdev)
1659 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1660 i, !test_bit(In_sync, &rdev->flags),
1661 !test_bit(Faulty, &rdev->flags),
1662 bdevname(rdev->bdev,b));
1663 }
1664 rcu_read_unlock();
1665}
1666
1667static void close_sync(struct r1conf *conf)
1668{
1669 int idx;
1670
1671 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1672 _wait_barrier(conf, idx);
1673 _allow_barrier(conf, idx);
1674 }
1675
1676 mempool_exit(&conf->r1buf_pool);
1677}
1678
1679static int raid1_spare_active(struct mddev *mddev)
1680{
1681 int i;
1682 struct r1conf *conf = mddev->private;
1683 int count = 0;
1684 unsigned long flags;
1685
1686
1687
1688
1689
1690
1691
1692
1693 spin_lock_irqsave(&conf->device_lock, flags);
1694 for (i = 0; i < conf->raid_disks; i++) {
1695 struct md_rdev *rdev = conf->mirrors[i].rdev;
1696 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1697 if (repl
1698 && !test_bit(Candidate, &repl->flags)
1699 && repl->recovery_offset == MaxSector
1700 && !test_bit(Faulty, &repl->flags)
1701 && !test_and_set_bit(In_sync, &repl->flags)) {
1702
1703 if (!rdev ||
1704 !test_and_clear_bit(In_sync, &rdev->flags))
1705 count++;
1706 if (rdev) {
1707
1708
1709
1710
1711 set_bit(Faulty, &rdev->flags);
1712 sysfs_notify_dirent_safe(
1713 rdev->sysfs_state);
1714 }
1715 }
1716 if (rdev
1717 && rdev->recovery_offset == MaxSector
1718 && !test_bit(Faulty, &rdev->flags)
1719 && !test_and_set_bit(In_sync, &rdev->flags)) {
1720 count++;
1721 sysfs_notify_dirent_safe(rdev->sysfs_state);
1722 }
1723 }
1724 mddev->degraded -= count;
1725 spin_unlock_irqrestore(&conf->device_lock, flags);
1726
1727 print_conf(conf);
1728 return count;
1729}
1730
1731static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1732{
1733 struct r1conf *conf = mddev->private;
1734 int err = -EEXIST;
1735 int mirror = 0;
1736 struct raid1_info *p;
1737 int first = 0;
1738 int last = conf->raid_disks - 1;
1739
1740 if (mddev->recovery_disabled == conf->recovery_disabled)
1741 return -EBUSY;
1742
1743 if (md_integrity_add_rdev(rdev, mddev))
1744 return -ENXIO;
1745
1746 if (rdev->raid_disk >= 0)
1747 first = last = rdev->raid_disk;
1748
1749
1750
1751
1752
1753 if (rdev->saved_raid_disk >= 0 &&
1754 rdev->saved_raid_disk >= first &&
1755 rdev->saved_raid_disk < conf->raid_disks &&
1756 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1757 first = last = rdev->saved_raid_disk;
1758
1759 for (mirror = first; mirror <= last; mirror++) {
1760 p = conf->mirrors + mirror;
1761 if (!p->rdev) {
1762 if (mddev->gendisk)
1763 disk_stack_limits(mddev->gendisk, rdev->bdev,
1764 rdev->data_offset << 9);
1765
1766 p->head_position = 0;
1767 rdev->raid_disk = mirror;
1768 err = 0;
1769
1770
1771
1772 if (rdev->saved_raid_disk < 0)
1773 conf->fullsync = 1;
1774 rcu_assign_pointer(p->rdev, rdev);
1775 break;
1776 }
1777 if (test_bit(WantReplacement, &p->rdev->flags) &&
1778 p[conf->raid_disks].rdev == NULL) {
1779
1780 clear_bit(In_sync, &rdev->flags);
1781 set_bit(Replacement, &rdev->flags);
1782 rdev->raid_disk = mirror;
1783 err = 0;
1784 conf->fullsync = 1;
1785 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1786 break;
1787 }
1788 }
1789 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1790 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1791 print_conf(conf);
1792 return err;
1793}
1794
1795static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1796{
1797 struct r1conf *conf = mddev->private;
1798 int err = 0;
1799 int number = rdev->raid_disk;
1800 struct raid1_info *p = conf->mirrors + number;
1801
1802 if (rdev != p->rdev)
1803 p = conf->mirrors + conf->raid_disks + number;
1804
1805 print_conf(conf);
1806 if (rdev == p->rdev) {
1807 if (test_bit(In_sync, &rdev->flags) ||
1808 atomic_read(&rdev->nr_pending)) {
1809 err = -EBUSY;
1810 goto abort;
1811 }
1812
1813
1814
1815 if (!test_bit(Faulty, &rdev->flags) &&
1816 mddev->recovery_disabled != conf->recovery_disabled &&
1817 mddev->degraded < conf->raid_disks) {
1818 err = -EBUSY;
1819 goto abort;
1820 }
1821 p->rdev = NULL;
1822 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1823 synchronize_rcu();
1824 if (atomic_read(&rdev->nr_pending)) {
1825
1826 err = -EBUSY;
1827 p->rdev = rdev;
1828 goto abort;
1829 }
1830 }
1831 if (conf->mirrors[conf->raid_disks + number].rdev) {
1832
1833
1834
1835
1836 struct md_rdev *repl =
1837 conf->mirrors[conf->raid_disks + number].rdev;
1838 freeze_array(conf, 0);
1839 if (atomic_read(&repl->nr_pending)) {
1840
1841
1842
1843
1844
1845
1846 err = -EBUSY;
1847 unfreeze_array(conf);
1848 goto abort;
1849 }
1850 clear_bit(Replacement, &repl->flags);
1851 p->rdev = repl;
1852 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1853 unfreeze_array(conf);
1854 }
1855
1856 clear_bit(WantReplacement, &rdev->flags);
1857 err = md_integrity_register(mddev);
1858 }
1859abort:
1860
1861 print_conf(conf);
1862 return err;
1863}
1864
1865static void end_sync_read(struct bio *bio)
1866{
1867 struct r1bio *r1_bio = get_resync_r1bio(bio);
1868
1869 update_head_pos(r1_bio->read_disk, r1_bio);
1870
1871
1872
1873
1874
1875
1876 if (!bio->bi_status)
1877 set_bit(R1BIO_Uptodate, &r1_bio->state);
1878
1879 if (atomic_dec_and_test(&r1_bio->remaining))
1880 reschedule_retry(r1_bio);
1881}
1882
1883static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1884{
1885 sector_t sync_blocks = 0;
1886 sector_t s = r1_bio->sector;
1887 long sectors_to_go = r1_bio->sectors;
1888
1889
1890 do {
1891 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1892 s += sync_blocks;
1893 sectors_to_go -= sync_blocks;
1894 } while (sectors_to_go > 0);
1895}
1896
1897static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
1898{
1899 if (atomic_dec_and_test(&r1_bio->remaining)) {
1900 struct mddev *mddev = r1_bio->mddev;
1901 int s = r1_bio->sectors;
1902
1903 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1904 test_bit(R1BIO_WriteError, &r1_bio->state))
1905 reschedule_retry(r1_bio);
1906 else {
1907 put_buf(r1_bio);
1908 md_done_sync(mddev, s, uptodate);
1909 }
1910 }
1911}
1912
1913static void end_sync_write(struct bio *bio)
1914{
1915 int uptodate = !bio->bi_status;
1916 struct r1bio *r1_bio = get_resync_r1bio(bio);
1917 struct mddev *mddev = r1_bio->mddev;
1918 struct r1conf *conf = mddev->private;
1919 sector_t first_bad;
1920 int bad_sectors;
1921 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1922
1923 if (!uptodate) {
1924 abort_sync_write(mddev, r1_bio);
1925 set_bit(WriteErrorSeen, &rdev->flags);
1926 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1927 set_bit(MD_RECOVERY_NEEDED, &
1928 mddev->recovery);
1929 set_bit(R1BIO_WriteError, &r1_bio->state);
1930 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1931 &first_bad, &bad_sectors) &&
1932 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1933 r1_bio->sector,
1934 r1_bio->sectors,
1935 &first_bad, &bad_sectors)
1936 )
1937 set_bit(R1BIO_MadeGood, &r1_bio->state);
1938
1939 put_sync_write_buf(r1_bio, uptodate);
1940}
1941
1942static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1943 int sectors, struct page *page, int rw)
1944{
1945 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1946
1947 return 1;
1948 if (rw == WRITE) {
1949 set_bit(WriteErrorSeen, &rdev->flags);
1950 if (!test_and_set_bit(WantReplacement,
1951 &rdev->flags))
1952 set_bit(MD_RECOVERY_NEEDED, &
1953 rdev->mddev->recovery);
1954 }
1955
1956 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1957 md_error(rdev->mddev, rdev);
1958 return 0;
1959}
1960
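/*
 * fix_sync_read_error() repairs a failed resync read: try the other devices
 * until one copy of the data can be read, write that data back to the
 * devices whose reads failed, and re-read to confirm.  If no device can
 * supply the data, the sectors are marked bad or recovery is aborted.
 */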
1961static int fix_sync_read_error(struct r1bio *r1_bio)
1962{
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974 struct mddev *mddev = r1_bio->mddev;
1975 struct r1conf *conf = mddev->private;
1976 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1977 struct page **pages = get_resync_pages(bio)->pages;
1978 sector_t sect = r1_bio->sector;
1979 int sectors = r1_bio->sectors;
1980 int idx = 0;
1981 struct md_rdev *rdev;
1982
1983 rdev = conf->mirrors[r1_bio->read_disk].rdev;
1984 if (test_bit(FailFast, &rdev->flags)) {
1985
1986
1987 md_error(mddev, rdev);
1988 if (test_bit(Faulty, &rdev->flags))
1989
1990
1991
1992 bio->bi_end_io = end_sync_write;
1993 }
1994
1995 while(sectors) {
1996 int s = sectors;
1997 int d = r1_bio->read_disk;
1998 int success = 0;
1999 int start;
2000
2001 if (s > (PAGE_SIZE>>9))
2002 s = PAGE_SIZE >> 9;
2003 do {
2004 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
2005
2006
2007
2008
2009 rdev = conf->mirrors[d].rdev;
2010 if (sync_page_io(rdev, sect, s<<9,
2011 pages[idx],
2012 REQ_OP_READ, 0, false)) {
2013 success = 1;
2014 break;
2015 }
2016 }
2017 d++;
2018 if (d == conf->raid_disks * 2)
2019 d = 0;
2020 } while (!success && d != r1_bio->read_disk);
2021
2022 if (!success) {
2023 char b[BDEVNAME_SIZE];
2024 int abort = 0;
2025
2026
2027
2028
2029
2030 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
2031 mdname(mddev), bio_devname(bio, b),
2032 (unsigned long long)r1_bio->sector);
2033 for (d = 0; d < conf->raid_disks * 2; d++) {
2034 rdev = conf->mirrors[d].rdev;
2035 if (!rdev || test_bit(Faulty, &rdev->flags))
2036 continue;
2037 if (!rdev_set_badblocks(rdev, sect, s, 0))
2038 abort = 1;
2039 }
2040 if (abort) {
2041 conf->recovery_disabled =
2042 mddev->recovery_disabled;
2043 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2044 md_done_sync(mddev, r1_bio->sectors, 0);
2045 put_buf(r1_bio);
2046 return 0;
2047 }
2048
2049 sectors -= s;
2050 sect += s;
2051 idx++;
2052 continue;
2053 }
2054
2055 start = d;
2056
2057 while (d != r1_bio->read_disk) {
2058 if (d == 0)
2059 d = conf->raid_disks * 2;
2060 d--;
2061 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2062 continue;
2063 rdev = conf->mirrors[d].rdev;
2064 if (r1_sync_page_io(rdev, sect, s,
2065 pages[idx],
2066 WRITE) == 0) {
2067 r1_bio->bios[d]->bi_end_io = NULL;
2068 rdev_dec_pending(rdev, mddev);
2069 }
2070 }
2071 d = start;
2072 while (d != r1_bio->read_disk) {
2073 if (d == 0)
2074 d = conf->raid_disks * 2;
2075 d--;
2076 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2077 continue;
2078 rdev = conf->mirrors[d].rdev;
2079 if (r1_sync_page_io(rdev, sect, s,
2080 pages[idx],
2081 READ) != 0)
2082 atomic_add(s, &rdev->corrected_errors);
2083 }
2084 sectors -= s;
2085 sect += s;
2086 idx ++;
2087 }
2088 set_bit(R1BIO_Uptodate, &r1_bio->state);
2089 bio->bi_status = 0;
2090 return 1;
2091}
2092
2093static void process_checks(struct r1bio *r1_bio)
2094{
2095
2096
2097
2098
2099
2100
2101
2102 struct mddev *mddev = r1_bio->mddev;
2103 struct r1conf *conf = mddev->private;
2104 int primary;
2105 int i;
2106 int vcnt;
2107
2108
2109 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2110 for (i = 0; i < conf->raid_disks * 2; i++) {
2111 blk_status_t status;
2112 struct bio *b = r1_bio->bios[i];
2113 struct resync_pages *rp = get_resync_pages(b);
2114 if (b->bi_end_io != end_sync_read)
2115 continue;
2116
2117 status = b->bi_status;
2118 bio_reset(b);
2119 b->bi_status = status;
2120 b->bi_iter.bi_sector = r1_bio->sector +
2121 conf->mirrors[i].rdev->data_offset;
2122 bio_set_dev(b, conf->mirrors[i].rdev->bdev);
2123 b->bi_end_io = end_sync_read;
2124 rp->raid_bio = r1_bio;
2125 b->bi_private = rp;
2126
2127
2128 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2129 }
2130 for (primary = 0; primary < conf->raid_disks * 2; primary++)
2131 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2132 !r1_bio->bios[primary]->bi_status) {
2133 r1_bio->bios[primary]->bi_end_io = NULL;
2134 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2135 break;
2136 }
2137 r1_bio->read_disk = primary;
2138 for (i = 0; i < conf->raid_disks * 2; i++) {
2139 int j = 0;
2140 struct bio *pbio = r1_bio->bios[primary];
2141 struct bio *sbio = r1_bio->bios[i];
2142 blk_status_t status = sbio->bi_status;
2143 struct page **ppages = get_resync_pages(pbio)->pages;
2144 struct page **spages = get_resync_pages(sbio)->pages;
2145 struct bio_vec *bi;
2146 int page_len[RESYNC_PAGES] = { 0 };
2147 struct bvec_iter_all iter_all;
2148
2149 if (sbio->bi_end_io != end_sync_read)
2150 continue;
2151
2152 sbio->bi_status = 0;
2153
2154 bio_for_each_segment_all(bi, sbio, iter_all)
2155 page_len[j++] = bi->bv_len;
2156
2157 if (!status) {
2158 for (j = vcnt; j-- ; ) {
2159 if (memcmp(page_address(ppages[j]),
2160 page_address(spages[j]),
2161 page_len[j]))
2162 break;
2163 }
2164 } else
2165 j = 0;
2166 if (j >= 0)
2167 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2168 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2169 && !status)) {
2170
2171 sbio->bi_end_io = NULL;
2172 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2173 continue;
2174 }
2175
2176 bio_copy_data(sbio, pbio);
2177 }
2178}
2179
2180static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2181{
2182 struct r1conf *conf = mddev->private;
2183 int i;
2184 int disks = conf->raid_disks * 2;
2185 struct bio *wbio;
2186
2187 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2188
2189 if (!fix_sync_read_error(r1_bio))
2190 return;
2191
2192 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2193 process_checks(r1_bio);
2194
2195
2196
2197
2198 atomic_set(&r1_bio->remaining, 1);
2199 for (i = 0; i < disks ; i++) {
2200 wbio = r1_bio->bios[i];
2201 if (wbio->bi_end_io == NULL ||
2202 (wbio->bi_end_io == end_sync_read &&
2203 (i == r1_bio->read_disk ||
2204 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2205 continue;
2206 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2207 abort_sync_write(mddev, r1_bio);
2208 continue;
2209 }
2210
2211 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2212 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2213 wbio->bi_opf |= MD_FAILFAST;
2214
2215 wbio->bi_end_io = end_sync_write;
2216 atomic_inc(&r1_bio->remaining);
2217 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2218
2219 submit_bio_noacct(wbio);
2220 }
2221
2222 put_sync_write_buf(r1_bio, 1);
2223}
2224
2225
2226
2227
2228
2229
2230
2231
2232
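/*
 * fix_read_error() is used by raid1d to repair a read error on a live
 * array: read the affected sectors from some other working device into
 * conf->tmppage, write the good data over the bad sectors on the other
 * mirrors, then re-read to verify.  Devices that still fail get the range
 * recorded as bad blocks or are failed via md_error().
 */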
2233static void fix_read_error(struct r1conf *conf, int read_disk,
2234 sector_t sect, int sectors)
2235{
2236 struct mddev *mddev = conf->mddev;
2237 while(sectors) {
2238 int s = sectors;
2239 int d = read_disk;
2240 int success = 0;
2241 int start;
2242 struct md_rdev *rdev;
2243
2244 if (s > (PAGE_SIZE>>9))
2245 s = PAGE_SIZE >> 9;
2246
2247 do {
2248 sector_t first_bad;
2249 int bad_sectors;
2250
2251 rcu_read_lock();
2252 rdev = rcu_dereference(conf->mirrors[d].rdev);
2253 if (rdev &&
2254 (test_bit(In_sync, &rdev->flags) ||
2255 (!test_bit(Faulty, &rdev->flags) &&
2256 rdev->recovery_offset >= sect + s)) &&
2257 is_badblock(rdev, sect, s,
2258 &first_bad, &bad_sectors) == 0) {
2259 atomic_inc(&rdev->nr_pending);
2260 rcu_read_unlock();
2261 if (sync_page_io(rdev, sect, s<<9,
2262 conf->tmppage, REQ_OP_READ, 0, false))
2263 success = 1;
2264 rdev_dec_pending(rdev, mddev);
2265 if (success)
2266 break;
2267 } else
2268 rcu_read_unlock();
2269 d++;
2270 if (d == conf->raid_disks * 2)
2271 d = 0;
2272 } while (!success && d != read_disk);
2273
2274 if (!success) {
2275
2276 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2277 if (!rdev_set_badblocks(rdev, sect, s, 0))
2278 md_error(mddev, rdev);
2279 break;
2280 }
2281
2282 start = d;
2283 while (d != read_disk) {
2284 if (d==0)
2285 d = conf->raid_disks * 2;
2286 d--;
2287 rcu_read_lock();
2288 rdev = rcu_dereference(conf->mirrors[d].rdev);
2289 if (rdev &&
2290 !test_bit(Faulty, &rdev->flags)) {
2291 atomic_inc(&rdev->nr_pending);
2292 rcu_read_unlock();
2293 r1_sync_page_io(rdev, sect, s,
2294 conf->tmppage, WRITE);
2295 rdev_dec_pending(rdev, mddev);
2296 } else
2297 rcu_read_unlock();
2298 }
2299 d = start;
2300 while (d != read_disk) {
2301 char b[BDEVNAME_SIZE];
2302 if (d==0)
2303 d = conf->raid_disks * 2;
2304 d--;
2305 rcu_read_lock();
2306 rdev = rcu_dereference(conf->mirrors[d].rdev);
2307 if (rdev &&
2308 !test_bit(Faulty, &rdev->flags)) {
2309 atomic_inc(&rdev->nr_pending);
2310 rcu_read_unlock();
2311 if (r1_sync_page_io(rdev, sect, s,
2312 conf->tmppage, READ)) {
2313 atomic_add(s, &rdev->corrected_errors);
2314 pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2315 mdname(mddev), s,
2316 (unsigned long long)(sect +
2317 rdev->data_offset),
2318 bdevname(rdev->bdev, b));
2319 }
2320 rdev_dec_pending(rdev, mddev);
2321 } else
2322 rcu_read_unlock();
2323 }
2324 sectors -= s;
2325 sect += s;
2326 }
2327}
2328
2329static int narrow_write_error(struct r1bio *r1_bio, int i)
2330{
2331 struct mddev *mddev = r1_bio->mddev;
2332 struct r1conf *conf = mddev->private;
2333 struct md_rdev *rdev = conf->mirrors[i].rdev;
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346 int block_sectors;
2347 sector_t sector;
2348 int sectors;
2349 int sect_to_write = r1_bio->sectors;
2350 int ok = 1;
2351
2352 if (rdev->badblocks.shift < 0)
2353 return 0;
2354
2355 block_sectors = roundup(1 << rdev->badblocks.shift,
2356 bdev_logical_block_size(rdev->bdev) >> 9);
2357 sector = r1_bio->sector;
2358 sectors = ((sector + block_sectors)
2359 & ~(sector_t)(block_sectors - 1))
2360 - sector;
2361
2362 while (sect_to_write) {
2363 struct bio *wbio;
2364 if (sectors > sect_to_write)
2365 sectors = sect_to_write;
2366
2367
2368 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2369 wbio = bio_clone_fast(r1_bio->behind_master_bio,
2370 GFP_NOIO,
2371 &mddev->bio_set);
2372 } else {
2373 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2374 &mddev->bio_set);
2375 }
2376
2377 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2378 wbio->bi_iter.bi_sector = r1_bio->sector;
2379 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2380
2381 bio_trim(wbio, sector - r1_bio->sector, sectors);
2382 wbio->bi_iter.bi_sector += rdev->data_offset;
2383 bio_set_dev(wbio, rdev->bdev);
2384
2385 if (submit_bio_wait(wbio) < 0)
2386
2387 ok = rdev_set_badblocks(rdev, sector,
2388 sectors, 0)
2389 && ok;
2390
2391 bio_put(wbio);
2392 sect_to_write -= sectors;
2393 sector += sectors;
2394 sectors = block_sectors;
2395 }
2396 return ok;
2397}
2398
2399static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2400{
2401 int m;
2402 int s = r1_bio->sectors;
2403 for (m = 0; m < conf->raid_disks * 2 ; m++) {
2404 struct md_rdev *rdev = conf->mirrors[m].rdev;
2405 struct bio *bio = r1_bio->bios[m];
2406 if (bio->bi_end_io == NULL)
2407 continue;
2408 if (!bio->bi_status &&
2409 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2410 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2411 }
2412 if (bio->bi_status &&
2413 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2414 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2415 md_error(conf->mddev, rdev);
2416 }
2417 }
2418 put_buf(r1_bio);
2419 md_done_sync(conf->mddev, s, 1);
2420}
2421
2422static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2423{
2424 int m, idx;
2425 bool fail = false;
2426
2427 for (m = 0; m < conf->raid_disks * 2 ; m++)
2428 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2429 struct md_rdev *rdev = conf->mirrors[m].rdev;
2430 rdev_clear_badblocks(rdev,
2431 r1_bio->sector,
2432 r1_bio->sectors, 0);
2433 rdev_dec_pending(rdev, conf->mddev);
2434 } else if (r1_bio->bios[m] != NULL) {
			/*
			 * This drive got a write error.  Narrow down and
			 * record the precise range of failed sectors before
			 * deciding whether to fail the whole device.
			 */
2439 fail = true;
2440 if (!narrow_write_error(r1_bio, m)) {
2441 md_error(conf->mddev,
2442 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
2444 set_bit(R1BIO_Degraded, &r1_bio->state);
2445 }
2446 rdev_dec_pending(conf->mirrors[m].rdev,
2447 conf->mddev);
2448 }
2449 if (fail) {
2450 spin_lock_irq(&conf->device_lock);
2451 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2452 idx = sector_to_idx(r1_bio->sector);
2453 atomic_inc(&conf->nr_queued[idx]);
2454 spin_unlock_irq(&conf->device_lock);
		/*
		 * nr_queued was just incremented, so wake anyone in
		 * freeze_array() waiting for the queued/pending counts
		 * to balance.
		 */
2459 wake_up(&conf->wait_barrier);
2460 md_wakeup_thread(conf->mddev->thread);
2461 } else {
2462 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2463 close_write(r1_bio);
2464 raid_end_bio_io(r1_bio);
2465 }
2466}
2467
2468static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2469{
2470 struct mddev *mddev = conf->mddev;
2471 struct bio *bio;
2472 struct md_rdev *rdev;
2473
2474 clear_bit(R1BIO_ReadError, &r1_bio->state);
	/* We got a read error.  Maybe the drive is bad, or maybe just this
	 * block can be fixed.  We freeze all other IO and try reading the
	 * block from the other devices; when a good copy is found it is
	 * written back over the failing copy.  This is all done
	 * synchronously while the array is frozen.
	 */
2484 bio = r1_bio->bios[r1_bio->read_disk];
2485 bio_put(bio);
2486 r1_bio->bios[r1_bio->read_disk] = NULL;
2487
2488 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2489 if (mddev->ro == 0
2490 && !test_bit(FailFast, &rdev->flags)) {
2491 freeze_array(conf, 1);
2492 fix_read_error(conf, r1_bio->read_disk,
2493 r1_bio->sector, r1_bio->sectors);
2494 unfreeze_array(conf);
2495 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2496 md_error(mddev, rdev);
2497 } else {
2498 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2499 }
2500
2501 rdev_dec_pending(rdev, conf->mddev);
2502 allow_barrier(conf, r1_bio->sector);
2503 bio = r1_bio->master_bio;

	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2506 r1_bio->state = 0;
2507 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2508}
2509
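/*
 * This is the raid1 kernel thread.  It flushes queued writes, completes
 * bios that were parked until the superblock could be written out, and
 * retries or repairs r1bios that ended with read or write errors.
 */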
2510static void raid1d(struct md_thread *thread)
2511{
2512 struct mddev *mddev = thread->mddev;
2513 struct r1bio *r1_bio;
2514 unsigned long flags;
2515 struct r1conf *conf = mddev->private;
2516 struct list_head *head = &conf->retry_list;
2517 struct blk_plug plug;
2518 int idx;
2519
2520 md_check_recovery(mddev);
2521
2522 if (!list_empty_careful(&conf->bio_end_io_list) &&
2523 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2524 LIST_HEAD(tmp);
2525 spin_lock_irqsave(&conf->device_lock, flags);
2526 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2527 list_splice_init(&conf->bio_end_io_list, &tmp);
2528 spin_unlock_irqrestore(&conf->device_lock, flags);
2529 while (!list_empty(&tmp)) {
2530 r1_bio = list_first_entry(&tmp, struct r1bio,
2531 retry_list);
2532 list_del(&r1_bio->retry_list);
2533 idx = sector_to_idx(r1_bio->sector);
2534 atomic_dec(&conf->nr_queued[idx]);
2535 if (mddev->degraded)
2536 set_bit(R1BIO_Degraded, &r1_bio->state);
2537 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2538 close_write(r1_bio);
2539 raid_end_bio_io(r1_bio);
2540 }
2541 }
2542
2543 blk_start_plug(&plug);
2544 for (;;) {
2545
2546 flush_pending_writes(conf);
2547
2548 spin_lock_irqsave(&conf->device_lock, flags);
2549 if (list_empty(head)) {
2550 spin_unlock_irqrestore(&conf->device_lock, flags);
2551 break;
2552 }
2553 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2554 list_del(head->prev);
2555 idx = sector_to_idx(r1_bio->sector);
2556 atomic_dec(&conf->nr_queued[idx]);
2557 spin_unlock_irqrestore(&conf->device_lock, flags);
2558
2559 mddev = r1_bio->mddev;
2560 conf = mddev->private;
2561 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2562 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2563 test_bit(R1BIO_WriteError, &r1_bio->state))
2564 handle_sync_write_finished(conf, r1_bio);
2565 else
2566 sync_request_write(mddev, r1_bio);
2567 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2568 test_bit(R1BIO_WriteError, &r1_bio->state))
2569 handle_write_finished(conf, r1_bio);
2570 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2571 handle_read_error(conf, r1_bio);
2572 else
2573 WARN_ON_ONCE(1);
2574
2575 cond_resched();
2576 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2577 md_check_recovery(mddev);
2578 }
2579 blk_finish_plug(&plug);
2580}
2581
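/* Allocate the pool of resync buffers used by raid1_sync_request(). */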
2582static int init_resync(struct r1conf *conf)
2583{
2584 int buffs;
2585
2586 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2587 BUG_ON(mempool_initialized(&conf->r1buf_pool));
2588
2589 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2590 r1buf_pool_free, conf->poolinfo);
2591}
2592
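/*
 * Grab an r1bio from the resync buffer pool and reset each of its bios for
 * reuse, preserving bi_private, which points at the pre-allocated
 * resync_pages.
 */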
2593static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2594{
2595 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2596 struct resync_pages *rps;
2597 struct bio *bio;
2598 int i;
2599
2600 for (i = conf->poolinfo->raid_disks; i--; ) {
2601 bio = r1bio->bios[i];
2602 rps = bio->bi_private;
2603 bio_reset(bio);
2604 bio->bi_private = rps;
2605 }
2606 r1bio->master_bio = NULL;
2607 return r1bio;
2608}
2609
/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */
2620static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2621 int *skipped)
2622{
2623 struct r1conf *conf = mddev->private;
2624 struct r1bio *r1_bio;
2625 struct bio *bio;
2626 sector_t max_sector, nr_sectors;
2627 int disk = -1;
2628 int i;
2629 int wonly = -1;
2630 int write_targets = 0, read_targets = 0;
2631 sector_t sync_blocks;
2632 int still_degraded = 0;
2633 int good_sectors = RESYNC_SECTORS;
2634 int min_bad = 0;
2635 int idx = sector_to_idx(sector_nr);
2636 int page_idx = 0;
2637
2638 if (!mempool_initialized(&conf->r1buf_pool))
2639 if (init_resync(conf))
2640 return 0;
2641
2642 max_sector = mddev->dev_sectors;
2643 if (sector_nr >= max_sector) {
		/*
		 * If we aborted, we need to abort the sync on the 'current'
		 * bitmap chunk (there will only be one in raid1 resync).
		 * The current position is recorded in mddev->curr_resync.
		 */
2649 if (mddev->curr_resync < max_sector)
2650 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2651 &sync_blocks, 1);
2652 else
2653 conf->fullsync = 0;
2654
2655 md_bitmap_close_sync(mddev->bitmap);
2656 close_sync(conf);
2657
2658 if (mddev_is_clustered(mddev)) {
2659 conf->cluster_sync_low = 0;
2660 conf->cluster_sync_high = 0;
2661 }
2662 return 0;
2663 }
2664
2665 if (mddev->bitmap == NULL &&
2666 mddev->recovery_cp == MaxSector &&
2667 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2668 conf->fullsync == 0) {
2669 *skipped = 1;
2670 return max_sector - sector_nr;
2671 }
2672
	/* Before building a request, check if we can skip these blocks.
	 * This call to md_bitmap_start_sync doesn't actually record anything.
	 */
2675 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2676 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
2678 *skipped = 1;
2679 return sync_blocks;
2680 }
2681
	/*
	 * If there is non-resync activity waiting for a turn, then let it
	 * through before starting on this new sync request.
	 */
2686 if (atomic_read(&conf->nr_waiting[idx]))
2687 schedule_timeout_uninterruptible(1);
2688
	/* We are incrementing sector_nr below.  To be safe, we check against
	 * sector_nr + two times RESYNC_SECTORS.
	 */

2693 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2694 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2695
2696
2697 if (raise_barrier(conf, sector_nr))
2698 return 0;
2699
2700 r1_bio = raid1_alloc_init_r1buf(conf);
2701
2702 rcu_read_lock();
	/*
	 * If we get a correctable read error during resync or recovery we
	 * might want to read from a different device, so flag every drive
	 * that could conceivably be read from for READ, and any others
	 * (i.e. the non-In_sync devices being rebuilt) for WRITE.
	 */
2712 r1_bio->mddev = mddev;
2713 r1_bio->sector = sector_nr;
2714 r1_bio->state = 0;
2715 set_bit(R1BIO_IsSync, &r1_bio->state);
2716
2717 good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
2718
2719 for (i = 0; i < conf->raid_disks * 2; i++) {
2720 struct md_rdev *rdev;
2721 bio = r1_bio->bios[i];
2722
2723 rdev = rcu_dereference(conf->mirrors[i].rdev);
2724 if (rdev == NULL ||
2725 test_bit(Faulty, &rdev->flags)) {
2726 if (i < conf->raid_disks)
2727 still_degraded = 1;
2728 } else if (!test_bit(In_sync, &rdev->flags)) {
2729 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2730 bio->bi_end_io = end_sync_write;
2731 write_targets ++;
2732 } else {
			/* may need to read from here */
2734 sector_t first_bad = MaxSector;
2735 int bad_sectors;
2736
2737 if (is_badblock(rdev, sector_nr, good_sectors,
2738 &first_bad, &bad_sectors)) {
2739 if (first_bad > sector_nr)
2740 good_sectors = first_bad - sector_nr;
2741 else {
2742 bad_sectors -= (sector_nr - first_bad);
2743 if (min_bad == 0 ||
2744 min_bad > bad_sectors)
2745 min_bad = bad_sectors;
2746 }
2747 }
2748 if (sector_nr < first_bad) {
2749 if (test_bit(WriteMostly, &rdev->flags)) {
2750 if (wonly < 0)
2751 wonly = i;
2752 } else {
2753 if (disk < 0)
2754 disk = i;
2755 }
2756 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2757 bio->bi_end_io = end_sync_read;
2758 read_targets++;
2759 } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2760 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2761 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is suitable for reading
				 * (In_sync), but it has bad blocks over this
				 * range.  During a real sync (not a check)
				 * we can try to repair them by writing the
				 * reconstructed data back out.
				 */
2768 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2769 bio->bi_end_io = end_sync_write;
2770 write_targets++;
2771 }
2772 }
2773 if (rdev && bio->bi_end_io) {
2774 atomic_inc(&rdev->nr_pending);
2775 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2776 bio_set_dev(bio, rdev->bdev);
2777 if (test_bit(FailFast, &rdev->flags))
2778 bio->bi_opf |= MD_FAILFAST;
2779 }
2780 }
2781 rcu_read_unlock();
2782 if (disk < 0)
2783 disk = wonly;
2784 r1_bio->read_disk = disk;
2785
2786 if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all In_sync devices, so we
		 * need to mark them bad on all write targets
		 */
2790 int ok = 1;
2791 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2792 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2793 struct md_rdev *rdev = conf->mirrors[i].rdev;
2794 ok = rdev_set_badblocks(rdev, sector_nr,
2795 min_bad, 0
2796 ) && ok;
2797 }
2798 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2799 *skipped = 1;
2800 put_buf(r1_bio);
2801
2802 if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * confuse things.
			 */
2808 conf->recovery_disabled = mddev->recovery_disabled;
2809 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2810 return 0;
2811 } else
2812 return min_bad;
2813
2814 }
2815 if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
2818 good_sectors = min_bad;
2819 }
2820
2821 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
2823 write_targets += read_targets-1;
2824
2825 if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
2829 sector_t rv;
2830 if (min_bad > 0)
2831 max_sector = sector_nr + min_bad;
2832 rv = max_sector - sector_nr;
2833 *skipped = 1;
2834 put_buf(r1_bio);
2835 return rv;
2836 }
2837
2838 if (max_sector > mddev->resync_max)
2839 max_sector = mddev->resync_max;
2840 if (max_sector > sector_nr + good_sectors)
2841 max_sector = sector_nr + good_sectors;
2842 nr_sectors = 0;
2843 sync_blocks = 0;
2844 do {
2845 struct page *page;
2846 int len = PAGE_SIZE;
2847 if (sector_nr + (len>>9) > max_sector)
2848 len = (max_sector - sector_nr) << 9;
2849 if (len == 0)
2850 break;
2851 if (sync_blocks == 0) {
2852 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2853 &sync_blocks, still_degraded) &&
2854 !conf->fullsync &&
2855 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2856 break;
2857 if ((len >> 9) > sync_blocks)
2858 len = sync_blocks<<9;
2859 }
2860
2861 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2862 struct resync_pages *rp;
2863
2864 bio = r1_bio->bios[i];
2865 rp = get_resync_pages(bio);
2866 if (bio->bi_end_io) {
2867 page = resync_fetch_page(rp, page_idx);

				/*
				 * won't fail because the vec table is big
				 * enough to hold all these pages
				 */
2873 bio_add_page(bio, page, len, 0);
2874 }
2875 }
2876 nr_sectors += len>>9;
2877 sector_nr += len>>9;
2878 sync_blocks -= (len>>9);
2879 } while (++page_idx < RESYNC_PAGES);
2880
2881 r1_bio->sectors = nr_sectors;
2882
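	/*
	 * For clustered raid1, advertise the window we are about to resync
	 * so that the other nodes can suspend normal IO to that region.
	 */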
2883 if (mddev_is_clustered(mddev) &&
2884 conf->cluster_sync_high < sector_nr + nr_sectors) {
2885 conf->cluster_sync_low = mddev->curr_resync_completed;
2886 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2887
2888 md_cluster_ops->resync_info_update(mddev,
2889 conf->cluster_sync_low,
2890 conf->cluster_sync_high);
2891 }
2892
	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
2896 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2897 atomic_set(&r1_bio->remaining, read_targets);
2898 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2899 bio = r1_bio->bios[i];
2900 if (bio->bi_end_io == end_sync_read) {
2901 read_targets--;
2902 md_sync_acct_bio(bio, nr_sectors);
2903 if (read_targets == 1)
2904 bio->bi_opf &= ~MD_FAILFAST;
2905 submit_bio_noacct(bio);
2906 }
2907 }
2908 } else {
2909 atomic_set(&r1_bio->remaining, 1);
2910 bio = r1_bio->bios[r1_bio->read_disk];
2911 md_sync_acct_bio(bio, nr_sectors);
2912 if (read_targets == 1)
2913 bio->bi_opf &= ~MD_FAILFAST;
2914 submit_bio_noacct(bio);
2915 }
2916 return nr_sectors;
2917}
2918
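/*
 * Report the array size: every raid1 member mirrors the whole array, so the
 * usable size is simply the per-device size.
 */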
2919static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2920{
2921 if (sectors)
2922 return sectors;
2923
2924 return mddev->dev_sectors;
2925}
2926
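/*
 * Allocate and initialise the private r1conf for this array: the barrier
 * bucket counters, the mirror slots, the r1bio mempool and the raid1d
 * thread.  Called from raid1_run() and raid1_takeover().
 */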
2927static struct r1conf *setup_conf(struct mddev *mddev)
2928{
2929 struct r1conf *conf;
2930 int i;
2931 struct raid1_info *disk;
2932 struct md_rdev *rdev;
2933 int err = -ENOMEM;
2934
2935 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2936 if (!conf)
2937 goto abort;
2938
2939 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2940 sizeof(atomic_t), GFP_KERNEL);
2941 if (!conf->nr_pending)
2942 goto abort;
2943
2944 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2945 sizeof(atomic_t), GFP_KERNEL);
2946 if (!conf->nr_waiting)
2947 goto abort;
2948
2949 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2950 sizeof(atomic_t), GFP_KERNEL);
2951 if (!conf->nr_queued)
2952 goto abort;
2953
2954 conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2955 sizeof(atomic_t), GFP_KERNEL);
2956 if (!conf->barrier)
2957 goto abort;
2958
2959 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
2960 mddev->raid_disks, 2),
2961 GFP_KERNEL);
2962 if (!conf->mirrors)
2963 goto abort;
2964
2965 conf->tmppage = alloc_page(GFP_KERNEL);
2966 if (!conf->tmppage)
2967 goto abort;
2968
2969 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2970 if (!conf->poolinfo)
2971 goto abort;
2972 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2973 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
2974 rbio_pool_free, conf->poolinfo);
2975 if (err)
2976 goto abort;
2977
2978 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
2979 if (err)
2980 goto abort;
2981
2982 conf->poolinfo->mddev = mddev;
2983
2984 err = -EINVAL;
2985 spin_lock_init(&conf->device_lock);
2986 rdev_for_each(rdev, mddev) {
2987 int disk_idx = rdev->raid_disk;
2988 if (disk_idx >= mddev->raid_disks
2989 || disk_idx < 0)
2990 continue;
2991 if (test_bit(Replacement, &rdev->flags))
2992 disk = conf->mirrors + mddev->raid_disks + disk_idx;
2993 else
2994 disk = conf->mirrors + disk_idx;
2995
2996 if (disk->rdev)
2997 goto abort;
2998 disk->rdev = rdev;
2999 disk->head_position = 0;
3000 disk->seq_start = MaxSector;
3001 }
3002 conf->raid_disks = mddev->raid_disks;
3003 conf->mddev = mddev;
3004 INIT_LIST_HEAD(&conf->retry_list);
3005 INIT_LIST_HEAD(&conf->bio_end_io_list);
3006
3007 spin_lock_init(&conf->resync_lock);
3008 init_waitqueue_head(&conf->wait_barrier);
3009
3010 bio_list_init(&conf->pending_bio_list);
3011 conf->pending_count = 0;
3012 conf->recovery_disabled = mddev->recovery_disabled - 1;
3013
3014 err = -EIO;
3015 for (i = 0; i < conf->raid_disks * 2; i++) {
3016
3017 disk = conf->mirrors + i;
3018
3019 if (i < conf->raid_disks &&
3020 disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
3022 if (!disk->rdev) {
				/* No original, just make the replacement
				 * a recovering spare
				 */
3026 disk->rdev =
3027 disk[conf->raid_disks].rdev;
3028 disk[conf->raid_disks].rdev = NULL;
3029 } else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
3031 goto abort;
3032 }
3033
3034 if (!disk->rdev ||
3035 !test_bit(In_sync, &disk->rdev->flags)) {
3036 disk->head_position = 0;
3037 if (disk->rdev &&
3038 (disk->rdev->saved_raid_disk < 0))
3039 conf->fullsync = 1;
3040 }
3041 }
3042
3043 err = -ENOMEM;
3044 conf->thread = md_register_thread(raid1d, mddev, "raid1");
3045 if (!conf->thread)
3046 goto abort;
3047
3048 return conf;
3049
3050 abort:
3051 if (conf) {
3052 mempool_exit(&conf->r1bio_pool);
3053 kfree(conf->mirrors);
3054 safe_put_page(conf->tmppage);
3055 kfree(conf->poolinfo);
3056 kfree(conf->nr_pending);
3057 kfree(conf->nr_waiting);
3058 kfree(conf->nr_queued);
3059 kfree(conf->barrier);
3060 bioset_exit(&conf->bio_split);
3061 kfree(conf);
3062 }
3063 return ERR_PTR(err);
3064}
3065
3066static void raid1_free(struct mddev *mddev, void *priv);
3067static int raid1_run(struct mddev *mddev)
3068{
3069 struct r1conf *conf;
3070 int i;
3071 struct md_rdev *rdev;
3072 int ret;
3073 bool discard_supported = false;
3074
3075 if (mddev->level != 1) {
3076 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3077 mdname(mddev), mddev->level);
3078 return -EIO;
3079 }
3080 if (mddev->reshape_position != MaxSector) {
3081 pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3082 mdname(mddev));
3083 return -EIO;
3084 }
3085 if (mddev_init_writes_pending(mddev) < 0)
3086 return -ENOMEM;
3087
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area (whatever we allocate in run() should be
	 * freed in raid1_free()).
	 */
3092 if (mddev->private == NULL)
3093 conf = setup_conf(mddev);
3094 else
3095 conf = mddev->private;
3096
3097 if (IS_ERR(conf))
3098 return PTR_ERR(conf);
3099
3100 if (mddev->queue) {
3101 blk_queue_max_write_same_sectors(mddev->queue, 0);
3102 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3103 }
3104
3105 rdev_for_each(rdev, mddev) {
3106 if (!mddev->gendisk)
3107 continue;
3108 disk_stack_limits(mddev->gendisk, rdev->bdev,
3109 rdev->data_offset << 9);
3110 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3111 discard_supported = true;
3112 }
3113
3114 mddev->degraded = 0;
3115 for (i = 0; i < conf->raid_disks; i++)
3116 if (conf->mirrors[i].rdev == NULL ||
3117 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3118 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3119 mddev->degraded++;

	/*
	 * RAID1 needs at least one active, in-sync disk
	 */
3123 if (conf->raid_disks - mddev->degraded < 1) {
3124 ret = -EINVAL;
3125 goto abort;
3126 }
3127
3128 if (conf->raid_disks - mddev->degraded == 1)
3129 mddev->recovery_cp = MaxSector;
3130
3131 if (mddev->recovery_cp != MaxSector)
3132 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3133 mdname(mddev));
3134 pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3135 mdname(mddev), mddev->raid_disks - mddev->degraded,
3136 mddev->raid_disks);
3137
	/*
	 * Ok, everything is just fine now
	 */
3141 mddev->thread = conf->thread;
3142 conf->thread = NULL;
3143 mddev->private = conf;
3144 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3145
3146 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3147
3148 if (mddev->queue) {
3149 if (discard_supported)
3150 blk_queue_flag_set(QUEUE_FLAG_DISCARD,
3151 mddev->queue);
3152 else
3153 blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
3154 mddev->queue);
3155 }
3156
3157 ret = md_integrity_register(mddev);
3158 if (ret) {
3159 md_unregister_thread(&mddev->thread);
3160 goto abort;
3161 }
3162 return 0;
3163
3164abort:
3165 raid1_free(mddev, conf);
3166 return ret;
3167}
3168
3169static void raid1_free(struct mddev *mddev, void *priv)
3170{
3171 struct r1conf *conf = priv;
3172
3173 mempool_exit(&conf->r1bio_pool);
3174 kfree(conf->mirrors);
3175 safe_put_page(conf->tmppage);
3176 kfree(conf->poolinfo);
3177 kfree(conf->nr_pending);
3178 kfree(conf->nr_waiting);
3179 kfree(conf->nr_queued);
3180 kfree(conf->barrier);
3181 bioset_exit(&conf->bio_split);
3182 kfree(conf);
3183}
3184
3185static int raid1_resize(struct mddev *mddev, sector_t sectors)
3186{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
3194 sector_t newsize = raid1_size(mddev, sectors, 0);
3195 if (mddev->external_size &&
3196 mddev->array_sectors > newsize)
3197 return -EINVAL;
3198 if (mddev->bitmap) {
3199 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3200 if (ret)
3201 return ret;
3202 }
3203 md_set_array_sectors(mddev, newsize);
3204 if (sectors > mddev->dev_sectors &&
3205 mddev->recovery_cp > mddev->dev_sectors) {
3206 mddev->recovery_cp = mddev->dev_sectors;
3207 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3208 }
3209 mddev->dev_sectors = sectors;
3210 mddev->resync_max_sectors = sectors;
3211 return 0;
3212}
3213
3214static int raid1_reshape(struct mddev *mddev)
3215{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices get the higher raid_disk numbers.
	 */
3227 mempool_t newpool, oldpool;
3228 struct pool_info *newpoolinfo;
3229 struct raid1_info *newmirrors;
3230 struct r1conf *conf = mddev->private;
3231 int cnt, raid_disks;
3232 unsigned long flags;
3233 int d, d2;
3234 int ret;
3235
3236 memset(&newpool, 0, sizeof(newpool));
3237 memset(&oldpool, 0, sizeof(oldpool));
3238
	/* Cannot change chunk_size, layout, or level */
3240 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3241 mddev->layout != mddev->new_layout ||
3242 mddev->level != mddev->new_level) {
3243 mddev->new_chunk_sectors = mddev->chunk_sectors;
3244 mddev->new_layout = mddev->layout;
3245 mddev->new_level = mddev->level;
3246 return -EINVAL;
3247 }
3248
3249 if (!mddev_is_clustered(mddev))
3250 md_allow_write(mddev);
3251
3252 raid_disks = mddev->raid_disks + mddev->delta_disks;
3253
3254 if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
3257 if (conf->mirrors[d].rdev)
3258 cnt++;
3259 if (cnt > raid_disks)
3260 return -EBUSY;
3261 }
3262
3263 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3264 if (!newpoolinfo)
3265 return -ENOMEM;
3266 newpoolinfo->mddev = mddev;
3267 newpoolinfo->raid_disks = raid_disks * 2;
3268
3269 ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
3270 rbio_pool_free, newpoolinfo);
3271 if (ret) {
3272 kfree(newpoolinfo);
3273 return ret;
3274 }
3275 newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3276 raid_disks, 2),
3277 GFP_KERNEL);
3278 if (!newmirrors) {
3279 kfree(newpoolinfo);
3280 mempool_exit(&newpool);
3281 return -ENOMEM;
3282 }
3283
3284 freeze_array(conf, 0);
3285
	/* ok, everything is stopped */
3287 oldpool = conf->r1bio_pool;
3288 conf->r1bio_pool = newpool;
3289
3290 for (d = d2 = 0; d < conf->raid_disks; d++) {
3291 struct md_rdev *rdev = conf->mirrors[d].rdev;
3292 if (rdev && rdev->raid_disk != d2) {
3293 sysfs_unlink_rdev(mddev, rdev);
3294 rdev->raid_disk = d2;
3295 sysfs_unlink_rdev(mddev, rdev);
3296 if (sysfs_link_rdev(mddev, rdev))
3297 pr_warn("md/raid1:%s: cannot register rd%d\n",
3298 mdname(mddev), rdev->raid_disk);
3299 }
3300 if (rdev)
3301 newmirrors[d2++].rdev = rdev;
3302 }
3303 kfree(conf->mirrors);
3304 conf->mirrors = newmirrors;
3305 kfree(conf->poolinfo);
3306 conf->poolinfo = newpoolinfo;
3307
3308 spin_lock_irqsave(&conf->device_lock, flags);
3309 mddev->degraded += (raid_disks - conf->raid_disks);
3310 spin_unlock_irqrestore(&conf->device_lock, flags);
3311 conf->raid_disks = mddev->raid_disks = raid_disks;
3312 mddev->delta_disks = 0;
3313
3314 unfreeze_array(conf);
3315
3316 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3317 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3318 md_wakeup_thread(mddev->thread);
3319
3320 mempool_exit(&oldpool);
3321 return 0;
3322}
3323
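/*
 * Quiesce or resume all IO on the array by freezing/unfreezing the request
 * barriers; md core calls this around suspend and similar maintenance.
 */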
3324static void raid1_quiesce(struct mddev *mddev, int quiesce)
3325{
3326 struct r1conf *conf = mddev->private;
3327
3328 if (quiesce)
3329 freeze_array(conf, 0);
3330 else
3331 unfreeze_array(conf);
3332}
3333
3334static void *raid1_takeover(struct mddev *mddev)
3335{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
3339 if (mddev->level == 5 && mddev->raid_disks == 2) {
3340 struct r1conf *conf;
3341 mddev->new_level = 1;
3342 mddev->new_layout = 0;
3343 mddev->new_chunk_sectors = 0;
3344 conf = setup_conf(mddev);
3345 if (!IS_ERR(conf)) {
			/* Array must appear to be quiesced */
3347 conf->array_frozen = 1;
3348 mddev_clear_unsupported_flags(mddev,
3349 UNSUPPORTED_MDDEV_FLAGS);
3350 }
3351 return conf;
3352 }
3353 return ERR_PTR(-EINVAL);
3354}
3355
3356static struct md_personality raid1_personality =
3357{
3358 .name = "raid1",
3359 .level = 1,
3360 .owner = THIS_MODULE,
3361 .make_request = raid1_make_request,
3362 .run = raid1_run,
3363 .free = raid1_free,
3364 .status = raid1_status,
3365 .error_handler = raid1_error,
3366 .hot_add_disk = raid1_add_disk,
3367 .hot_remove_disk= raid1_remove_disk,
3368 .spare_active = raid1_spare_active,
3369 .sync_request = raid1_sync_request,
3370 .resize = raid1_resize,
3371 .size = raid1_size,
3372 .check_reshape = raid1_reshape,
3373 .quiesce = raid1_quiesce,
3374 .takeover = raid1_takeover,
3375};
3376
3377static int __init raid_init(void)
3378{
3379 return register_md_personality(&raid1_personality);
3380}
3381
3382static void raid_exit(void)
3383{
3384 unregister_md_personality(&raid1_personality);
3385}
3386
3387module_init(raid_init);
3388module_exit(raid_exit);
3389MODULE_LICENSE("GPL");
3390MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3391MODULE_ALIAS("md-personality-3");
3392MODULE_ALIAS("md-raid1");
3393MODULE_ALIAS("md-level-1");
3394
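/*
 * max_queued_requests (see raid1-10.c) caps how many write requests may be
 * plugged/queued before new writers are made to wait.
 */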
3395module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
3396