/*
 * raid1.c : Multiple Devices driver for Linux -- RAID-1 management functions.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"

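/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */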
#define NR_RAID1_BIOS 256

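/*
 * When we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
 */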
#define IO_BLOCKED ((struct bio *)1)

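/*
 * When we successfully write to a known bad-block, we need to remove the
 * bad-block marking, which must be done in process context.  So we record
 * the success by setting the r1bio's bios[n] pointer to IO_MADE_GOOD.
 */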
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

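/*
 * When there are this many requests queued to be written by
 * the raid1 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */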
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
			  sector_t bi_sector);
static void lower_barrier(struct r1conf *conf);

static void *r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)

static void *r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

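	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */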
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}

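	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */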
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < need_pages; j++) {
		bio = r1_bio->bios[j];
		bio->bi_vcnt = RESYNC_PAGES;

		if (bio_alloc_pages(bio, gfp_flags))
			goto out_free_pages;
	}
	/* If not user-requested, share the first bio's pages with all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i = 0; i < RESYNC_PAGES; i++)
			for (j = 1; j < pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0) {
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, r1_bio->bios[j], i)
			__free_page(bv->bv_page);
	}

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i, j;
	struct r1bio *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j--; ) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i = 0; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

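/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */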
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	int done;
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t start_next_window = r1_bio->start_next_window;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * make_request() might be waiting for
		 * bi_phys_segments to decrease
		 */
		wake_up(&conf->wait_barrier);
	} else
		done = 1;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf, start_next_window, bi_sector);
	}
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

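/*
 * Update disk head position estimator based on IRQ completion info.
 */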
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

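/*
 * Find the disk number which triggered given bio
 */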
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Known good".
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(
			KERN_ERR "md/raid1:%s: %s: "
			"rescheduling sector %llu\n",
			mdname(conf->mddev),
			bdevname(conf->mirrors[mirror].rdev->bdev,
				 b),
			(unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		/* free extra copy of the data pages */
		int i = r1_bio->behind_page_count;
		while (i--)
			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
		kfree(r1_bio->behind_bvecs);
		r1_bio->behind_bvecs = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			r1_bio->sectors,
			!test_bit(R1BIO_Degraded, &r1_bio->state),
			test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;

	mirror = find_bio_disk(r1_bio, bio);

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		set_bit(WriteErrorSeen,
			&conf->mirrors[mirror].rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &conf->mirrors[mirror].rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code for to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially have it written on the device that had just
		 * failed) - in case of that, an I/O error is returned and the
		 * block is marked dirty.
		 */
		if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
		    !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(conf->mirrors[mirror].rdev,
				r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors)) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'd want to ensure that
		 * we're done with everything...
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(conf->mirrors[mirror].rdev,
				 conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

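/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */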
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;

	choose_first = (conf->mddev->recovery_cp < this_sector + sectors);

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Unmerged, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad < this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device.
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
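			/*
			 * If buffered sequential IO size exceeds the optimal
			 * iosize, check if there is an idle disk and, if so,
			 * choose it.  The first disk is guaranteed at least
			 * one optimal-iosize run of sequential IO; the second
			 * disk may only get a short IO, but it was idle
			 * anyway, so this trade-off is acceptable.
			 */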
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}
		/* If device is idle, use it */
		if (pending == 0) {
			best_disk = disk;
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

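	/*
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with the fewest pending requests
	 * even if that disk is rotational, which might or might not be optimal
	 * for arrays with mixed rotational/non-rotational disks depending on
	 * workload.
	 */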
	if (best_disk == -1) {
		if (has_nonrot_disk)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}

static int raid1_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r1conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max = biovec->bv_len;

	if (mddev->merge_check_needed) {
		int disk;
		rcu_read_lock();
		for (disk = 0; disk < conf->raid_disks * 2; disk++) {
			struct md_rdev *rdev = rcu_dereference(
				conf->mirrors[disk].rdev);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = sector +
						rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
		}
		rcu_read_unlock();
	}
	return max;
}

int md_raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << BDI_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_async_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(md_raid1_congested);

static int raid1_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid1_congested(mddev, bits);
}

static void flush_pending_writes(struct r1conf *conf)
{
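	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */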
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) {
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
				/* Just ignore it */
				bio_endio(bio, 0);
			else
				generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

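/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO,
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO happens only during resync/recovery/reconfig or
 * anything else which requires 'something' to wait for.
 */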
static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;
	conf->next_resync = sector_nr;

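	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while barrier >= RESYNC_DEPTH, meaning resync reached
	 *    the maximum count which is allowed
	 * C: while next_resync + RESYNC_SECTORS > start_next_window, meaning
	 *    the next resync would reach the window which normal bios are
	 *    handling
	 */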
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    conf->barrier < RESYNC_DEPTH &&
			    conf->current_window_requests == 0 &&
			    (conf->start_next_window >=
			     conf->next_resync + RESYNC_SECTORS),
			    conf->resync_lock);

	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r1conf *conf)
{
	unsigned long flags;
	BUG_ON(conf->barrier <= 0);
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
{
	bool wait = false;

	if (conf->array_frozen || !bio)
		wait = true;
	else if (conf->barrier && bio_data_dir(bio) == WRITE) {
		if ((conf->mddev->curr_resync_completed
		     >= bio_end_sector(bio)) ||
		    (conf->next_resync + NEXT_NORMALIO_DISTANCE
		     <= bio->bi_iter.bi_sector))
			wait = false;
		else
			wait = true;
	}

	return wait;
}

static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
{
	sector_t sector = 0;

	spin_lock_irq(&conf->resync_lock);
	if (need_to_wait_for_sync(conf, bio)) {
		conf->nr_waiting++;
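		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */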
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->array_frozen &&
				    (!conf->barrier ||
				     ((conf->start_next_window <
				       conf->next_resync + RESYNC_SECTORS) &&
				      current->bio_list &&
				      !bio_list_empty(current->bio_list))),
				    conf->resync_lock);
		conf->nr_waiting--;
	}

	if (bio && bio_data_dir(bio) == WRITE) {
		if (bio->bi_iter.bi_sector >=
		    conf->mddev->curr_resync_completed) {
			if (conf->start_next_window == MaxSector)
				conf->start_next_window =
					conf->next_resync +
					NEXT_NORMALIO_DISTANCE;

			if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
			    <= bio->bi_iter.bi_sector)
				conf->next_window_requests++;
			else
				conf->current_window_requests++;
			sector = conf->start_next_window;
		}
	}

	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
	return sector;
}

static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
			  sector_t bi_sector)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	if (start_next_window) {
		if (start_next_window == conf->start_next_window) {
			if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
			    <= bi_sector)
				conf->next_window_requests--;
			else
				conf->current_window_requests--;
		} else
			conf->current_window_requests--;

		if (!conf->current_window_requests) {
			if (conf->next_window_requests) {
				conf->current_window_requests =
					conf->next_window_requests;
				conf->next_window_requests = 0;
				conf->start_next_window +=
					NEXT_NORMALIO_DISTANCE;
			} else
				conf->start_next_window = MaxSector;
		}
	}
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r1conf *conf, int extra)
{
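	/* Stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We wait until nr_pending matches nr_queued+extra.
	 * This is called in the context of one normal IO request
	 * that has failed.  Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (extra)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */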
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued+extra,
				conf->resync_lock,
				flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}

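/* duplicate the data pages for behind I/O
 */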
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
	int i;
	struct bio_vec *bvec;
	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
					GFP_NOIO);
	if (unlikely(!bvecs))
		return;

	bio_for_each_segment_all(bvec, bio, i) {
		bvecs[i] = *bvec;
		bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (unlikely(!bvecs[i].bv_page))
			goto do_sync_io;
		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(bvecs[i].bv_page);
		kunmap(bvec->bv_page);
	}
	r1_bio->behind_bvecs = bvecs;
	r1_bio->behind_page_count = bio->bi_vcnt;
	set_bit(R1BIO_BehindIO, &r1_bio->state);
	return;

do_sync_io:
	for (i = 0; i < bio->bi_vcnt; i++)
		if (bvecs[i].bv_page)
			put_page(bvecs[i].bv_page);
	kfree(bvecs);
	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
}

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) {
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
			/* Just ignore it */
			bio_endio(bio, 0);
		else
			generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}

static void make_request(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct r1bio *r1_bio;
	struct bio *read_bio;
	int i, disks;
	struct bitmap *bitmap;
	unsigned long flags;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
	const unsigned long do_discard = (bio->bi_rw
					  & (REQ_DISCARD | REQ_SECURE));
	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	int first_clone;
	int sectors_handled;
	int max_sectors;
	sector_t start_next_window;

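	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a barrier for new requests.
	 * Continue immediately if no resync is active currently.
	 */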
	md_write_start(mddev, bio); /* wait on superblock update early */

	if (bio_data_dir(bio) == WRITE &&
	    bio_end_sector(bio) > mddev->suspend_lo &&
	    bio->bi_iter.bi_sector < mddev->suspend_hi) {
		/* As the suspend_* range is controlled by
		 * userspace, we want an interruptible
		 * wait.
		 */
		DEFINE_WAIT(w);
		for (;;) {
			flush_signals(current);
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_INTERRUPTIBLE);
			if (bio_end_sector(bio) <= mddev->suspend_lo ||
			    bio->bi_iter.bi_sector >= mddev->suspend_hi)
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	start_next_window = wait_barrier(conf, bio);

	bitmap = mddev->bitmap;

	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;

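	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r1_bio and no locking
	 * will be needed when requests complete.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */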
	bio->bi_phys_segments = 0;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk;

read_again:
		rdisk = read_balance(conf, r1_bio, &max_sectors);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return;
		}
		mirror = conf->mirrors + rdisk;

		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
		    bitmap) {
			/* Reading from a write-mostly device must
			 * take care not to over-take any writes
			 * that are 'behind'
			 */
			wait_event(bitmap->behind_wait,
				   atomic_read(&bitmap->behind_writes) == 0);
		}
		r1_bio->read_disk = rdisk;
		r1_bio->start_next_window = 0;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
			 max_sectors);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_iter.bi_sector = r1_bio->sector +
			mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		if (max_sectors < r1_bio->sectors) {
			/* could not read all from this device, so we will
			 * need another r1_bio.
			 */
			sectors_handled = (r1_bio->sector + max_sectors
					   - bio->bi_iter.bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __make_request
			 * and subsequent mempool_alloc might block waiting
			 * for it.  So hand bio over to raid1d.
			 */
			reschedule_retry(r1_bio);

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = bio;
			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
			r1_bio->state = 0;
			r1_bio->mddev = mddev;
			r1_bio->sector = bio->bi_iter.bi_sector +
				sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return;
	}

	/*
	 * WRITE:
	 */
	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}

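	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * r1_bio->bios[x] to bio.
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid using those
	 * blocks.
	 * This potentially requires several writes to write around
	 * the bad blocks.  Each set of writes gets its own r1bio
	 * with a set of bios attached.
	 */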
	disks = conf->raid_disks * 2;
 retry_write:
	r1_bio->start_next_window = start_next_window;
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r1_bio->sectors;
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)
		    || test_bit(Unmerged, &rdev->flags)) {
			if (i < conf->raid_disks)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			continue;
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector,
					     max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is
				 * acknowledged */
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log.
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;
		sector_t old = start_next_window;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		start_next_window = wait_barrier(conf, bio);
		/*
		 * We must make sure the multi r1bios of bio have
		 * the same value of bi_phys_segments
		 */
		if (bio->bi_phys_segments && old &&
		    old != start_next_window)
			/* Wait for the former r1bio(s) to finish */
			wait_event(conf->wait_barrier,
				   bio->bi_phys_segments == 1);
		goto retry_write;
	}

	if (max_sectors < r1_bio->sectors) {
		/* We are splitting this write into multiple parts, so
		 * we need to prepare for allocating in the next r1_bio.
		 */
		r1_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
	}
	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);

		if (first_clone) {
			/* do behind I/O ?
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
			if (bitmap &&
			    (atomic_read(&bitmap->behind_writes)
			     < mddev->bitmap_info.max_write_behind) &&
			    !waitqueue_active(&bitmap->behind_wait))
				alloc_behind_pages(mbio, r1_bio);

			bitmap_startwrite(bitmap, r1_bio->sector,
					  r1_bio->sectors,
					  test_bit(R1BIO_BehindIO,
						   &r1_bio->state));
			first_clone = 0;
		}
		if (r1_bio->behind_bvecs) {
			struct bio_vec *bvec;
			int j;

			/*
			 * We trimmed the bio, so _all is legit
			 */
			bio_for_each_segment_all(bvec, mbio, j)
				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		r1_bio->bios[i] = mbio;

		mbio->bi_iter.bi_sector	= (r1_bio->sector +
					   conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io	= raid1_end_write_request;
		mbio->bi_rw =
			WRITE | do_flush_fua | do_sync | do_discard | do_same;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);

		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
		if (cb)
			plug = container_of(cb, struct raid1_plug_cb, cb);
		else
			plug = NULL;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (plug) {
			bio_list_add(&plug->pending, mbio);
			plug->pending_cnt++;
		} else {
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
		}
		spin_unlock_irqrestore(&conf->device_lock, flags);
		if (!plug)
			md_wakeup_thread(mddev->thread);
	}
	/* Mustn't call r1_bio_write_done before this next test,
	 * as it could result in the bio being freed.
	 */
	if (sectors_handled < bio_sectors(bio)) {
		r1_bio_write_done(r1_bio);
		/* We need another r1_bio.  It has already been counted
		 * in bio->bi_phys_segments
		 */
		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
		r1_bio->master_bio = bio;
		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
		r1_bio->state = 0;
		r1_bio->mddev = mddev;
		r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
		goto retry_write;
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}

static void error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r1conf *conf = mddev->private;

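	/*
	 * If it is not operational, then we have already marked it as dead;
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know;
	 * else mark the drive as failed.
	 */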
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		conf->recovery_disabled = mddev->recovery_disabled;
		return;
	}
	set_bit(Blocked, &rdev->flags);
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		set_bit(Faulty, &rdev->flags);
	/*
	 * if recovery is running, make sure it aborts.
	 */
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
	       "md/raid1:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->raid_disks - mddev->degraded);
}

static void print_conf(struct r1conf *conf)
{
	int i;

	printk(KERN_DEBUG "RAID1 conf printout:\n");
	if (!conf) {
		printk(KERN_DEBUG "(!conf)\n");
		return;
	}
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
	       conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev, b));
	}
	rcu_read_unlock();
}

static void close_sync(struct r1conf *conf)
{
	wait_barrier(conf, NULL);
	allow_barrier(conf, 0, 0);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;

	spin_lock_irq(&conf->resync_lock);
	conf->next_resync = 0;
	conf->start_next_window = MaxSector;
	conf->current_window_requests +=
		conf->next_window_requests;
	conf->next_window_requests = 0;
	spin_unlock_irq(&conf->resync_lock);
}

static int raid1_spare_active(struct mddev *mddev)
{
	int i;
	struct r1conf *conf = mddev->private;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
		if (repl
		    && repl->recovery_offset == MaxSector
		    && !test_bit(Faulty, &repl->flags)
		    && !test_and_set_bit(In_sync, &repl->flags)) {
			/* replacement has just become active */
			if (!rdev ||
			    !test_and_clear_bit(In_sync, &rdev->flags))
				count++;
			if (rdev) {
				/* Replaced device not technically
				 * faulty, but we need to be sure
				 * it gets removed and never re-added
				 */
				set_bit(Faulty, &rdev->flags);
				sysfs_notify_dirent_safe(
					rdev->sysfs_state);
			}
		}
		if (rdev
		    && rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}

static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	struct raid1_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;
	struct request_queue *q = bdev_get_queue(rdev->bdev);

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (q->merge_bvec_fn) {
		set_bit(Unmerged, &rdev->flags);
		mddev->merge_check_needed = 1;
	}

	for (mirror = first; mirror <= last; mirror++) {
		p = conf->mirrors + mirror;
		if (!p->rdev) {

			if (mddev->gendisk)
				disk_stack_limits(mddev->gendisk, rdev->bdev,
						  rdev->data_offset << 9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p[conf->raid_disks].rdev == NULL) {
			/* Add this device as a replacement */
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;
			err = 0;
			conf->fullsync = 1;
			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
			break;
		}
	}
	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
		/* Some requests might not have seen this new
		 * merge_bvec_fn.  We must wait for requests to complete
		 * without merging so that in-flight IO cannot race with
		 * the new function before we clear Unmerged.
		 */
		synchronize_sched();
		freeze_array(conf, 0);
		unfreeze_array(conf);
		clear_bit(Unmerged, &rdev->flags);
	}
	md_integrity_add_rdev(rdev, mddev);
	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	print_conf(conf);
	return err;
}

static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct raid1_info *p = conf->mirrors + number;

	if (rdev != p->rdev)
		p = conf->mirrors + conf->raid_disks + number;

	print_conf(conf);
	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->recovery_disabled != conf->recovery_disabled &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
			goto abort;
		} else if (conf->mirrors[conf->raid_disks + number].rdev) {
			/* We just removed a device that is being replaced.
			 * Move down the replacement.  We drain all IO before
			 * doing this to avoid confusion.
			 */
			struct md_rdev *repl =
				conf->mirrors[conf->raid_disks + number].rdev;
			freeze_array(conf, 0);
			clear_bit(Replacement, &repl->flags);
			p->rdev = repl;
			conf->mirrors[conf->raid_disks + number].rdev = NULL;
			unfreeze_array(conf);
			clear_bit(WantReplacement, &rdev->flags);
		} else
			clear_bit(WantReplacement, &rdev->flags);
		err = md_integrity_register(mddev);
	}
abort:

	print_conf(conf);
	return err;
}

static void end_sync_read(struct bio *bio, int error)
{
	struct r1bio *r1_bio = bio->bi_private;

	update_head_pos(r1_bio->read_disk, r1_bio);

	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}

static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int mirror = 0;
	sector_t first_bad;
	int bad_sectors;

	mirror = find_bio_disk(r1_bio, bio);

	if (!uptodate) {
		sector_t sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;

		do {
			bitmap_end_sync(mddev->bitmap, s,
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		set_bit(WriteErrorSeen,
			&conf->mirrors[mirror].rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &conf->mirrors[mirror].rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				mddev->recovery);
		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else if (is_badblock(conf->mirrors[mirror].rdev,
			       r1_bio->sector,
			       r1_bio->sectors,
			       &first_bad, &bad_sectors) &&
		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
				r1_bio->sector,
				r1_bio->sectors,
				&first_bad, &bad_sectors)
		)
		set_bit(R1BIO_MadeGood, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, uptodate);
		}
	}
}

static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
			   int sectors, struct page *page, int rw)
{
	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
		/* success */
		return 1;
	if (rw == WRITE) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				rdev->mddev->recovery);
	}
	/* need to record an error - either for the block or the device */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
	return 0;
}

static int fix_sync_read_error(struct r1bio *r1_bio)
{
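	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors.  Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
	 * We don't need to check is_badblock() again as we
	 * made sure that anything with a bad block in range
	 * will have bi_end_io clear.
	 */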
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
	sector_t sect = r1_bio->sector;
	int sectors = r1_bio->sectors;
	int idx = 0;

	while (sectors) {
		int s = sectors;
		int d = r1_bio->read_disk;
		int success = 0;
		struct md_rdev *rdev;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;
		do {
			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
				/* No rcu protection needed here; devices
				 * can only be removed when no resync is
				 * active, and resync is currently active
				 */
				rdev = conf->mirrors[d].rdev;
				if (sync_page_io(rdev, sect, s<<9,
						 bio->bi_io_vec[idx].bv_page,
						 READ, false)) {
					success = 1;
					break;
				}
			}
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != r1_bio->read_disk);

		if (!success) {
			char b[BDEVNAME_SIZE];
			int abort = 0;
			/* Cannot read from anywhere, this block is lost.
			 * Record a bad block on each device.  If that doesn't
			 * work just disable and interrupt the recovery.
			 * Don't fail devices as that won't really help.
			 */
			printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
			       " for block %llu\n",
			       mdname(mddev),
			       bdevname(bio->bi_bdev, b),
			       (unsigned long long)r1_bio->sector);
			for (d = 0; d < conf->raid_disks * 2; d++) {
				rdev = conf->mirrors[d].rdev;
				if (!rdev || test_bit(Faulty, &rdev->flags))
					continue;
				if (!rdev_set_badblocks(rdev, sect, s, 0))
					abort = 1;
			}
			if (abort) {
				conf->recovery_disabled =
					mddev->recovery_disabled;
				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return 0;
			}
			/* Try next page */
			sectors -= s;
			sect += s;
			idx++;
			continue;
		}

		start = d;
		/* write it back and re-read */
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    bio->bi_io_vec[idx].bv_page,
					    WRITE) == 0) {
				r1_bio->bios[d]->bi_end_io = NULL;
				rdev_dec_pending(rdev, mddev);
			}
		}
		d = start;
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    bio->bi_io_vec[idx].bv_page,
					    READ) != 0)
				atomic_add(s, &rdev->corrected_errors);
		}
		sectors -= s;
		sect += s;
		idx++;
	}
	set_bit(R1BIO_Uptodate, &r1_bio->state);
	set_bit(BIO_UPTODATE, &bio->bi_flags);
	return 1;
}

static void process_checks(struct r1bio *r1_bio)
{
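	/* We have read all readable devices.  If we haven't
	 * got the block, then there is no hope left.
	 * If we have, then we want to do a comparison
	 * and skip the write if everything is the same.
	 * If any blocks failed to read, then we need to
	 * attempt an over-write
	 */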
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int primary;
	int i;
	int vcnt;

	/* Fix variable parts of all bios */
	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j;
		int size;
		int uptodate;
		struct bio *b = r1_bio->bios[i];
		if (b->bi_end_io != end_sync_read)
			continue;
		/* fixup the bio for reuse, but preserve BIO_UPTODATE */
		uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
		bio_reset(b);
		if (!uptodate)
			clear_bit(BIO_UPTODATE, &b->bi_flags);
		b->bi_vcnt = vcnt;
		b->bi_iter.bi_size = r1_bio->sectors << 9;
		b->bi_iter.bi_sector = r1_bio->sector +
			conf->mirrors[i].rdev->data_offset;
		b->bi_bdev = conf->mirrors[i].rdev->bdev;
		b->bi_end_io = end_sync_read;
		b->bi_private = r1_bio;

		size = b->bi_iter.bi_size;
		for (j = 0; j < vcnt ; j++) {
			struct bio_vec *bi;
			bi = &b->bi_io_vec[j];
			bi->bv_offset = 0;
			if (size > PAGE_SIZE)
				bi->bv_len = PAGE_SIZE;
			else
				bi->bv_len = size;
			size -= PAGE_SIZE;
		}
	}
	for (primary = 0; primary < conf->raid_disks * 2; primary++)
		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
			r1_bio->bios[primary]->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
			break;
		}
	r1_bio->read_disk = primary;
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j;
		struct bio *pbio = r1_bio->bios[primary];
		struct bio *sbio = r1_bio->bios[i];
		int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);

		if (sbio->bi_end_io != end_sync_read)
			continue;
		/* Now we can 'fixup' the BIO_UPTODATE flag */
		set_bit(BIO_UPTODATE, &sbio->bi_flags);

		if (uptodate) {
			for (j = vcnt; j-- ; ) {
				struct page *p, *s;
				p = pbio->bi_io_vec[j].bv_page;
				s = sbio->bi_io_vec[j].bv_page;
				if (memcmp(page_address(p),
					   page_address(s),
					   sbio->bi_io_vec[j].bv_len))
					break;
			}
		} else
			j = 0;
		if (j >= 0)
			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && uptodate)) {
			/* No need to write to this device. */
			sbio->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
			continue;
		}

		bio_copy_data(sbio, pbio);
	}
}

static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	int i;
	int disks = conf->raid_disks * 2;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		/* ouch - failed to read all of that. */
		if (!fix_sync_read_error(r1_bio))
			return;

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		process_checks(r1_bio);

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, 1);
		}
	}
}

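/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */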
static void fix_read_error(struct r1conf *conf, int read_disk,
			   sector_t sect, int sectors)
{
	struct mddev *mddev = conf->mddev;
	while (sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		struct md_rdev *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			/* Note: no rcu protection needed here
			 * as this is synchronous in the raid1d thread
			 * which is the thread that might remove
			 * a device.  If raid1d ever becomes multi-threaded,
			 * we will need rcu protection here.
			 */
			sector_t first_bad;
			int bad_sectors;

			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    (test_bit(In_sync, &rdev->flags) ||
			     (!test_bit(Faulty, &rdev->flags) &&
			      rdev->recovery_offset >= sect + s)) &&
			    is_badblock(rdev, sect, s,
					&first_bad, &bad_sectors) == 0 &&
			    sync_page_io(rdev, sect, s<<9,
					 conf->tmppage, READ, false))
				success = 1;
			else {
				d++;
				if (d == conf->raid_disks * 2)
					d = 0;
			}
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere - mark it bad */
			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
			if (!rdev_set_badblocks(rdev, sect, s, 0))
				md_error(mddev, rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags))
				r1_sync_page_io(rdev, sect, s,
						conf->tmppage, WRITE);
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (r1_sync_page_io(rdev, sect, s,
						    conf->tmppage, READ)) {
					atomic_add(s, &rdev->corrected_errors);
					printk(KERN_INFO
					       "md/raid1:%s: read error corrected "
					       "(%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
								    rdev->data_offset),
					       bdevname(rdev->bdev, b));
				}
			}
		}
		sectors -= s;
		sect += s;
	}
}

static int narrow_write_error(struct r1bio *r1_bio, int i)
{
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[i].rdev;
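	/* bio has the data to be written to device 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks.  We must handle this somehow.
	 *
	 * We currently own a reference on the rdev.
	 */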
	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r1_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = 1 << rdev->badblocks.shift;
	sector = r1_bio->sector;
	sectors = ((sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		- sector;

	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors' */

		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			unsigned vcnt = r1_bio->behind_page_count;
			struct bio_vec *vec = r1_bio->behind_bvecs;

			while (!vec->bv_page) {
				vec++;
				vcnt--;
			}

			wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
			memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));

			wbio->bi_vcnt = vcnt;
		} else {
			wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
		}

		wbio->bi_rw = WRITE;
		wbio->bi_iter.bi_sector = r1_bio->sector;
		wbio->bi_iter.bi_size = r1_bio->sectors << 9;

		bio_trim(wbio, sector - r1_bio->sector, sectors);
		wbio->bi_iter.bi_sector += rdev->data_offset;
		wbio->bi_bdev = rdev->bdev;
		if (submit_bio_wait(WRITE, wbio) == 0)
			/* failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}

static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	int s = r1_bio->sectors;
	for (m = 0; m < conf->raid_disks * 2 ; m++) {
		struct md_rdev *rdev = conf->mirrors[m].rdev;
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
		}
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
		}
	}
	put_buf(r1_bio);
	md_done_sync(conf->mddev, s, 1);
}

static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	for (m = 0; m < conf->raid_disks * 2 ; m++)
		if (r1_bio->bios[m] == IO_MADE_GOOD) {
			struct md_rdev *rdev = conf->mirrors[m].rdev;
			rdev_clear_badblocks(rdev,
					     r1_bio->sector,
					     r1_bio->sectors, 0);
			rdev_dec_pending(rdev, conf->mddev);
		} else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
			if (!narrow_write_error(r1_bio, m)) {
				md_error(conf->mddev,
					 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
				set_bit(R1BIO_Degraded, &r1_bio->state);
			}
			rdev_dec_pending(conf->mirrors[m].rdev,
					 conf->mddev);
		}
	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		close_write(r1_bio);
	raid_end_bio_io(r1_bio);
}

static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
	int disk;
	int max_sectors;
	struct mddev *mddev = conf->mddev;
	struct bio *bio;
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev;

	clear_bit(R1BIO_ReadError, &r1_bio->state);
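	/* we got a read error. Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write
	 * and check that this fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen.
	 */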
2329 if (mddev->ro == 0) {
2330 freeze_array(conf, 1);
2331 fix_read_error(conf, r1_bio->read_disk,
2332 r1_bio->sector, r1_bio->sectors);
2333 unfreeze_array(conf);
2334 } else
2335 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
2336 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
2337
2338 bio = r1_bio->bios[r1_bio->read_disk];
2339 bdevname(bio->bi_bdev, b);
2340read_more:
2341 disk = read_balance(conf, r1_bio, &max_sectors);
2342 if (disk == -1) {
2343 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
2344 " read error for block %llu\n",
2345 mdname(mddev), b, (unsigned long long)r1_bio->sector);
2346 raid_end_bio_io(r1_bio);
2347 } else {
2348 const unsigned long do_sync
2349 = r1_bio->master_bio->bi_rw & REQ_SYNC;
2350 if (bio) {
2351 r1_bio->bios[r1_bio->read_disk] =
2352 mddev->ro ? IO_BLOCKED : NULL;
2353 bio_put(bio);
2354 }
2355 r1_bio->read_disk = disk;
2356 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2357 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
2358 max_sectors);
2359 r1_bio->bios[r1_bio->read_disk] = bio;
2360 rdev = conf->mirrors[disk].rdev;
2361 printk_ratelimited(KERN_ERR
2362 "md/raid1:%s: redirecting sector %llu"
2363 " to other mirror: %s\n",
2364 mdname(mddev),
2365 (unsigned long long)r1_bio->sector,
2366 bdevname(rdev->bdev, b));
2367 bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
2368 bio->bi_bdev = rdev->bdev;
2369 bio->bi_end_io = raid1_end_read_request;
2370 bio->bi_rw = READ | do_sync;
2371 bio->bi_private = r1_bio;
2372 if (max_sectors < r1_bio->sectors) {
2373
2374 struct bio *mbio = r1_bio->master_bio;
2375 int sectors_handled = (r1_bio->sector + max_sectors
2376 - mbio->bi_iter.bi_sector);
2377 r1_bio->sectors = max_sectors;
2378 spin_lock_irq(&conf->device_lock);
2379 if (mbio->bi_phys_segments == 0)
2380 mbio->bi_phys_segments = 2;
2381 else
2382 mbio->bi_phys_segments++;
2383 spin_unlock_irq(&conf->device_lock);
2384 generic_make_request(bio);
2385 bio = NULL;
2386
2387 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
2388
2389 r1_bio->master_bio = mbio;
2390 r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
2391 r1_bio->state = 0;
2392 set_bit(R1BIO_ReadError, &r1_bio->state);
2393 r1_bio->mddev = mddev;
2394 r1_bio->sector = mbio->bi_iter.bi_sector +
2395 sectors_handled;
2396
2397 goto read_more;
2398 } else
2399 generic_make_request(bio);
2400 }
2401}

static void raid1d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);
		else
			/* just a partial read to be scheduled from
			 * separate context
			 */
			generic_make_request(r1_bio->bios[r1_bio->read_disk]);

		cond_resched();
		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}

static int init_resync(struct r1conf *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r1buf_pool);
	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
					  conf->poolinfo);
	if (!conf->r1buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}
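
/*
 * For reference, the numbers above work out as follows (a worked
 * example, not code the driver executes): with RESYNC_BLOCK_SIZE of
 * 64KiB and RESYNC_DEPTH of 32,
 *
 *   RESYNC_WINDOW = 64KiB * 32 = 2MiB
 *   buffs         = RESYNC_WINDOW / RESYNC_BLOCK_SIZE = 32
 *
 * so the pool pre-allocates 32 resync buffers, each spanning
 * RESYNC_SECTORS = 64KiB >> 9 = 128 sectors.
 */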

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */

static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	sector_t sync_blocks;
	int still_degraded = 0;
	int good_sectors = RESYNC_SECTORS;
	int min_bad = 0; /* number of sectors that are bad in all devices */

	if (!conf->r1buf_pool)
		if (init_resync(conf))
			return 0;

	max_sector = mddev->dev_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in
		 * mddev->curr_resync
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* before building a request, check if we can skip these blocks..
	 * This call to bitmap_start_sync doesn't actually record anything
	 */
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}
	/*
	 * If there is non-resync activity waiting for a turn,
	 * and resync is going fast enough,
	 * then let it through before starting on this new sync request.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);

	raise_barrier(conf, sector_nr);

	rcu_read_lock();
	/*
	 * If we get a correctable read error during resync or
	 * recovery, we might want to read from a different device.
	 * So we flag all drives that could conceivably be read from
	 * for READ, and any others (which will be non-In_sync
	 * devices) for WRITE.
	 * If a read fails, we try reading from something else for
	 * which READ is OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev;
		bio = r1_bio->bios[i];
		bio_reset(bio);

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				still_degraded = 1;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_rw = WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			sector_t first_bad = MaxSector;
			int bad_sectors;

			if (is_badblock(rdev, sector_nr, good_sectors,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector_nr)
					good_sectors = first_bad - sector_nr;
				else {
					bad_sectors -= (sector_nr - first_bad);
					if (min_bad == 0 ||
					    min_bad > bad_sectors)
						min_bad = bad_sectors;
				}
			}
			if (sector_nr < first_bad) {
				if (test_bit(WriteMostly, &rdev->flags)) {
					if (wonly < 0)
						wonly = i;
				} else {
					if (disk < 0)
						disk = i;
				}
				bio->bi_rw = READ;
				bio->bi_end_io = end_sync_read;
				read_targets++;
			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
				   test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
				   !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is suitable for reading (InSync),
				 * but has bad block(s) here.  Let's try to
				 * correct them, if we are doing resync or
				 * repair.  Otherwise, leave this device alone
				 * for this sync request.
				 */
				bio->bi_rw = WRITE;
				bio->bi_end_io = end_sync_write;
				write_targets++;
			}
		}
		if (bio->bi_end_io) {
			atomic_inc(&rdev->nr_pending);
			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
			bio->bi_bdev = rdev->bdev;
			bio->bi_private = r1_bio;
		}
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets
		 */
		int ok = 1;
		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
				struct md_rdev *rdev = conf->mirrors[i].rdev;
				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0
					) && ok;
			}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		*skipped = 1;
		put_buf(r1_bio);

		if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * fail the really bad ones ???
			 */
			conf->recovery_disabled = mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			return 0;
		} else
			return min_bad;
	}
	if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
		good_sectors = min_bad;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets-1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv;
		if (min_bad > 0)
			max_sector = sector_nr + min_bad;
		rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
					       &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
			if ((len >> 9) > sync_blocks)
				len = sync_blocks<<9;
		}

		for (i = 0 ; i < conf->raid_disks * 2; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io) {
				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
				if (bio_add_page(bio, page, len, 0) == 0) {
					/* stop here */
					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
					while (i > 0) {
						i--;
						bio = r1_bio->bios[i];
						if (bio->bi_end_io == NULL)
							continue;
						/* remove last page from this bio */
						bio->bi_vcnt--;
						bio->bi_iter.bi_size -= len;
						__clear_bit(BIO_SEG_VALID, &bio->bi_flags);
					}
					goto bio_full;
				}
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r1_bio->sectors = nr_sectors;

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				read_targets--;
				md_sync_acct(bio->bi_bdev, nr_sectors);
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct(bio->bi_bdev, nr_sectors);
		generic_make_request(bio);
	}
	return nr_sectors;
}
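
/*
 * A worked example of the page-filling loop above (illustrative,
 * assuming 4KiB pages): each iteration adds one page, i.e.
 * len >> 9 = 8 sectors, to every active bio.  With RESYNC_PAGES = 16,
 * a fully built r1bio therefore covers 16 * 8 = 128 sectors, which is
 * exactly RESYNC_SECTORS.  If bitmap_start_sync() reports a smaller
 * in-sync run (sync_blocks), len is clamped to sync_blocks << 9 so a
 * single request never extends past the region the bitmap reported on.
 */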

static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}
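
/*
 * Note (ours, for orientation): with mirroring there is no striping
 * arithmetic to do, so the array capacity is simply the common
 * per-device data size - e.g. two 1TiB members still yield a 1TiB
 * array.
 */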

static struct r1conf *setup_conf(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct raid1_info *disk;
	struct md_rdev *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
	if (!conf)
		goto abort;

	conf->mirrors = kzalloc(sizeof(struct raid1_info)
				* mddev->raid_disks * 2,
				GFP_KERNEL);
	if (!conf->mirrors)
		goto abort;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto abort;

	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto abort;
	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto abort;

	conf->poolinfo->mddev = mddev;

	err = -EINVAL;
	spin_lock_init(&conf->device_lock);
	rdev_for_each(rdev, mddev) {
		struct request_queue *q;
		int disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		if (test_bit(Replacement, &rdev->flags))
			disk = conf->mirrors + mddev->raid_disks + disk_idx;
		else
			disk = conf->mirrors + disk_idx;

		if (disk->rdev)
			goto abort;
		disk->rdev = rdev;
		q = bdev_get_queue(rdev->bdev);
		if (q->merge_bvec_fn)
			mddev->merge_check_needed = 1;

		disk->head_position = 0;
		disk->seq_start = MaxSector;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	conf->pending_count = 0;
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	conf->start_next_window = MaxSector;
	conf->current_window_requests = conf->next_window_requests = 0;

	err = -EIO;
	for (i = 0; i < conf->raid_disks * 2; i++) {

		disk = conf->mirrors + i;

		if (i < conf->raid_disks &&
		    disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
			if (!disk->rdev) {
				/* No original, just make the replacement
				 * a recovering spare
				 */
				disk->rdev =
					disk[conf->raid_disks].rdev;
				disk[conf->raid_disks].rdev = NULL;
			} else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
				goto abort;
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			if (disk->rdev &&
			    (disk->rdev->saved_raid_disk < 0))
				conf->fullsync = 1;
		}
	}

	err = -ENOMEM;
	conf->thread = md_register_thread(raid1d, mddev, "raid1");
	if (!conf->thread) {
		printk(KERN_ERR
		       "md/raid1:%s: couldn't allocate thread\n",
		       mdname(mddev));
		goto abort;
	}

	return conf;

 abort:
	if (conf) {
		if (conf->r1bio_pool)
			mempool_destroy(conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf);
	}
	return ERR_PTR(err);
}
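
/*
 * Layout of conf->mirrors as set up above (an illustration, for a
 * hypothetical 2-device array, so poolinfo->raid_disks = 4):
 *
 *   slots 0..1: the active mirrors
 *   slots 2..3: replacement devices for slots 0 and 1, if any
 *
 * which is why loops over all devices run to conf->raid_disks * 2,
 * and why disk[conf->raid_disks] names the replacement for 'disk'.
 */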

static int stop(struct mddev *mddev);
static int run(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct md_rdev *rdev;
	int ret;
	bool discard_supported = false;

	if (mddev->level != 1) {
		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
		       mdname(mddev), mddev->level);
		return -EIO;
	}
	if (mddev->reshape_position != MaxSector) {
		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
		       mdname(mddev));
		return -EIO;
	}
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (mddev->queue)
		blk_queue_max_write_same_sectors(mddev->queue, 0);

	rdev_for_each(rdev, mddev) {
		if (!mddev->gendisk)
			continue;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
			discard_supported = true;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->mirrors[i].rdev == NULL ||
		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			mddev->degraded++;

	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid1:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	printk(KERN_INFO
	       "md/raid1:%s: active with %d out of %d mirrors\n",
	       mdname(mddev), mddev->raid_disks - mddev->degraded,
	       mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;

	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

	if (mddev->queue) {
		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
		mddev->queue->backing_dev_info.congested_data = mddev;
		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);

		if (discard_supported)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
						mddev->queue);
		else
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
						  mddev->queue);
	}

	ret = md_integrity_register(mddev);
	if (ret)
		stop(mddev);
	return ret;
}

static int stop(struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	struct bitmap *bitmap = mddev->bitmap;

	/* wait for behind writes to complete */
	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
		printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
		       mdname(mddev));
		/* need to kick something here to make sure I/O goes? */
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}

	/* drain any remaining in-flight I/O before tearing down */
	freeze_array(conf, 0);
	unfreeze_array(conf);

	md_unregister_thread(&mddev->thread);
	if (conf->r1bio_pool)
		mempool_destroy(conf->r1bio_pool);
	kfree(conf->mirrors);
	safe_put_page(conf->tmppage);
	kfree(conf->poolinfo);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static int raid1_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sector_t newsize = raid1_size(mddev, sectors, 0);
	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
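
/*
 * Worked example for the resize logic above (illustrative numbers):
 * growing each member from 1000 to 2000 sectors leaves
 * mddev->recovery_cp (possibly MaxSector) above the old dev_sectors,
 * so it is pulled back to 1000 and MD_RECOVERY_NEEDED is set; only
 * the newly exposed sectors 1000..1999 then get resynced rather than
 * the whole array.
 */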

static int raid1_reshape(struct mddev *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ move each rdev->raid_disk so that the surviving
	 *    devices occupy the first 'raid_disks' slots
	 *
	 * A new r1bio_pool and mirrors array are allocated up front,
	 * swapped in while the array is frozen, and the old ones are
	 * freed afterwards, so the change appears atomic to in-flight
	 * I/O.
	 */
	mempool_t *newpool, *oldpool;
	struct pool_info *newpoolinfo;
	struct raid1_info *newmirrors;
	struct r1conf *conf = mddev->private;
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2, err;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	err = md_allow_write(mddev);
	if (err)
		return err;

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks * 2;

	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
				 r1bio_pool_free, newpoolinfo);
	if (!newpool) {
		kfree(newpoolinfo);
		return -ENOMEM;
	}
	newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
			     GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_destroy(newpool);
		return -ENOMEM;
	}

	freeze_array(conf, 0);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++) {
		struct md_rdev *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			sysfs_unlink_rdev(mddev, rdev);
			rdev->raid_disk = d2;
			sysfs_unlink_rdev(mddev, rdev);
			if (sysfs_link_rdev(mddev, rdev))
				printk(KERN_WARNING
				       "md/raid1:%s: cannot register rd%d\n",
				       mdname(mddev), rdev->raid_disk);
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	unfreeze_array(conf);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_destroy(oldpool);
	return 0;
}
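
/*
 * Illustration of the d -> d2 compaction in raid1_reshape() above
 * (hypothetical state): shrinking from 3 slots to 2 with mirrors
 * [0: rdevA, 1: empty, 2: rdevB] moves rdevB to raid_disk 1, giving
 * [0: rdevA, 1: rdevB]; the sysfs rd%d links are re-created to match
 * the new slot numbers.
 */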

static void raid1_quiesce(struct mddev *mddev, int state)
{
	struct r1conf *conf = mddev->private;

	switch (state) {
	case 2: /* wake for suspend */
		wake_up(&conf->wait_barrier);
		break;
	case 1: /* stop all I/O */
		freeze_array(conf, 0);
		break;
	case 0: /* resume */
		unfreeze_array(conf);
		break;
	}
}
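
/*
 * The quiesce states handled above, as requested by the md core:
 *   1 - suspend all I/O (freeze_array() waits for in-flight requests
 *       to drain)
 *   0 - resume normal operation
 *   2 - poke anyone sleeping on wait_barrier so a pending freeze
 *       cannot deadlock while a suspend is in progress
 */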

static void *raid1_takeover(struct mddev *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		struct r1conf *conf;
		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf))
			/* Array must appear to be quiesced */
			conf->array_frozen = 1;
		return conf;
	}
	return ERR_PTR(-EINVAL);
}
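
/*
 * Why the takeover above is safe: with only two devices, each raid5
 * stripe holds one data block and its "parity", and the XOR of a
 * single block is the block itself, so the two members are already
 * byte-for-byte mirrors.  A usage sketch (driven by the md core, not
 * by this function directly):
 *
 *   mdadm --grow /dev/mdX --level=1
 */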

static struct md_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk = raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= sync_request,
	.resize		= raid1_resize,
	.size		= raid1_size,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3");
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
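
/*
 * max_queued_requests bounds conf->pending_count: once that many
 * writes are queued for the raid1d thread, new writers wait, which
 * keeps a slow member from accumulating an unbounded backlog.  Since
 * the parameter is declared S_IRUGO|S_IWUSR it can be tuned at
 * runtime, e.g. (usage sketch, assuming raid1 is built as a module):
 *
 *   echo 2048 > /sys/module/raid1/parameters/max_queued_requests
 */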