/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * RAID-1 (mirroring) personality for the MD driver.
 */
#include "dm-bio-list.h"
#include <linux/raid/raid1.h>
#include <linux/raid/bitmap.h>

#define DEBUG 0
#if DEBUG
#define PRINTK(x...) printk(x)
#else
#define PRINTK(x...)
#endif

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256


static void unplug_slaves(mddev_t *mddev);

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	r1bio_t *r1_bio;
	int size = offsetof(r1bio_t, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	r1_bio = kzalloc(size, gfp_flags);
	if (!r1_bio)
		unplug_slaves(pi->mddev);

	return r1_bio;
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)

#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	r1bio_t *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio) {
		unplug_slaves(pi->mddev);
		return NULL;
	}

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while (j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
		}
	}
	/* If not user-requested, the remaining bios share the pages
	 * allocated for the first bio.
	 */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i = 0; i < RESYNC_PAGES; i++)
			for (j = 1; j < pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = 0; j < pi->raid_disks; j++)
			safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}
static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i, j;
	r1bio_t *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j--; ) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i = 0; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}
static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}
static void free_r1bio(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}
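/* put_buf is the counterpart of free_r1bio for resync/recovery buffers:
 * it drops the rdev references taken for each bio that was actually used,
 * returns the buffer to the r1buf pool, and lowers the resync barrier so
 * normal IO may proceed again.
 */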
static void put_buf(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}
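/* Queue an r1bio for handling by the raid1d thread, and wake both the
 * thread and anyone waiting on the barrier (freeze_array watches
 * nr_queued, so it must be re-checked whenever the count changes).
 */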
static void reschedule_retry(r1bio_t *r1_bio)
{
	unsigned long flags;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
			(bio_data_dir(bio) == WRITE) ? "write" : "read",
			(unsigned long long) bio->bi_sector,
			(unsigned long long) bio->bi_sector +
				(bio->bi_size >> 9) - 1);

		bio_endio(bio,
			test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}
static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror;
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate)
		raid_end_bio_io(r1_bio);
	else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
			       bdevname(conf->mirrors[mirror].rdev->bdev, b),
			       (unsigned long long)r1_bio->sector);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}
static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	struct bio *to_put = NULL;


	for (mirror = 0; mirror < conf->raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
		set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
		set_bit(R1BIO_BarrierRetry, &r1_bio->state);
		r1_bio->mddev->barriers_work = 0;
		/* Don't rdev_dec_pending in this branch - keep it for the retry */
	} else {
		/*
		 * this branch is our 'one mirror IO has finished' event handler:
		 */
		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		if (!uptodate) {
			md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
			/* an I/O failed, we can't clear the bitmap */
			set_bit(R1BIO_Degraded, &r1_bio->state);
		} else
			/*
			 * Set R1BIO_Uptodate in our master bio, so that
			 * we will return a good error code to the higher
			 * levels even if IO on some other mirrored buffer fails.
			 *
			 * The 'master' represents the composite IO operation to
			 * user-space so if something waits for IO, then it will
			 * wait for the 'master' bio.
			 */
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		update_head_pos(mirror, r1_bio);

		if (behind) {
			if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
				atomic_dec(&r1_bio->behind_remaining);

			/* In behind mode, we ACK the master bio once the I/O
			 * has safely reached all non-writemostly disks.
			 * Setting the Returned bit ensures that this gets done
			 * only once -- we don't ever want to return -EIO here,
			 * instead we'll wait.
			 */
			if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
			    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
				/* Maybe we can return now */
				if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
					struct bio *mbio = r1_bio->master_bio;
					PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
					       (unsigned long long) mbio->bi_sector,
					       (unsigned long long) mbio->bi_sector +
					       (mbio->bi_size >> 9) - 1);
					bio_endio(mbio, 0);
				}
			}
		}
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	}
	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	if (atomic_dec_and_test(&r1_bio->remaining)) {
		if (test_bit(R1BIO_BarrierRetry, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			/* it really is the end of this request */
			if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
				/* free extra copy of the data pages */
				int i = bio->bi_vcnt;
				while (i--)
					safe_put_page(bio->bi_io_vec[i].bv_page);
			}
			/* clear the bitmap if all writes complete successfully */
			bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
					r1_bio->sectors,
					!test_bit(R1BIO_Degraded, &r1_bio->state),
					behind);
			md_write_end(r1_bio->mddev);
			raid_end_bio_io(r1_bio);
		}
	}

	if (to_put)
		bio_put(to_put);
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
	const unsigned long this_sector = r1_bio->sector;
	int new_disk = conf->last_used, disk = new_disk;
	int wonly_disk = -1;
	const int sectors = r1_bio->sectors;
	sector_t new_distance, current_distance;
	mdk_rdev_t *rdev;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		/* Choose the first operational device, for consistency */
		new_disk = 0;

		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		     r1_bio->bios[new_disk] == IO_BLOCKED ||
		     !rdev || !test_bit(In_sync, &rdev->flags)
			     || test_bit(WriteMostly, &rdev->flags);
		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {

			if (rdev && test_bit(In_sync, &rdev->flags) &&
			    r1_bio->bios[new_disk] != IO_BLOCKED)
				wonly_disk = new_disk;

			if (new_disk == conf->raid_disks - 1) {
				new_disk = wonly_disk;
				break;
			}
		}
		goto rb_out;
	}


	/* make sure the disk is operational */
	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
	     r1_bio->bios[new_disk] == IO_BLOCKED ||
	     !rdev || !test_bit(In_sync, &rdev->flags) ||
	     test_bit(WriteMostly, &rdev->flags);
	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {

		if (rdev && test_bit(In_sync, &rdev->flags) &&
		    r1_bio->bios[new_disk] != IO_BLOCKED)
			wonly_disk = new_disk;

		if (new_disk <= 0)
			new_disk = conf->raid_disks;
		new_disk--;
		if (new_disk == disk) {
			new_disk = wonly_disk;
			break;
		}
	}

	if (new_disk < 0)
		goto rb_out;

	disk = new_disk;
	/* now disk == new_disk == starting point for search */

	/*
	 * Don't change to another disk for sequential reads:
	 */
	if (conf->next_seq_sect == this_sector)
		goto rb_out;
	if (this_sector == conf->mirrors[new_disk].head_position)
		goto rb_out;

	current_distance = abs(this_sector - conf->mirrors[disk].head_position);

	/* Find the disk whose head is closest */

	do {
		if (disk <= 0)
			disk = conf->raid_disks;
		disk--;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);

		if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
		    !test_bit(In_sync, &rdev->flags) ||
		    test_bit(WriteMostly, &rdev->flags))
			continue;

		if (!atomic_read(&rdev->nr_pending)) {
			new_disk = disk;
			break;
		}
		new_distance = abs(this_sector - conf->mirrors[disk].head_position);
		if (new_distance < current_distance) {
			current_distance = new_distance;
			new_disk = disk;
		}
	} while (disk != conf->last_used);

 rb_out:


	if (new_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (!test_bit(In_sync, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = new_disk;
	}
	rcu_read_unlock();

	return new_disk;
}
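/* Ask the block-device queue of every in-use member device to start
 * processing its queued requests ("unplug"), so IO that raid1 has
 * submitted makes progress.  nr_pending is raised around the call so
 * the rdev cannot disappear while the rcu read lock is dropped.
 */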
static void unplug_slaves(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}
static void raid1_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;

	unplug_slaves(mddev);
	md_wakeup_thread(mddev->thread);
}
static int raid1_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_write_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reshape.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO,
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the background IO ends.
 */
#define RESYNC_DEPTH 32

static void raise_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    raid1_unplug(conf->mddev->queue));
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}
static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until barrier+nr_pending match nr_queued+2
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->barrier+conf->nr_pending == conf->nr_queued+2,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}


/* duplicate the data pages for behind I/O */
static struct page **alloc_behind_pages(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
				      GFP_NOIO);
	if (unlikely(!pages))
		goto do_sync_io;

	bio_for_each_segment(bvec, bio, i) {
		pages[i] = alloc_page(GFP_NOIO);
		if (unlikely(!pages[i]))
			goto do_sync_io;
		memcpy(kmap(pages[i]) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(pages[i]);
		kunmap(bvec->bv_page);
	}

	return pages;

do_sync_io:
	if (pages)
		for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
			put_page(pages[i]);
	kfree(pages);
	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
	return NULL;
}
static int make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	mirror_info_t *mirror;
	r1bio_t *r1_bio;
	struct bio *read_bio;
	int i, targets = 0, disks;
	mdk_rdev_t *rdev;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct bio_list bl;
	struct page **behind_pages = NULL;
	const int rw = bio_data_dir(bio);
	const int do_sync = bio_sync(bio);
	int do_barriers;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 * We test barriers_work *after* md_write_start as md_write_start
	 * may cause the first superblock write, and that will check out
	 * if barriers work.
	 */
	md_write_start(mddev, bio); /* wait on superblock update early */

	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
		if (rw == WRITE)
			md_write_end(mddev);
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	wait_barrier(conf);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk = read_balance(conf, r1_bio);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return 0;
		}
		mirror = conf->mirrors + rdisk;

		r1_bio->read_disk = rdisk;

		read_bio = bio_clone(bio, GFP_NOIO);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under spinlock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 */
	disks = conf->raid_disks;
#if 0
	{ static int first=1;
	if (first) printk("First Write sector %llu disks %d\n",
			  (unsigned long long)r1_bio->sector, disks);
	first = 0;
	}
#endif
	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
		    !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			if (test_bit(Faulty, &rdev->flags)) {
				rdev_dec_pending(rdev, mddev);
				r1_bio->bios[i] = NULL;
			} else
				r1_bio->bios[i] = bio;
			targets++;
		} else
			r1_bio->bios[i] = NULL;
	}
	rcu_read_unlock();

	BUG_ON(targets == 0);

	if (targets < conf->raid_disks) {
		/* array is degraded, we will not clear the bitmap
		 * on I/O completion (see raid1_end_write_request) */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	}

	/* do behind I/O ? */
	if (bitmap &&
	    atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
	    (behind_pages = alloc_behind_pages(bio)) != NULL)
		set_bit(R1BIO_BehindIO, &r1_bio->state);

	atomic_set(&r1_bio->remaining, 0);
	atomic_set(&r1_bio->behind_remaining, 0);

	do_barriers = bio_barrier(bio);
	if (do_barriers)
		set_bit(R1BIO_Barrier, &r1_bio->state);

	bio_list_init(&bl);
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone(bio, GFP_NOIO);
		r1_bio->bios[i] = mbio;

		mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_rw = WRITE | do_barriers | do_sync;
		mbio->bi_private = r1_bio;

		if (behind_pages) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged.  This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = behind_pages[j];
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		atomic_inc(&r1_bio->remaining);

		bio_list_add(&bl, mbio);
	}
	kfree(behind_pages); /* the behind pages are attached to the bios now */

	bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
			  test_bit(R1BIO_BehindIO, &r1_bio->state));
	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_merge(&conf->pending_bio_list, &bl);
	bio_list_init(&bl);

	blk_plug_device(mddev->queue);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	if (do_sync)
		md_wakeup_thread(mddev->thread);
#if 0
	while ((bio = bio_list_pop(&bl)) != NULL)
		generic_make_request(bio);
#endif

	return 0;
}

static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev_to_conf(mddev);

	/*
	 * If it is not operational, then we have already marked it as dead
	 * etc...
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1)
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 */
		return;
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
	} else
		set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
	       "	Operation continuing on %d devices\n",
	       bdevname(rdev->bdev, b), conf->raid_disks - mddev->degraded);
}

static void print_conf(conf_t *conf)
{
	int i;

	printk("RAID1 conf printout:\n");
	if (!conf) {
		printk("(!conf)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
	       conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev, b));
	}
	rcu_read_unlock();
}
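/* close_sync: the sync has finished.  The wait_barrier/allow_barrier
 * pair acts as a flush, ensuring the last resync barrier has been
 * lowered before the resync buffer pool is destroyed.
 */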
static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}
static int raid1_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
		if (rdev
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded--;
			spin_unlock_irqrestore(&conf->device_lock, flags);
		}
	}

	print_conf(conf);
	return 0;
}

static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int found = 0;
	int mirror = 0;
	mirror_info_t *p;

	for (mirror = 0; mirror < mddev->raid_disks; mirror++)
		if ( !(p=conf->mirrors+mirror)->rdev) {

			blk_queue_stack_limits(mddev->queue,
					       rdev->bdev->bd_disk->queue);
			/* as we don't honour merge_bvec_fn, we must never risk
			 * violating it, so limit ->max_sectors to one PAGE, as
			 * a one page request is never in violation.
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			found = 1;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}

	print_conf(conf);
	return found;
}

static int raid1_remove_disk(mddev_t *mddev, int number)
{
	conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	mirror_info_t *p = conf->mirrors + number;

	print_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_conf(conf);
	return err;
}
static void end_sync_read(struct bio *bio, int error)
{
	r1bio_t *r1_bio = (r1bio_t *)(bio->bi_private);
	int i;

	for (i=r1_bio->mddev->raid_disks; i--; )
		if (r1_bio->bios[i] == bio)
			break;
	BUG_ON(i < 0);
	update_head_pos(i, r1_bio);
	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}
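/* end_sync_write: a resync/recovery write to one mirror has completed.
 * On failure, re-mark the range as needing sync in the bitmap and fail
 * the device; when the last write finishes, report progress via
 * md_done_sync and release the buffer.
 */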
static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = (r1bio_t *)(bio->bi_private);
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int mirror = 0;

	for (i = 0; i < conf->raid_disks; i++)
		if (r1_bio->bios[i] == bio) {
			mirror = i;
			break;
		}
	if (!uptodate) {
		int sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits don't get cleared. */
		do {
			bitmap_end_sync(mddev->bitmap, s,
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		md_error(mddev, conf->mirrors[mirror].rdev);
	}

	update_head_pos(mirror, r1_bio);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		md_done_sync(mddev, r1_bio->sectors, uptodate);
		put_buf(r1_bio);
	}
}
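/* sync_request_write is called by raid1d once all the reads scheduled
 * by sync_request have completed.  For a user-requested check/repair it
 * compares the copies; for resync/recovery it repairs failed reads and
 * then schedules the writes that bring the mirrors back in sync.
 */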
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int disks = conf->raid_disks;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];


	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We have read all readable devices.  If we haven't
		 * got the block, then there is no hope left.
		 * If we have, then we want to do a comparison
		 * and skip the write if everything is the same.
		 * If any blocks failed to read, then we need to
		 * attempt an over-write
		 */
		int primary;
		if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			for (i=0; i<mddev->raid_disks; i++)
				if (r1_bio->bios[i]->bi_end_io == end_sync_read)
					md_error(mddev, conf->mirrors[i].rdev);

			md_done_sync(mddev, r1_bio->sectors, 1);
			put_buf(r1_bio);
			return;
		}
		for (primary=0; primary<mddev->raid_disks; primary++)
			if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
			    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
				r1_bio->bios[primary]->bi_end_io = NULL;
				rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
				break;
			}
		r1_bio->read_disk = primary;
		for (i=0; i<mddev->raid_disks; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_read) {
				int j;
				int vcnt = r1_bio->sectors >> (PAGE_SHIFT-9);
				struct bio *pbio = r1_bio->bios[primary];
				struct bio *sbio = r1_bio->bios[i];

				if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
					for (j = vcnt; j-- ; ) {
						struct page *p, *s;
						p = pbio->bi_io_vec[j].bv_page;
						s = sbio->bi_io_vec[j].bv_page;
						if (memcmp(page_address(p),
							   page_address(s),
							   PAGE_SIZE))
							break;
					}
				} else
					j = 0;
				if (j >= 0)
					mddev->resync_mismatches += r1_bio->sectors;
				if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
					      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
					sbio->bi_end_io = NULL;
					rdev_dec_pending(conf->mirrors[i].rdev, mddev);
				} else {
					/* fixup the bio for reuse */
					sbio->bi_vcnt = vcnt;
					sbio->bi_size = r1_bio->sectors << 9;
					sbio->bi_idx = 0;
					sbio->bi_phys_segments = 0;
					sbio->bi_hw_segments = 0;
					sbio->bi_hw_front_size = 0;
					sbio->bi_hw_back_size = 0;
					sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
					sbio->bi_flags |= 1 << BIO_UPTODATE;
					sbio->bi_next = NULL;
					sbio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
					for (j = 0; j < vcnt ; j++)
						memcpy(page_address(sbio->bi_io_vec[j].bv_page),
						       page_address(pbio->bi_io_vec[j].bv_page),
						       PAGE_SIZE);

				}
			}
	}
	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
		/* ouch - failed to read all of that.
		 * Try some synchronous reads of other devices to get
		 * good data, much like with normal read errors.  Only
		 * read into the pages we already have so we don't
		 * need to re-issue the read request.
		 * We don't need to freeze the array, because being in an
		 * active sync request, there is no normal IO, and
		 * no overlapping syncs.
		 */
		sector_t sect = r1_bio->sector;
		int sectors = r1_bio->sectors;
		int idx = 0;

		while (sectors) {
			int s = sectors;
			int d = r1_bio->read_disk;
			int success = 0;
			mdk_rdev_t *rdev;

			if (s > (PAGE_SIZE>>9))
				s = PAGE_SIZE >> 9;
			do {
				if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
					/* No rcu protection needed here devices
					 * can only be removed when no resync is
					 * active, and resync is currently active
					 */
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ)) {
						success = 1;
						break;
					}
				}
				d++;
				if (d == conf->raid_disks)
					d = 0;
			} while (!success && d != r1_bio->read_disk);

			if (success) {
				int start = d;
				/* write it back and re-read */
				set_bit(R1BIO_Uptodate, &r1_bio->state);
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					atomic_add(s, &rdev->corrected_errors);
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 WRITE) == 0)
						md_error(mddev, rdev);
				}
				d = start;
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ) == 0)
						md_error(mddev, rdev);
				}
			} else {
				char b[BDEVNAME_SIZE];
				/* Cannot read from anywhere, array is toast */
				md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
				       " for block %llu\n",
				       bdevname(bio->bi_bdev, b),
				       (unsigned long long)r1_bio->sector);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return;
			}
			sectors -= s;
			sect += s;
			idx++;
		}
	}

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		md_done_sync(mddev, r1_bio->sectors, 1);
		put_buf(r1_bio);
	}
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems encounter.
 *	3.	Performs writes following reads for array synchronising.
 */
static void fix_read_error(conf_t *conf, int read_disk,
			   sector_t sect, int sectors)
{
	mddev_t *mddev = conf->mddev;
	while (sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		mdk_rdev_t *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			/* Note: no rcu protection needed here
			 * as this is synchronous in the raid1d thread
			 * which is the thread that might remove
			 * a device.  If raid1d ever becomes multi-threaded....
			 */
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags) &&
			    sync_page_io(rdev->bdev,
					 sect + rdev->data_offset,
					 s<<9,
					 conf->tmppage, READ))
				success = 1;
			else {
				d++;
				if (d == conf->raid_disks)
					d = 0;
			}
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere -- bye bye array */
			md_error(mddev, conf->mirrors[read_disk].rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d==0)
				d = conf->raid_disks;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				if (sync_page_io(rdev->bdev,
						 sect + rdev->data_offset,
						 s<<9, conf->tmppage, WRITE)
				    == 0)
					/* Well, this device is dead */
					md_error(mddev, rdev);
			}
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d==0)
				d = conf->raid_disks;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				if (sync_page_io(rdev->bdev,
						 sect + rdev->data_offset,
						 s<<9, conf->tmppage, READ)
				    == 0)
					/* Well, this device is dead */
					md_error(mddev, rdev);
				else {
					atomic_add(s, &rdev->corrected_errors);
					printk(KERN_INFO
					       "raid1:%s: read error corrected "
					       "(%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
								    rdev->data_offset),
					       bdevname(rdev->bdev, b));
				}
			}
		}
		sectors -= s;
		sect += s;
	}
}
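/* raid1d is the per-array management thread.  It flushes queued writes
 * (after the bitmap has been committed), resubmits barrier-writes that
 * came back -EOPNOTSUPP, completes resync requests, and retries failed
 * reads on another mirror after attempting to fix the bad sectors.
 */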
static void raid1d(mddev_t *mddev)
{
	r1bio_t *r1_bio;
	struct bio *bio;
	unsigned long flags;
	conf_t *conf = mddev_to_conf(mddev);
	struct list_head *head = &conf->retry_list;
	int unplug = 0;
	mdk_rdev_t *rdev;

	md_check_recovery(mddev);

	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);

		if (conf->pending_bio_list.head) {
			bio = bio_list_get(&conf->pending_bio_list);
			blk_remove_plug(mddev->queue);
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/* flush any pending bitmap writes to disk before proceeding w/ I/O */
			bitmap_unplug(mddev->bitmap);

			while (bio) { /* submit pending writes */
				struct bio *next = bio->bi_next;
				bio->bi_next = NULL;
				generic_make_request(bio);
				bio = next;
			}
			unplug = 1;

			continue;
		}

		if (list_empty(head))
			break;
		r1_bio = list_entry(head->prev, r1bio_t, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev_to_conf(mddev);
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			sync_request_write(mddev, r1_bio);
			unplug = 1;
		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
			/* some requests in the r1bio were BIO_RW_BARRIER
			 * requests which failed with -EOPNOTSUPP.  Hohumm..
			 * Better resubmit without the barrier.
			 * We know which devices to resubmit for, because
			 * all others have had their bios[] entry cleared.
			 * We already have a nr_pending reference on these rdevs.
			 */
			int i;
			const int do_sync = bio_sync(r1_bio->master_bio);
			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
			clear_bit(R1BIO_Barrier, &r1_bio->state);
			for (i=0; i < conf->raid_disks; i++)
				if (r1_bio->bios[i])
					atomic_inc(&r1_bio->remaining);
			for (i=0; i < conf->raid_disks; i++)
				if (r1_bio->bios[i]) {
					struct bio_vec *bvec;
					int j;

					bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
					/* copy pages from the failed bio, as
					 * this might be a write-behind device */
					__bio_for_each_segment(bvec, bio, j, 0)
						bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
					bio_put(r1_bio->bios[i]);
					bio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
					bio->bi_end_io = raid1_end_write_request;
					bio->bi_rw = WRITE | do_sync;
					bio->bi_private = r1_bio;
					r1_bio->bios[i] = bio;
					generic_make_request(bio);
				}
		} else {
			int disk;

			/* we got a read error. Maybe the drive is bad.  Maybe just
			 * the block and we can fix it.
			 * We freeze all other IO, and try reading the block from
			 * other devices.  When we find one, we re-write
			 * and check it that fixes the read error.
			 * This is all done synchronously while the array is
			 * frozen
			 */
			if (mddev->ro == 0) {
				freeze_array(conf);
				fix_read_error(conf, r1_bio->read_disk,
					       r1_bio->sector,
					       r1_bio->sectors);
				unfreeze_array(conf);
			}

			bio = r1_bio->bios[r1_bio->read_disk];
			if ((disk=read_balance(conf, r1_bio)) == -1) {
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
				       " read error for block %llu\n",
				       bdevname(bio->bi_bdev, b),
				       (unsigned long long)r1_bio->sector);
				raid_end_bio_io(r1_bio);
			} else {
				const int do_sync = bio_sync(r1_bio->master_bio);
				r1_bio->bios[r1_bio->read_disk] =
					mddev->ro ? IO_BLOCKED : NULL;
				r1_bio->read_disk = disk;
				bio_put(bio);
				bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
				r1_bio->bios[r1_bio->read_disk] = bio;
				rdev = conf->mirrors[disk].rdev;
				if (printk_ratelimit())
					printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
					       " another mirror\n",
					       bdevname(rdev->bdev, b),
					       (unsigned long long)r1_bio->sector);
				bio->bi_sector = r1_bio->sector + rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				bio->bi_end_io = raid1_end_read_request;
				bio->bi_rw = READ | do_sync;
				bio->bi_private = r1_bio;
				unplug = 1;
				generic_make_request(bio);
			}
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
	if (unplug)
		unplug_slaves(mddev);
}


static int init_resync(conf_t *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r1buf_pool);
	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
					  conf->poolinfo);
	if (!conf->r1buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be raised or lowered when sync/normal IO want to proceed.
 */
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	conf_t *conf = mddev_to_conf(mddev);
	r1bio_t *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	int sync_blocks;
	int still_degraded = 0;

	if (!conf->r1buf_pool) {
		if (init_resync(conf))
			return 0;
	}

	max_sector = mddev->size << 1;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* before building a request, check if we can skip these blocks..
	 * This call to bitmap_start_sync doesn't actually record anything
	 */
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}
	/*
	 * If there is non-resync activity waiting for a turn,
	 * and resync is going fast enough,
	 * then let it through before starting on this new sync request.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	raise_barrier(conf);

	conf->next_resync = sector_nr;

	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
	rcu_read_lock();
	/*
	 * If we get a correctable read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which READ
	 * was OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);
	for (i=0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev;
		bio = r1_bio->bios[i];

		/* take from bio_init */
		bio->bi_next = NULL;
		bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_rw = READ;
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_hw_segments = 0;
		bio->bi_size = 0;
		bio->bi_end_io = NULL;
		bio->bi_private = NULL;

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			still_degraded = 1;
			continue;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_rw = WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			bio->bi_rw = READ;
			bio->bi_end_io = end_sync_read;
			if (test_bit(WriteMostly, &rdev->flags)) {
				if (wonly < 0)
					wonly = i;
			} else {
				if (disk < 0)
					disk = i;
			}
			read_targets++;
		}
		atomic_inc(&rdev->nr_pending);
		bio->bi_sector = sector_nr + rdev->data_offset;
		bio->bi_bdev = rdev->bdev;
		bio->bi_private = r1_bio;
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets-1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
					       &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
			if (len > (sync_blocks<<9))
				len = sync_blocks<<9;
		}

		for (i=0 ; i < conf->raid_disks; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io) {
				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
				if (bio_add_page(bio, page, len, 0) == 0) {
					/* stop here */
					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
					while (i > 0) {
						i--;
						bio = r1_bio->bios[i];
						if (bio->bi_end_io==NULL)
							continue;
						/* remove last page from this bio */
						bio->bi_vcnt--;
						bio->bi_size -= len;
						bio->bi_flags &= ~(1<< BIO_SEG_VALID);
					}
					goto bio_full;
				}
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r1_bio->sectors = nr_sectors;

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i=0; i<conf->raid_disks; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				md_sync_acct(bio->bi_bdev, nr_sectors);
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct(bio->bi_bdev, nr_sectors);
		generic_make_request(bio);

	}
	return nr_sectors;
}
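/* run() is called when the array is started: it allocates the private
 * conf structure, attaches the member devices, checks that at least one
 * mirror is operational and starts the raid1d thread.
 */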
static int run(mddev_t *mddev)
{
	conf_t *conf;
	int i, j, disk_idx;
	mirror_info_t *disk;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	if (mddev->level != 1) {
		printk("raid1: %s: raid level not set to mirroring (%d)\n",
		       mdname(mddev), mddev->level);
		goto out;
	}
	if (mddev->reshape_position != MaxSector) {
		printk("raid1: %s: reshape_position set but not supported\n",
		       mdname(mddev));
		goto out;
	}
	/*
	 * copy the already verified devices into our private RAID1
	 * bootstrap time, and register the stacked queue limits of each
	 * new device.
	 */
	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
	mddev->private = conf;
	if (!conf)
		goto out_no_mem;

	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->mirrors)
		goto out_no_mem;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto out_no_mem;

	conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto out_no_mem;
	conf->poolinfo->mddev = mddev;
	conf->poolinfo->raid_disks = mddev->raid_disks;
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto out_no_mem;

	ITERATE_RDEV(mddev, rdev, tmp) {
		disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		disk = conf->mirrors + disk_idx;

		disk->rdev = rdev;

		blk_queue_stack_limits(mddev->queue,
				       rdev->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		disk->head_position = 0;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	bio_list_init(&conf->flushing_bio_list);


	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {

		disk = conf->mirrors + i;

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			mddev->degraded++;
			if (disk->rdev)
				conf->fullsync = 1;
		}
	}
	if (mddev->degraded == conf->raid_disks) {
		printk(KERN_ERR "raid1: no operational mirrors for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}
	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	/*
	 * find the first working one and use it as a starting point
	 * to read balancing.
	 */
	for (j = 0; j < conf->raid_disks &&
		     (!conf->mirrors[j].rdev ||
		      !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++)
		/* nothing */;
	conf->last_used = j;


	mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
	if (!mddev->thread) {
		printk(KERN_ERR
		       "raid1: couldn't allocate thread for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	printk(KERN_INFO
	       "raid1: raid set %s active with %d out of %d mirrors\n",
	       mdname(mddev), mddev->raid_disks - mddev->degraded,
	       mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->array_size = mddev->size;

	mddev->queue->unplug_fn = raid1_unplug;
	mddev->queue->backing_dev_info.congested_fn = raid1_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	return 0;

out_no_mem:
	printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
	       mdname(mddev));

out_free_conf:
	if (conf) {
		if (conf->r1bio_pool)
			mempool_destroy(conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf);
		mddev->private = NULL;
	}
out:
	return -EIO;
}

static int stop(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	struct bitmap *bitmap = mddev->bitmap;
	int behind_wait = 0;

	/* wait for behind writes to complete */
	while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
		behind_wait++;
		printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ); /* wait a second */
		/* need to kick something here to make sure I/O goes? */
	}

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	if (conf->r1bio_pool)
		mempool_destroy(conf->r1bio_pool);
	kfree(conf->mirrors);
	kfree(conf->poolinfo);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static int raid1_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any requests have completed, but we don't have a mechanism for
	 * that yet :-(
	 */
	mddev->array_size = sectors>>1;
	set_capacity(mddev->gendisk, mddev->array_size << 1);
	mddev->changed = 1;
	if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->size << 1;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->size = mddev->array_size;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static int raid1_reshape(mddev_t *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ move some or all of the mirrors data
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t *newpool, *oldpool;
	struct pool_info *newpoolinfo;
	mirror_info_t *newmirrors;
	conf_t *conf = mddev_to_conf(mddev);
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_size != mddev->new_chunk ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk = mddev->chunk_size;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	md_allow_write(mddev);

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks;

	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
				 r1bio_pool_free, newpoolinfo);
	if (!newpool) {
		kfree(newpoolinfo);
		return -ENOMEM;
	}
	newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_destroy(newpool);
		return -ENOMEM;
	}

	raise_barrier(conf);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++) {
		mdk_rdev_t *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			char nm[20];
			sprintf(nm, "rd%d", rdev->raid_disk);
			sysfs_remove_link(&mddev->kobj, nm);
			rdev->raid_disk = d2;
			sprintf(nm, "rd%d", rdev->raid_disk);
			sysfs_remove_link(&mddev->kobj, nm);
			if (sysfs_create_link(&mddev->kobj,
					      &rdev->kobj, nm))
				printk(KERN_WARNING
				       "md/raid1: cannot register "
				       "%s for %s\n",
				       nm, mdname(mddev));
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	conf->last_used = 0; /* just make sure it is in-range */
	lower_barrier(conf);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_destroy(oldpool);
	return 0;
}

static void raid1_quiesce(mddev_t *mddev, int state)
{
	conf_t *conf = mddev_to_conf(mddev);

	switch(state) {
	case 1:
		raise_barrier(conf);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}


static struct mdk_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk= raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= sync_request,
	.resize		= raid1_resize,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");