/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 */
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
#include <trace/events/block.h>

#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"

#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE

static bool devices_handle_discard_safely = false;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;

/*
 * Stripe cache
 */
#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8

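/*
 * Stripes are hashed by sector into NR_HASH buckets, and a smaller set of
 * NR_STRIPE_HASH_LOCKS spinlocks protects the hash chains and the per-hash
 * inactive lists, so lookups on different buckets rarely contend.
 */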
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}

static inline int stripe_hash_locks_hash(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}

static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_lock_irq(conf->hash_locks + hash);
	spin_lock(&conf->device_lock);
}

static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_unlock(&conf->device_lock);
	spin_unlock_irq(conf->hash_locks + hash);
}

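/*
 * Take every hash lock (in index order, nested for lockdep's benefit) and
 * then the device lock; used when the whole stripe cache must be quiesced
 * at once.
 */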
static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	local_irq_disable();
	spin_lock(conf->hash_locks);
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
	spin_lock(&conf->device_lock);
}

static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	spin_unlock(&conf->device_lock);
	for (i = NR_STRIPE_HASH_LOCKS; i; i--)
		spin_unlock(conf->hash_locks + i - 1);
	local_irq_enable();
}

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might possibly
 * extend beyond this device.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio_sectors(bio);
	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_set(segments, cnt);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always start from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helps do the right thing with ddf layouts.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}

static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_iter.bi_size = 0;
		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
					 bi, 0);
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5worker_group *group;
	int thread_cnt;
	int i, cpu = sh->cpu;

	if (!cpu_online(cpu)) {
		cpu = cpumask_any(cpu_online_mask);
		sh->cpu = cpu;
	}

	if (list_empty(&sh->lru)) {
		struct r5worker_group *group;
		group = conf->worker_groups + cpu_to_group(cpu);
		list_add_tail(&sh->lru, &group->handle_list);
		group->stripes_cnt++;
		sh->group = group;
	}

	if (conf->worker_cnt_per_group == 0) {
		md_wakeup_thread(conf->mddev->thread);
		return;
	}

	group = conf->worker_groups + cpu_to_group(sh->cpu);

	group->workers[0].working = true;
	/* wakeup the worker */
	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);

	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
	/* wakeup more workers */
	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
		if (group->workers[i].working == false) {
			group->workers[i].working = true;
			queue_work_on(sh->cpu, raid5_wq,
				      &group->workers[i].work);
			thread_cnt--;
		}
	}
}

static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
			      struct list_head *temp_inactive_list)
{
	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes)==0);
	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
			list_add_tail(&sh->lru, &conf->delayed_list);
			if (atomic_read(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			   sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			if (conf->worker_cnt_per_group == 0) {
				list_add_tail(&sh->lru, &conf->handle_list);
			} else {
				raid5_wakeup_stripe_thread(sh);
				return;
			}
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state))
			list_add_tail(&sh->lru, temp_inactive_list);
	}
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
			     struct list_head *temp_inactive_list)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh, temp_inactive_list);
}

/*
 * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list
 *
 * Be careful: Only one task can add/delete stripes from temp_inactive_list at
 * the same time. Temp_inactive_list is used to avoid commonly grabbing
 * device_lock.
 */
static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash)
{
	int size;
	bool do_wakeup = false;
	unsigned long flags;

	if (hash == NR_STRIPE_HASH_LOCKS) {
		size = NR_STRIPE_HASH_LOCKS;
		hash = NR_STRIPE_HASH_LOCKS - 1;
	} else
		size = 1;
	while (size) {
		struct list_head *list = &temp_inactive_list[size - 1];

		/*
		 * We don't hold any lock here yet, get_active_stripe() might
		 * remove stripes from the list
		 */
		if (!list_empty_careful(list)) {
			spin_lock_irqsave(conf->hash_locks + hash, flags);
			if (list_empty(conf->inactive_list + hash) &&
			    !list_empty(list))
				atomic_dec(&conf->empty_inactive_list_nr);
			list_splice_tail_init(list, conf->inactive_list + hash);
			do_wakeup = true;
			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
		}
		size--;
		hash--;
	}

	if (do_wakeup) {
		wake_up(&conf->wait_for_stripe);
		if (conf->retry_read_aligned)
			md_wakeup_thread(conf->mddev->thread);
	}
}

/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	struct stripe_head *sh;
	int count = 0;
	struct llist_node *head;

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);
	while (head) {
		int hash;

		sh = llist_entry(head, struct stripe_head, release_list);
		head = llist_next(head);
		/* sh could be readded after STRIPE_ON_RELEASE_LIST is cleared */
		smp_mb();
		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
		/*
		 * Don't worry the bit is set here, because if the bit is set
		 * again, the count is always > 1. This is true for
		 * STRIPE_ON_UNPLUG_LIST bit too.
		 */
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
		count++;
	}

	return count;
}

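/*
 * Drop one reference to @sh.  The common case (count stays above zero)
 * takes no locks; the final reference is either queued on
 * conf->released_stripes for the array thread to process, or, failing
 * that, handled inline under device_lock in the slow path below.
 */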
static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;
	struct list_head list;
	int hash;
	bool wakeup;

	/* Avoid release_list until the last reference.
	 */
	if (atomic_add_unless(&sh->count, -1, 1))
		return;

	if (unlikely(!conf->mddev->thread) ||
		test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
		goto slow_path;
	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
	if (wakeup)
		md_wakeup_thread(conf->mddev->thread);
	return;
slow_path:
	local_irq_save(flags);
	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
		INIT_LIST_HEAD(&list);
		hash = sh->hash_lock_index;
		do_release_stripe(conf, sh, &list);
		spin_unlock(&conf->device_lock);
		release_inactive_stripe_list(conf, &list, hash);
	}
	local_irq_restore(flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}

/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(conf->inactive_list + hash))
		goto out;
	first = (conf->inactive_list + hash)->next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
	BUG_ON(hash != sh->hash_lock_index);
	if (list_empty(conf->inactive_list + hash))
		atomic_inc(&conf->empty_inactive_list_nr);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num ; i++) {
		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
		sh->dev[i].orig_page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			   struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int i, seq;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sector);
retry:
	seq = read_seqcount_begin(&conf->gen_lock);
	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			WARN_ON(1);
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	if (read_seqcount_retry(&conf->gen_lock, seq))
		goto retry;
	insert_hash(conf, sh);
	sh->cpu = smp_processor_id();
}

static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;

	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * section of the array, and some very different failed
 * devices may affect both sections.
 * So we need to verify this, and avoid deciding that the
 * array is failed.
 */
static int calc_degraded(struct r5conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in-sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (conf->raid_disks == conf->previous_raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded2++;
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}

static int has_failed(struct r5conf *conf)
{
	int degraded;

	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	degraded = calc_degraded(conf);
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}

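/*
 * Get an active reference to the stripe covering @sector, initialising a
 * free stripe_head if the sector is not cached.  Unless @noblock, this may
 * sleep until the active count drops below 3/4 of max_nr_stripes.
 */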
static struct stripe_head *
get_active_stripe(struct r5conf *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;
	int hash = stripe_hash_locks_hash(sector);

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(conf->hash_locks + hash);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    *(conf->hash_locks + hash));
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf, hash);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(
					conf->wait_for_stripe,
					!list_empty(conf->inactive_list + hash) &&
					(atomic_read(&conf->active_stripes)
					 < (conf->max_nr_stripes * 3 / 4)
					 || !conf->inactive_blocked),
					*(conf->hash_locks + hash));
				conf->inactive_blocked = 0;
			} else {
				init_stripe(sh, sector, previous);
				atomic_inc(&sh->count);
			}
		} else if (!atomic_inc_not_zero(&sh->count)) {
			spin_lock(&conf->device_lock);
			if (!atomic_read(&sh->count)) {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				BUG_ON(list_empty(&sh->lru) &&
				       !test_bit(STRIPE_EXPANDING, &sh->state));
				list_del_init(&sh->lru);
				if (sh->group) {
					sh->group->stripes_cnt--;
					sh->group = NULL;
				}
			}
			atomic_inc(&sh->count);
			spin_unlock(&conf->device_lock);
		}
	} while (sh == NULL);

	spin_unlock_irq(conf->hash_locks + hash);
	return sh;
}

/* Determine if 'data_offset' or 'new_data_offset' should be used
 * in this stripe_head.
 */
static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
{
	sector_t progress = conf->reshape_progress;
	/* Need a memory barrier to make sure we see the value
	 * of conf->generation, or ->data_offset that was set before
	 * reshape_progress was updated.
	 */
	smp_rmb();
	if (progress == MaxSector)
		return 0;
	if (sh->generation == conf->generation - 1)
		return 0;
	/* We are in a reshape, and this stripe is not from the previous
	 * generation - so it must use the new data offset.
	 */
	return 1;
}

static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

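/*
 * Issue the actual I/O for a stripe: one bio per device that has a read,
 * write or replacement-write pending, skipping (or waiting on) known bad
 * blocks before the submit.
 */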
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		int replace_only = 0;
		struct bio *bi, *rbi;
		struct md_rdev *rdev, *rrdev = NULL;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				rw = WRITE_FUA;
			else
				rw = WRITE;
			if (test_bit(R5_Discard, &sh->dev[i].flags))
				rw |= REQ_DISCARD;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else if (test_and_clear_bit(R5_WantReplace,
					    &sh->dev[i].flags)) {
			rw = WRITE;
			replace_only = 1;
		} else
			continue;
		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
			rw |= REQ_SYNC;

		bi = &sh->dev[i].req;
		rbi = &sh->dev[i].rreq;

		rcu_read_lock();
		rrdev = rcu_dereference(conf->disks[i].replacement);
		smp_mb();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev) {
			rdev = rrdev;
			rrdev = NULL;
		}
		if (rw & WRITE) {
			if (replace_only)
				rdev = NULL;
			if (rdev == rrdev)
				/* We raced and saw duplicates */
				rrdev = NULL;
		} else {
			if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
				rdev = rrdev;
			rrdev = NULL;
		}

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		if (rrdev && test_bit(Faulty, &rrdev->flags))
			rrdev = NULL;
		if (rrdev)
			atomic_inc(&rrdev->nr_pending);
		rcu_read_unlock();

		/* We have already checked bad blocks for reads.  Now
		 * need to check for writes.  We never accept write errors
		 * on the replacement, so we don't need to check rrdev.
		 */
		while ((rw & WRITE) && rdev &&
		       test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					      &first_bad, &bad_sectors);
			if (!bad)
				break;

			if (bad < 0) {
				set_bit(BlockedBadBlocks, &rdev->flags);
				if (!conf->mddev->external &&
				    conf->mddev->flags) {
					/* It is very unlikely, but we might
					 * still need to write out the
					 * bad block log - better give it
					 * a chance*/
					md_check_recovery(conf->mddev);
				}
				/*
				 * Because md_wait_for_blocked_rdev
				 * will dec nr_pending, we must
				 * increment it first.
				 */
				atomic_inc(&rdev->nr_pending);
				md_wait_for_blocked_rdev(rdev, conf->mddev);
			} else {
				/* Acknowledged bad block - skip the write */
				rdev_dec_pending(rdev, conf->mddev);
				rdev = NULL;
			}
		}

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bio_reset(bi);
			bi->bi_bdev = rdev->bdev;
			bi->bi_rw = rw;
			bi->bi_end_io = (rw & WRITE)
				? raid5_end_write_request
				: raid5_end_read_request;
			bi->bi_private = sh;

			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				bi->bi_iter.bi_sector = (sh->sector
						 + rdev->new_data_offset);
			else
				bi->bi_iter.bi_sector = (sh->sector
						 + rdev->data_offset);
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
				bi->bi_rw |= REQ_NOMERGE;

			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
			sh->dev[i].vec.bv_page = sh->dev[i].page;
			bi->bi_vcnt = 1;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_iter.bi_size = STRIPE_SIZE;
			/*
			 * If this is discard request, set bi_vcnt 0. We don't
			 * want to confuse SCSI because SCSI will replace payload
			 */
			if (rw & REQ_DISCARD)
				bi->bi_vcnt = 0;
			if (rrdev)
				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);

			if (conf->mddev->gendisk)
				trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
						      bi, disk_devt(conf->mddev->gendisk),
						      sh->dev[i].sector);
			generic_make_request(bi);
		}
		if (rrdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bio_reset(rbi);
			rbi->bi_bdev = rrdev->bdev;
			rbi->bi_rw = rw;
			BUG_ON(!(rw & WRITE));
			rbi->bi_end_io = raid5_end_write_request;
			rbi->bi_private = sh;

			pr_debug("%s: for %llu schedule op %ld on "
				 "replacement disc %d\n",
				__func__, (unsigned long long)sh->sector,
				rbi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				rbi->bi_iter.bi_sector = (sh->sector
						  + rrdev->new_data_offset);
			else
				rbi->bi_iter.bi_sector = (sh->sector
						  + rrdev->data_offset);
			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
			sh->dev[i].rvec.bv_page = sh->dev[i].page;
			rbi->bi_vcnt = 1;
			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			rbi->bi_io_vec[0].bv_offset = 0;
			rbi->bi_iter.bi_size = STRIPE_SIZE;
			/*
			 * If this is discard request, set bi_vcnt 0. We don't
			 * want to confuse SCSI because SCSI will replace payload
			 */
			if (rw & REQ_DISCARD)
				rbi->bi_vcnt = 0;
			if (conf->mddev->gendisk)
				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
						      rbi, disk_devt(conf->mddev->gendisk),
						      sh->dev[i].sector);
			generic_make_request(rbi);
		}
		if (!rdev && !rrdev) {
			if (rw & WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}

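/*
 * Copy between a cached stripe page and the pages of a bio.  When skip_copy
 * is enabled and a write covers the whole STRIPE_SIZE page, the bio page is
 * borrowed directly instead of being copied.
 */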
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page **page,
	sector_t sector, struct dma_async_tx_descriptor *tx,
	struct stripe_head *sh)
{
	struct bio_vec bvl;
	struct bvec_iter iter;
	struct page *bio_page;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_iter.bi_sector >= sector)
		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, iter) {
		int len = bvl.bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bvl.bv_offset;
			bio_page = bvl.bv_page;
			if (frombio) {
				if (sh->raid_conf->skip_copy &&
				    b_offset == 0 && page_offset == 0 &&
				    clen == STRIPE_SIZE)
					*page = bio_page;
				else
					tx = async_memcpy(*page, bio_page, page_offset,
						  b_offset, clen, &submit);
			} else
				tx = async_memcpy(bio_page, *page, b_offset,
					  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}

static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_iter.bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_active_stripes(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&sh->stripe_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&sh->stripe_lock);
			while (rbi && rbi->bi_iter.bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, &dev->page,
					dev->sector, tx, sh);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}

static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}

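/*
 * Rebuild the single block 'target' for RAID4/5 by XOR-ing together every
 * other block in the stripe.
 */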
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}

static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks ; i++)
		blocks[i] = NULL;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}

static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock_irq(&sh->stripe_lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock_irq(&sh->stripe_lock);
			WARN_ON(dev->page != dev->orig_page);

			while (wbi && wbi->bi_iter.bi_sector <
				dev->sector + STRIPE_SECTORS) {
				if (wbi->bi_rw & REQ_FUA)
					set_bit(R5_WantFUA, &dev->flags);
				if (wbi->bi_rw & REQ_SYNC)
					set_bit(R5_SyncIO, &dev->flags);
				if (wbi->bi_rw & REQ_DISCARD)
					set_bit(R5_Discard, &dev->flags);
				else {
					tx = async_copy_data(1, wbi, &dev->page,
						dev->sector, tx, sh);
					if (dev->page != dev->orig_page) {
						set_bit(R5_SkipCopy, &dev->flags);
						clear_bit(R5_UPTODATE, &dev->flags);
						clear_bit(R5_OVERWRITE, &dev->flags);
					}
				}
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}

static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;
	bool fua = false, sync = false, discard = false;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
		sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
		discard |= test_bit(R5_Discard, &sh->dev[i].flags);
	}

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx) {
			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
				set_bit(R5_UPTODATE, &dev->flags);
			if (fua)
				set_bit(R5_WantFUA, &dev->flags);
			if (sync)
				set_bit(R5_SyncIO, &dev->flags);
		}
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = 0; i < sh->disks; i++) {
		if (pd_idx == i)
			continue;
		if (!test_bit(R5_Discard, &sh->dev[i].flags))
			break;
	}
	if (i >= sh->disks) {
		atomic_inc(&sh->count);
		set_bit(R5_Discard, &sh->dev[pd_idx].flags);
		ops_complete_reconstruct(sh);
		return;
	}
	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}

static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count, i;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	for (i = 0; i < sh->disks; i++) {
		if (sh->pd_idx == i || sh->qd_idx == i)
			continue;
		if (!test_bit(R5_Discard, &sh->dev[i].flags))
			break;
	}
	if (i >= sh->disks) {
		atomic_inc(&sh->count);
		set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
		set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
		ops_complete_reconstruct(sh);
		return;
	}

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}

static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}

	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}

static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}

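/*
 * Dispatch the per-stripe operations requested in @ops_request; the async_tx
 * descriptors are chained so biofill/compute/prexor/drain/reconstruct/check
 * run in dependency order on the per-cpu scribble buffers.
 */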
static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}

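/* Allocate one stripe_head and its data pages, then park it on the
 * inactive list for @hash via release_stripe().
 */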
static int grow_one_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh;
	sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;

	sh->raid_conf = conf;

	spin_lock_init(&sh->stripe_lock);

	if (grow_buffers(sh)) {
		shrink_buffers(sh);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->hash_lock_index = hash;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(struct r5conf *conf, int num)
{
	struct kmem_cache *sc;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);
	int hash;

	if (conf->mddev->gendisk)
		sprintf(conf->cache_name[0],
			"raid%d-%s", conf->level, mdname(conf->mddev));
	else
		sprintf(conf->cache_name[0],
			"raid%d-%p", conf->level, conf->mddev);
	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
	while (num--) {
		if (!grow_one_stripe(conf, hash))
			return 1;
		conf->max_nr_stripes++;
		hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
	}
	return 0;
}

/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry of the +2 arrays to addr_conv_t
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}

static int resize_stripes(struct r5conf *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads, freeing the old stripe_heads.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;
	int hash, cnt;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->stripe_lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	hash = 0;
	cnt = 0;
	list_for_each_entry(nsh, &newstripes, lru) {
		lock_device_hash_lock(conf, hash);
		wait_event_cmd(conf->wait_for_stripe,
			       !list_empty(conf->inactive_list + hash),
			       unlock_device_hash_lock(conf, hash),
			       lock_device_hash_lock(conf, hash));
		osh = get_free_stripe(conf, hash);
		unlock_device_hash_lock(conf, hash);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++) {
			nsh->dev[i].page = osh->dev[i].page;
			nsh->dev[i].orig_page = osh->dev[i].page;
		}
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		nsh->hash_lock_index = hash;
		kmem_cache_free(conf->slab_cache, osh);
		cnt++;
		if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
		    !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
			hash++;
			cnt = 0;
		}
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				nsh->dev[i].orig_page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section pass, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}

static int drop_one_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh;

	spin_lock_irq(conf->hash_locks + hash);
	sh = get_free_stripe(conf, hash);
	spin_unlock_irq(conf->hash_locks + hash);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(struct r5conf *conf)
{
	int hash;
	for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
		while (drop_one_stripe(conf, hash))
			;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}

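/*
 * Read completion: on success clear any pending read error for the device;
 * on failure decide whether to retry the read, schedule a rewrite of the
 * failing block, or fail the device.
 */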
static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev = NULL;
	sector_t s;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}
	if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
		/* If replacement finished while this request was outstanding,
		 * 'replacement' might be NULL already.
		 * In that case it moved down to 'rdev'.
		 * rdev is not removed until all requests are finished.
		 */
		rdev = conf->disks[i].replacement;
	if (!rdev)
		rdev = conf->disks[i].rdev;

	if (use_new_offset(conf, sh))
		s = sh->sector + rdev->new_data_offset;
	else
		s = sh->sector + rdev->data_offset;
	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			/* Note that this cannot happen on a
			 * replacement device.  We just fail those on
			 * any error
			 */
			printk_ratelimited(
				KERN_INFO
				"md/raid:%s: read error corrected"
				" (%lu sectors at %llu on %s)\n",
				mdname(conf->mddev), STRIPE_SECTORS,
				(unsigned long long)s,
				bdevname(rdev->bdev, b));
			atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
			clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);

		if (atomic_read(&rdev->read_errors))
			atomic_set(&rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(rdev->bdev, b);
		int retry = 0;
		int set_bad = 0;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error on replacement device "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		else if (conf->mddev->degraded >= conf->max_degraded) {
			set_bad = 1;
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error not correctable "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
			/* Oh, no!!! */
			set_bad = 1;
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error NOT corrected!! "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		} else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "md/raid:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (set_bad && test_bit(In_sync, &rdev->flags)
		    && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
			retry = 1;
		if (retry)
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
				set_bit(R5_ReadError, &sh->dev[i].flags);
				clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
			} else
				set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			if (!(set_bad
			      && test_bit(In_sync, &rdev->flags)
			      && rdev_set_badblocks(
				      rdev, sh->sector, STRIPE_SECTORS, 0)))
				md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

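/*
 * Write completion for both the main device and an optional replacement:
 * records write errors (possibly requesting a replacement) and remembers
 * blocks written over known-bad ranges.
 */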
static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	struct md_rdev *uninitialized_var(rdev);
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	sector_t first_bad;
	int bad_sectors;
	int replacement = 0;

	for (i = 0 ; i < disks; i++) {
		if (bi == &sh->dev[i].req) {
			rdev = conf->disks[i].rdev;
			break;
		}
		if (bi == &sh->dev[i].rreq) {
			rdev = conf->disks[i].replacement;
			if (rdev)
				replacement = 1;
			else
				/* rdev was removed and 'replacement'
				 * replaced it.  rdev is not removed
				 * until all requests are finished.
				 */
				rdev = conf->disks[i].rdev;
			break;
		}
	}
	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (replacement) {
		if (!uptodate)
			md_error(conf->mddev, rdev);
		else if (is_badblock(rdev, sh->sector,
				     STRIPE_SECTORS,
				     &first_bad, &bad_sectors))
			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
	} else {
		if (!uptodate) {
			set_bit(STRIPE_DEGRADED, &sh->state);
			set_bit(WriteErrorSeen, &rdev->flags);
			set_bit(R5_WriteError, &sh->dev[i].flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
		} else if (is_badblock(rdev, sh->sector,
				       STRIPE_SECTORS,
				       &first_bad, &bad_sectors)) {
			set_bit(R5_MadeGood, &sh->dev[i].flags);
			if (test_bit(R5_ReadError, &sh->dev[i].flags))
				/* That was a successful write so make
				 * sure it looks like we already did
				 * a re-write.
				 */
				set_bit(R5_ReWrite, &sh->dev[i].flags);
		}
	}
	rdev_dec_pending(rdev, conf->mddev);

	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
		clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_max_vecs = 1;
	dev->req.bi_private = sh;

	bio_init(&dev->rreq);
	dev->rreq.bi_io_vec = &dev->rvec;
	dev->rreq.bi_max_vecs = 1;
	dev->rreq.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i, previous);
}

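/* Mark @rdev Faulty, recompute mddev->degraded under device_lock, and log
 * the disk failure.
 */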
static void error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r5conf *conf = mddev->private;
	unsigned long flags;
	pr_debug("raid456: error called\n");

	spin_lock_irqsave(&conf->device_lock, flags);
	clear_bit(In_sync, &rdev->flags);
	mddev->degraded = calc_degraded(conf);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);

	set_bit(Blocked, &rdev->flags);
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid:%s: Disk failure on %s, disabling device.\n"
	       "md/raid:%s: Operation continuing on %d devices.\n",
	       mdname(mddev),
	       bdevname(rdev->bdev, b),
	       mdname(mddev),
	       conf->raid_disks - mddev->degraded);
}

/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them
 */
static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	sector_t stripe, stripe2;
	sector_t chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number;
	*dd_idx = sector_div(stripe, data_disks);
	stripe2 = stripe;
	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = -1;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			BUG();
		}
		break;
	case 6:

		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;

		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
			 */
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, except the first
			 * stripe is D D D P Q rather than
			 * Q D D D P
			 */
			stripe2 += 1;
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}

	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
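
/*
 * Illustrative sketch (not part of the driver): for a hypothetical
 * 4-disk RAID5 array with 64KiB chunks (sectors_per_chunk = 128) and
 * ALGORITHM_LEFT_SYMMETRIC, logical sector r_sector = 300 maps as:
 *
 *   chunk_offset = 300 % 128 = 44,  chunk_number = 300 / 128 = 2
 *   data_disks   = 4 - 1 = 3
 *   *dd_idx      = 2 % 3 = 2,       stripe = 2 / 3 = 0
 *   pd_idx       = 3 - (0 % 4) = 3
 *   *dd_idx      = (3 + 1 + 2) % 4 = 2
 *   new_sector   = 0 * 128 + 44 = 44
 *
 * i.e. the block lands at sector 44 of disk 2, with parity on disk 3.
 */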

static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	sector_t chunk_number;
	int dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0;
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Like left_symmetric, but P is before Q */
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else {
				/* D D Q P D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 1);
			}
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
		|| sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
		       mdname(conf->mddev));
		return 0;
	}
	return r_sector;
}
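
/*
 * Sketch of the inverse (illustration only): compute_blocknr() undoes the
 * layout shuffle and then re-runs raid5_compute_sector() as a self-check.
 * Continuing the example above, for the stripe at new_sector 44 with
 * pd_idx = 3, device i = 2 unshuffles to data index (2 + 4 - (3 + 1)) = 2,
 * so r_sector = (0 * 3 + 2) * 128 + 44 = 300, the sector we started from.
 */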

static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;

	if (rcw) {

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			if (!s->locked)
				/* False alarm, nothing to do */
				return;
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;

		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		if (s->locked + conf->max_degraded == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
	} else {
		BUG_ON(level == 6);
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		if (!s->locked)
			/* False alarm - nothing to do */
			return;
		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
	}

	/* keep the parity disk(s) locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;

	if (level == 6) {
		int qd_idx = sh->qd_idx;
		struct r5dev *dev = &sh->dev[qd_idx];

		set_bit(R5_LOCKED, &dev->flags);
		clear_bit(R5_UPTODATE, &dev->flags);
		s->locked++;
	}

	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}
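
/*
 * Sketch of the two paths above (illustration only): the rcw branch drains
 * new data into the stripe cache and rebuilds parity from all blocks, so
 * it requests BIODRAIN + RECONSTRUCT; the read-modify-write branch first
 * subtracts the old data from the parity, so it additionally requests
 * PREXOR and runs prexor -> biodrain -> reconstruct.
 */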

/*
 * Each stripe/dev can have one or more bios attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	struct r5conf *conf = sh->raid_conf;
	int firstwrite = 0;

	pr_debug("adding bi b#%llu to stripe s#%llu\n",
		 (unsigned long long)bi->bi_iter.bi_sector,
		 (unsigned long long)sh->sector);

	/*
	 * If several bios share a stripe, bi_phys_segments acts as a
	 * reference count to avoid races. The count should already have
	 * been increased before this function is called (for example, in
	 * make_request()), so other bios sharing this stripe will not free
	 * the stripe prematurely. If the stripe is owned by a single bio,
	 * the stripe lock protects it.
	 */
	spin_lock_irq(&sh->stripe_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
		if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	raid5_inc_bi_active_stripes(bi);

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi = sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_iter.bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bio_end_sector(bi) >= sector)
				sector = bio_end_sector(bi);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		 (unsigned long long)(*bip)->bi_iter.bi_sector,
		 (unsigned long long)sh->sector, dd_idx);
	spin_unlock_irq(&sh->stripe_lock);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&sh->stripe_lock);
	return 0;
}
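
/*
 * Illustration (not part of the driver): with STRIPE_SECTORS = 8 and a
 * dev starting at sector 0, three 2-sector write bios at sectors 0, 2
 * and 4 end up linked in ascending order through bi_next:
 *
 *   towrite -> bio@0 -> bio@2 -> bio@4 -> NULL
 *
 * A fourth bio at sector 3 would hit the overlap check against bio@2
 * (bio_end_sector(bio@2) = 4 > 3), so R5_Overlap is set and the caller
 * must retry later.  Only if the chain covers all 8 sectors does
 * R5_OVERWRITE get set.
 */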

static void end_reshape(struct r5conf *conf);

static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			    struct stripe_head *sh)
{
	int sectors_per_chunk =
		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
	int dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	raid5_compute_sector(conf,
			     stripe * (disks - conf->max_degraded)
			     * sectors_per_chunk + chunk_offset,
			     previous,
			     &dd_idx, sh);
}

static void
handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
		     struct stripe_head_state *s, int disks,
		     struct bio **return_bi)
{
	int i;
	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			struct md_rdev *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				atomic_inc(&rdev->nr_pending);
			else
				rdev = NULL;
			rcu_read_unlock();
			if (rdev) {
				if (!rdev_set_badblocks(
					    rdev,
					    sh->sector,
					    STRIPE_SECTORS, 0))
					md_error(conf->mddev, rdev);
				rdev_dec_pending(rdev, conf->mddev);
			}
		}
		spin_lock_irq(&sh->stripe_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		spin_unlock_irq(&sh->stripe_lock);
		if (bi)
			bitmap_end = 1;

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_iter.bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_active_stripes(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
		bitmap_end = 0;
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) {
			WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
			sh->dev[i].page = sh->dev[i].orig_page;
		}

		if (bi) bitmap_end = 1;
		while (bi && bi->bi_iter.bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_active_stripes(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			spin_lock_irq(&sh->stripe_lock);
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			spin_unlock_irq(&sh->stripe_lock);
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			while (bi && bi->bi_iter.bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_active_stripes(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
		/* If we were in the middle of a write the parity block might
		 * still be locked - so just clear all R5_LOCKED flags
		 */
		clear_bit(R5_LOCKED, &sh->dev[i].flags);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}

static void
handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
		   struct stripe_head_state *s)
{
	int abort = 0;
	int i;

	clear_bit(STRIPE_SYNCING, &sh->state);
	if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
		wake_up(&conf->wait_for_overlap);
	s->syncing = 0;
	s->replacing = 0;
	/* There is nothing more to do for sync/check/repair.
	 * Don't even need to abort as that is handled elsewhere
	 * if needed, and not always wanted e.g. if there is a known
	 * bad block here.
	 * For recover/replace we need to record a bad block on all
	 * non-sync devices, or abort the recovery
	 */
	if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
		/* During recovery devices cannot be removed, so
		 * locking and refcounting of rdevs is not needed
		 */
		for (i = 0; i < conf->raid_disks; i++) {
			struct md_rdev *rdev = conf->disks[i].rdev;
			if (rdev
			    && !test_bit(Faulty, &rdev->flags)
			    && !test_bit(In_sync, &rdev->flags)
			    && !rdev_set_badblocks(rdev, sh->sector,
						   STRIPE_SECTORS, 0))
				abort = 1;
			rdev = conf->disks[i].replacement;
			if (rdev
			    && !test_bit(Faulty, &rdev->flags)
			    && !test_bit(In_sync, &rdev->flags)
			    && !rdev_set_badblocks(rdev, sh->sector,
						   STRIPE_SECTORS, 0))
				abort = 1;
		}
		if (abort)
			conf->recovery_disabled =
				conf->mddev->recovery_disabled;
	}
	md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
}

static int want_replace(struct stripe_head *sh, int disk_idx)
{
	struct md_rdev *rdev;
	int rv = 0;
	/* Doing recovery so rcu locking not required */
	rdev = sh->raid_conf->disks[disk_idx].replacement;
	if (rdev
	    && !test_bit(Faulty, &rdev->flags)
	    && !test_bit(In_sync, &rdev->flags)
	    && (rdev->recovery_offset <= sh->sector
		|| rdev->mddev->recovery_cp <= sh->sector))
		rv = 1;

	return rv;
}

/* fetch_block - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill to continue
 */
static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
		       int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
				  &sh->dev[s->failed_num[1]] };

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->replacing && want_replace(sh, disk_idx)) ||
	     (s->failed >= 1 && fdev[0]->toread) ||
	     (s->failed >= 2 && fdev[1]->toread) ||
	     (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
	      (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
	      !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
	     ((sh->raid_conf->level == 6 ||
	       sh->sector >= sh->raid_conf->mddev->recovery_cp)
	      && s->failed && s->to_write &&
	      (s->to_write - s->non_overwrite <
	       sh->raid_conf->raid_disks - sh->raid_conf->max_degraded) &&
	      (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
		/* we would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
		BUG_ON(test_bit(R5_Wantread, &dev->flags));
		if ((s->uptodate == disks - 1) &&
		    (s->failed && (disk_idx == s->failed_num[0] ||
				   disk_idx == s->failed_num[1]))) {
			/* have disk failed, and we're requested to fetch it;
			 * do compute it
			 */
			pr_debug("Computing stripe %llu block %d\n",
				 (unsigned long long)sh->sector, disk_idx);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1; /* no second target */
			s->req_compute = 1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 1;
		} else if (s->uptodate == disks-2 && s->failed >= 2) {
			/* Computing 2-failure is *very* expensive; only
			 * do it if failed >= 2
			 */
			int other;
			for (other = disks; other--; ) {
				if (other == disk_idx)
					continue;
				if (!test_bit(R5_UPTODATE,
					      &sh->dev[other].flags))
					break;
			}
			BUG_ON(other < 0);
			pr_debug("Computing stripe %llu blocks %d,%d\n",
				 (unsigned long long)sh->sector,
				 disk_idx, other);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
			set_bit(R5_Wantcompute, &sh->dev[other].flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = other;
			s->uptodate += 2;
			s->req_compute = 1;
			return 1;
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n",
				 disk_idx, s->syncing);
		}
	}

	return 0;
}
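
/*
 * Note (sketch, not from the original source): the disks-1 / disks-2
 * thresholds above mirror what the compute engines can recover in one
 * pass: with a single block missing it can be rebuilt from the rest of
 * the stripe, and with two blocks missing on RAID6 both are recovered
 * in a single operation, which is why ops.target and ops.target2 are
 * set together rather than computing each block independently.
 */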

/**
 * handle_stripe_fill - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill(struct stripe_head *sh,
			       struct stripe_head_state *s,
			       int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}

/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
static void handle_stripe_clean_event(struct r5conf *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
{
	int i;
	struct r5dev *dev;
	int discard_pending = 0;

	for (i = disks; i--; )
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Discard, &dev->flags) ||
			     test_bit(R5_SkipCopy, &dev->flags))) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				pr_debug("Return write for disc %d\n", i);
				if (test_and_clear_bit(R5_Discard, &dev->flags))
					clear_bit(R5_UPTODATE, &dev->flags);
				if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
					WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
					dev->page = dev->orig_page;
				}
				wbi = dev->written;
				dev->written = NULL;
				while (wbi && wbi->bi_iter.bi_sector <
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (!raid5_dec_bi_active_stripes(wbi)) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
						*return_bi = wbi;
					}
					wbi = wbi2;
				}
				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
						STRIPE_SECTORS,
						!test_bit(STRIPE_DEGRADED, &sh->state),
						0);
			} else if (test_bit(R5_Discard, &dev->flags))
				discard_pending = 1;
			WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
			WARN_ON(dev->page != dev->orig_page);
		}
	if (!discard_pending &&
	    test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
		clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
		clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
		if (sh->qd_idx >= 0) {
			clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
			clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
		}
		/* now that discard is done we can proceed with any sync */
		clear_bit(STRIPE_DISCARD, &sh->state);
		/*
		 * SCSI discard will change some bio fields and the stripe has
		 * no updated data, so remove it from hash list and the stripe
		 * will be reinitialized
		 */
		spin_lock_irq(&conf->device_lock);
		remove_hash(sh);
		spin_unlock_irq(&conf->device_lock);
		if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
			set_bit(STRIPE_HANDLE, &sh->state);

	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}

static void handle_stripe_dirtying(struct r5conf *conf,
				   struct stripe_head *sh,
				   struct stripe_head_state *s,
				   int disks)
{
	int rmw = 0, rcw = 0, i;
	sector_t recovery_cp = conf->mddev->recovery_cp;

	/* RAID6 requires 'rcw' in current implementation.
	 * Otherwise, check whether resync is now happening or should start.
	 * If yes, then the array is dirty (after unclean shutdown or
	 * initial creation), so parity in some stripes might be inconsistent.
	 * In this case, we need to always do reconstruct-write, to ensure
	 * that in case of drive failure or read-error correction, we
	 * generate correct data from the parity.
	 */
	if (conf->max_degraded == 2 ||
	    (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
		/* Calculate the real rcw later - for now make it
		 * look like rcw is cheaper
		 */
		rcw = 1; rmw = 2;
		pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
			 conf->max_degraded, (unsigned long long)recovery_cp,
			 (unsigned long long)sh->sector);
	} else for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks;
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rcw++;
			else
				rcw += 2*disks;
		}
	}
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		 (unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0) {
		/* prefer read-modify-write, but need to get some data */
		if (conf->mddev->queue)
			blk_add_trace_msg(conf->mddev->queue,
					  "raid5 rmw %llu %d",
					  (unsigned long long)sh->sector, rmw);
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old block %d for r-m-w\n",
						 i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	}
	if (rcw <= rmw && rcw > 0) {
		/* want reconstruct write, but need to get some data */
		int qread = 0;
		rcw = 0;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx && i != sh->qd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags))) {
				rcw++;
				if (test_bit(R5_Insync, &dev->flags) &&
				    test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old block "
						 "%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
					qread++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
		if (rcw && conf->mddev->queue)
			blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
					  (unsigned long long)sh->sector,
					  rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
	}

	if (rcw > disks && rmw > disks &&
	    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		set_bit(STRIPE_DELAYED, &sh->state);

	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid_run_ops only
	 * handles the case where compute block and reconstruct are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	     !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction(sh, s, rcw == 0, 0);
}
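
/*
 * Worked example of the cost comparison above (illustration only): in a
 * 6-disk RAID5 stripe with every device in-sync and a single data block
 * being fully overwritten, read-modify-write needs the old data block
 * plus the parity block, so rmw = 2, while reconstruct-write needs the
 * four data blocks not being overwritten, so rcw = 4 and the rmw path
 * wins.  Overwrite four of the five data blocks instead and the counts
 * flip to rmw = 5, rcw = 1, favouring reconstruct-write.
 */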

static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks)
{
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
			s->uptodate--;
			break;
		}
		dev = &sh->dev[s->failed_num[0]];
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;
		if (!dev)
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		set_bit(R5_LOCKED, &dev->flags);
		s->locked++;
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		 */
		if (s->failed)
			break;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				sh->ops.target2 = -1;
				s->uptodate++;
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
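
/*
 * check_state progression for raid5 (sketch of the switch above):
 *
 *   idle -> run -> check_result --(P parity ok)-------------> INSYNC
 *                      |
 *                      +--(mismatch, repair permitted)--> compute_run
 *                             -> compute_result -> rewrite P -> INSYNC
 */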

static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
				  struct stripe_head_state *s,
				  int disks)
{
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct r5dev *dev;

	set_bit(STRIPE_HANDLE, &sh->state);

	BUG_ON(s->failed > 2);

	/* Want to check and possibly repair P and Q.
	 * However there could be one 'failed' device, in which
	 * case we can only check one of them, possibly using the
	 * other to generate missing data
	 */
	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are < 2 failures */
		if (s->failed == s->q_failed) {
			/* The only possible failed device holds Q, so it
			 * makes sense to check P (If anything else were failed,
			 * we would have used P to recreate it).
			 */
			sh->check_state = check_state_run;
		}
		if (!s->q_failed && s->failed < 2) {
			/* Q is not failed, and we didn't use it to generate
			 * anything, so it makes sense to check it
			 */
			if (sh->check_state == check_state_run)
				sh->check_state = check_state_run_pq;
			else
				sh->check_state = check_state_run_q;
		}

		/* discard potentially stale zero_sum_result */
		sh->ops.zero_sum_result = 0;

		if (sh->check_state == check_state_run) {
			/* async_xor_zero_sum destroys the contents of P */
			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
			s->uptodate--;
		}
		if (sh->check_state >= check_state_run &&
		    sh->check_state <= check_state_run_pq) {
			/* async_syndrome_zero_sum preserves P and Q, so
			 * no need to mark them !uptodate here
			 */
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			break;
		}

		/* we have 2-disk failure */
		BUG_ON(s->failed != 2);
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* now write out any block on a failed drive,
		 * or P or Q if they were recomputed
		 */
		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
		if (s->failed == 2) {
			dev = &sh->dev[s->failed_num[1]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (s->failed >= 1) {
			dev = &sh->dev[s->failed_num[0]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
			dev = &sh->dev[pd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
			dev = &sh->dev[qd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		clear_bit(STRIPE_DEGRADED, &sh->state);

		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
	case check_state_run_q:
	case check_state_run_pq:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if (sh->ops.zero_sum_result == 0) {
			/* both parities are correct */
			if (!s->failed)
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				/* in contrast to the raid5 case we can validate
				 * parity, but still have a failure to write
				 * back
				 */
				sh->check_state = check_state_compute_result;
				/* Returning at this point means that we may go
				 * off and bring p and/or q uptodate again so
				 * we make sure to check zero_sum_result again
				 * to verify if p or q need writeback
				 */
			}
		} else {
			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				int *target = &sh->ops.target;

				sh->ops.target = -1;
				sh->ops.target2 = -1;
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[pd_idx].flags);
					*target = pd_idx;
					target = &sh->ops.target2;
					s->uptodate++;
				}
				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[qd_idx].flags);
					*target = qd_idx;
					s->uptodate++;
				}
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
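
/*
 * Note (sketch): unlike the raid5 checker, the raid6 state machine has
 * three run states because P and Q can be verified independently:
 * check_state_run (P only), check_state_run_q (Q only) and
 * check_state_run_pq (both), chosen in check_state_idle above according
 * to which device, if any, has failed.
 */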

static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
{
	int i;

	/* We have read all the blocks in this stripe and now we need to
	 * copy some of them into a target stripe for expand.
	 */
	struct dma_async_tx_descriptor *tx = NULL;
	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	for (i = 0; i < sh->disks; i++)
		if (i != sh->pd_idx && i != sh->qd_idx) {
			int dd_idx, j;
			struct stripe_head *sh2;
			struct async_submit_ctl submit;

			sector_t bn = compute_blocknr(sh, i, 1);
			sector_t s = raid5_compute_sector(conf, bn, 0,
							  &dd_idx, NULL);
			sh2 = get_active_stripe(conf, s, 0, 1, 1);
			if (sh2 == NULL)
				/* so far only the early blocks of this stripe
				 * have been requested.  When later blocks
				 * get requested, we will try again
				 */
				continue;
			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
			    test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
				/* must have already done this block */
				release_stripe(sh2);
				continue;
			}

			/* place all the copies on one channel */
			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
			tx = async_memcpy(sh2->dev[dd_idx].page,
					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
					  &submit);

			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
			for (j = 0; j < conf->raid_disks; j++)
				if (j != sh2->pd_idx &&
				    j != sh2->qd_idx &&
				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
					break;
			if (j == conf->raid_disks) {
				set_bit(STRIPE_EXPAND_READY, &sh2->state);
				set_bit(STRIPE_HANDLE, &sh2->state);
			}
			release_stripe(sh2);

		}
	/* done submitting copies, wait for them to complete */
	async_tx_quiesce(&tx);
}

/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
 * state of various bits to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on storage
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 */

static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks;
	struct r5dev *dev;
	int i;
	int do_recovery = 0;

	memset(s, 0, sizeof(*s));

	s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
	s->failed_num[0] = -1;
	s->failed_num[1] = -1;

	/* Now to look around and see what can be done */
	rcu_read_lock();
	for (i = disks; i--; ) {
		struct md_rdev *rdev;
		sector_t first_bad;
		int bad_sectors;
		int is_bad = 0;

		dev = &sh->dev[i];

		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
			 i, dev->flags,
			 dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags))
			s->locked++;
		if (test_bit(R5_UPTODATE, &dev->flags))
			s->uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) {
			s->compute++;
			BUG_ON(s->compute > 2);
		}

		if (test_bit(R5_Wantfill, &dev->flags))
			s->to_fill++;
		else if (dev->toread)
			s->to_read++;
		if (dev->towrite) {
			s->to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s->non_overwrite++;
		}
		if (dev->written)
			s->written++;
		/* Prefer to use the replacement for reads, but only
		 * if it is recovered enough and has no bad blocks.
		 */
		rdev = rcu_dereference(conf->disks[i].replacement);
		if (rdev && !test_bit(Faulty, &rdev->flags) &&
		    rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
		    !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
				 &first_bad, &bad_sectors))
			set_bit(R5_ReadRepl, &dev->flags);
		else {
			if (rdev)
				set_bit(R5_NeedReplace, &dev->flags);
			rdev = rcu_dereference(conf->disks[i].rdev);
			clear_bit(R5_ReadRepl, &dev->flags);
		}
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev) {
			is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					     &first_bad, &bad_sectors);
			if (s->blocked_rdev == NULL
			    && (test_bit(Blocked, &rdev->flags)
				|| is_bad < 0)) {
				if (is_bad < 0)
					set_bit(BlockedBadBlocks,
						&rdev->flags);
				s->blocked_rdev = rdev;
				atomic_inc(&rdev->nr_pending);
			}
		}
		clear_bit(R5_Insync, &dev->flags);
		if (!rdev)
			/* Not in-sync */;
		else if (is_bad) {
			/* also not in-sync */
			if (!test_bit(WriteErrorSeen, &rdev->flags) &&
			    test_bit(R5_UPTODATE, &dev->flags)) {
				/* treat as in-sync, but with a read error
				 * which we can now try to correct
				 */
				set_bit(R5_Insync, &dev->flags);
				set_bit(R5_ReadError, &dev->flags);
			}
		} else if (test_bit(In_sync, &rdev->flags))
			set_bit(R5_Insync, &dev->flags);
		else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
			/* in sync if before recovery_offset */
			set_bit(R5_Insync, &dev->flags);
		else if (test_bit(R5_UPTODATE, &dev->flags) &&
			 test_bit(R5_Expanded, &dev->flags))
			/* If we've reshaped into here, we assume it is Insync.
			 * We will shortly update recovery_offset to make
			 * it official.
			 */
			set_bit(R5_Insync, &dev->flags);

		if (test_bit(R5_WriteError, &dev->flags)) {
			/* This flag does not apply to '.replacement'
			 * only to .rdev, so make sure to check that
			 */
			struct md_rdev *rdev2 = rcu_dereference(
				conf->disks[i].rdev);
			if (rdev2 == rdev)
				clear_bit(R5_Insync, &dev->flags);
			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
				s->handle_bad_blocks = 1;
				atomic_inc(&rdev2->nr_pending);
			} else
				clear_bit(R5_WriteError, &dev->flags);
		}
		if (test_bit(R5_MadeGood, &dev->flags)) {
			/* This flag does not apply to '.replacement'
			 * only to .rdev, so make sure to check that
			 */
			struct md_rdev *rdev2 = rcu_dereference(
				conf->disks[i].rdev);
			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
				s->handle_bad_blocks = 1;
				atomic_inc(&rdev2->nr_pending);
			} else
				clear_bit(R5_MadeGood, &dev->flags);
		}
		if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
			struct md_rdev *rdev2 = rcu_dereference(
				conf->disks[i].replacement);
			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
				s->handle_bad_blocks = 1;
				atomic_inc(&rdev2->nr_pending);
			} else
				clear_bit(R5_MadeGoodRepl, &dev->flags);
		}
		if (!test_bit(R5_Insync, &dev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (test_bit(R5_ReadError, &dev->flags))
			clear_bit(R5_Insync, &dev->flags);
		if (!test_bit(R5_Insync, &dev->flags)) {
			if (s->failed < 2)
				s->failed_num[s->failed] = i;
			s->failed++;
			if (rdev && !test_bit(Faulty, &rdev->flags))
				do_recovery = 1;
		}
	}
	if (test_bit(STRIPE_SYNCING, &sh->state)) {
		/* If there is a failed device being replaced,
		 *     we must be recovering.
		 * else if we are after recovery_cp, we must be syncing
		 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
		 * else we can only be replacing
		 * sync and recovery both need to read all devices, and so
		 * use the same flag.
		 */
		if (do_recovery ||
		    sh->sector >= conf->mddev->recovery_cp ||
		    test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
			s->syncing = 1;
		else
			s->replacing = 1;
	}
	rcu_read_unlock();
}

static void handle_stripe(struct stripe_head *sh)
{
	struct stripe_head_state s;
	struct r5conf *conf = sh->raid_conf;
	int i;
	int prexor;
	int disks = sh->disks;
	struct r5dev *pdev, *qdev;

	clear_bit(STRIPE_HANDLE, &sh->state);
	if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
		/* already being handled, ensure it gets handled
		 * again when current action finishes */
		set_bit(STRIPE_HANDLE, &sh->state);
		return;
	}

	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
		spin_lock(&sh->stripe_lock);
		/* Cannot process 'sync' concurrently with 'discard' */
		if (!test_bit(STRIPE_DISCARD, &sh->state) &&
		    test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
			set_bit(STRIPE_SYNCING, &sh->state);
			clear_bit(STRIPE_INSYNC, &sh->state);
			clear_bit(STRIPE_REPLACED, &sh->state);
		}
		spin_unlock(&sh->stripe_lock);
	}
	clear_bit(STRIPE_DELAYED, &sh->state);

	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
		 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
		 (unsigned long long)sh->sector, sh->state,
		 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
		 sh->check_state, sh->reconstruct_state);

	analyse_stripe(sh, &s);

	if (s.handle_bad_blocks) {
		set_bit(STRIPE_HANDLE, &sh->state);
		goto finish;
	}

	if (unlikely(s.blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.replacing || s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto finish;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(s.blocked_rdev, conf->mddev);
		s.blocked_rdev = NULL;
	}

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
		 " to_write=%d failed=%d failed_num=%d,%d\n",
		 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
		 s.failed_num[0], s.failed_num[1]);
	/* check if the array has lost more than max_degraded devices and,
	 * if so, some requests might need to be failed.
	 */
	if (s.failed > conf->max_degraded) {
		sh->check_state = 0;
		sh->reconstruct_state = 0;
		if (s.to_read+s.to_write+s.written)
			handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
		if (s.syncing + s.replacing)
			handle_failed_sync(conf, sh, &s);
	}

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	prexor = 0;
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
		prexor = 1;
	if (sh->reconstruct_state == reconstruct_state_drain_result ||
	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
		sh->reconstruct_state = reconstruct_state_idle;

		/* All the 'written' buffers and the parity block are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
		       !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
		BUG_ON(sh->qd_idx >= 0 &&
		       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
		       !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
				(i == sh->pd_idx || i == sh->qd_idx ||
				 dev->written)) {
				pr_debug("Writing block %d\n", i);
				set_bit(R5_Wantwrite, &dev->flags);
				if (prexor)
					continue;
				if (s.failed > 1)
					continue;
				if (!test_bit(R5_Insync, &dev->flags) ||
				    ((i == sh->pd_idx || i == sh->qd_idx) &&
				     s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			s.dec_preread_active = 1;
	}

	/*
	 * might be able to return some write requests if the parity blocks
	 * are safe, or on a failed drive
	 */
	pdev = &sh->dev[sh->pd_idx];
	s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
		|| (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
	qdev = &sh->dev[sh->qd_idx];
	s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
		|| (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
		|| conf->level < 6;

	if (s.written &&
	    (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
			     && !test_bit(R5_LOCKED, &pdev->flags)
			     && (test_bit(R5_UPTODATE, &pdev->flags) ||
				 test_bit(R5_Discard, &pdev->flags))))) &&
	    (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
			     && !test_bit(R5_LOCKED, &qdev->flags)
			     && (test_bit(R5_UPTODATE, &qdev->flags) ||
				 test_bit(R5_Discard, &qdev->flags))))))
		handle_stripe_clean_event(conf, sh, disks, &s.return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite
	    || (conf->level == 6 && s.to_write && s.failed)
	    || (s.syncing && (s.uptodate + s.compute < disks))
	    || s.replacing
	    || s.expanding)
		handle_stripe_fill(sh, &s, disks);

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+xor) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying(conf, sh, &s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe.
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state))) {
		if (conf->level == 6)
			handle_parity_checks6(conf, sh, &s, disks);
		else
			handle_parity_checks5(conf, sh, &s, disks);
	}

	if ((s.replacing || s.syncing) && s.locked == 0
	    && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
	    && !test_bit(STRIPE_REPLACED, &sh->state)) {
		/* Write out to replacement devices where possible */
		for (i = 0; i < conf->raid_disks; i++)
			if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
				WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
				set_bit(R5_WantReplace, &sh->dev[i].flags);
				set_bit(R5_LOCKED, &sh->dev[i].flags);
				s.locked++;
			}
		if (s.replacing)
			set_bit(STRIPE_INSYNC, &sh->state);
		set_bit(STRIPE_REPLACED, &sh->state);
	}
	if ((s.syncing || s.replacing) && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	    test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
		clear_bit(STRIPE_SYNCING, &sh->state);
		if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
			wake_up(&conf->wait_for_overlap);
	}

	/* If the failed drives are just a ReadError, then we might need
	 * to progress the repair/check process
	 */
	if (s.failed <= conf->max_degraded && !conf->mddev->ro)
		for (i = 0; i < s.failed; i++) {
			struct r5dev *dev = &sh->dev[s.failed_num[i]];
			if (test_bit(R5_ReadError, &dev->flags)
			    && !test_bit(R5_LOCKED, &dev->flags)
			    && test_bit(R5_UPTODATE, &dev->flags)
				) {
				if (!test_bit(R5_ReWrite, &dev->flags)) {
					set_bit(R5_Wantwrite, &dev->flags);
					set_bit(R5_ReWrite, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				} else {
					/* let's read it back */
					set_bit(R5_Wantread, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				}
			}
		}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		struct stripe_head *sh_src
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
			/* sh cannot be written until sh_src has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh_src->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh_src);
			goto finish;
		}
		if (sh_src)
			release_stripe(sh_src);

		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		/* Need to write out all blocks after computing parity */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh);
finish:
	/* wait for this device to become unblocked */
	if (unlikely(s.blocked_rdev)) {
		if (conf->mddev->external)
			md_wait_for_blocked_rdev(s.blocked_rdev,
						 conf->mddev);
		else
			/* Internal metadata will immediately
			 * be written by raid5d, so we don't
			 * need to wait here.
			 */
			rdev_dec_pending(s.blocked_rdev,
					 conf->mddev);
	}

	if (s.handle_bad_blocks)
		for (i = disks; i--; ) {
			struct md_rdev *rdev;
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
				/* We own a safe reference to the rdev */
				rdev = conf->disks[i].rdev;
				if (!rdev_set_badblocks(rdev, sh->sector,
							STRIPE_SECTORS, 0))
					md_error(conf->mddev, rdev);
				rdev_dec_pending(rdev, conf->mddev);
			}
			if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
				rdev = conf->disks[i].rdev;
				rdev_clear_badblocks(rdev, sh->sector,
						     STRIPE_SECTORS, 0);
				rdev_dec_pending(rdev, conf->mddev);
			}
			if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
				rdev = conf->disks[i].replacement;
				if (!rdev)
					/* rdev has been moved down */
					rdev = conf->disks[i].rdev;
				rdev_clear_badblocks(rdev, sh->sector,
						     STRIPE_SECTORS, 0);
				rdev_dec_pending(rdev, conf->mddev);
			}
		}

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	ops_run_io(sh, &s);

	if (s.dec_preread_active) {
		/* We delay this until after ops_run_io so that if make_request
		 * is waiting on a flush, it won't continue until the writes
		 * have actually been submitted.
		 */
		atomic_dec(&conf->preread_active_stripes);
		if (atomic_read(&conf->preread_active_stripes) <
		    IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);
	}

	return_io(s.return_bi);

	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
}

static void raid5_activate_delayed(struct r5conf *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
			raid5_wakeup_stripe_thread(sh);
		}
	}
}

static void activate_bit_delay(struct r5conf *conf,
	struct list_head *temp_inactive_list)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		int hash;
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
	}
}

int md_raid5_congested(struct mddev *mddev, int bits)
{
	struct r5conf *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */
	if (conf->inactive_blocked)
		return 1;
	if (conf->quiesce)
		return 1;
	if (atomic_read(&conf->empty_inactive_list_nr))
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(md_raid5_congested);

static int raid5_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid5_congested(mddev, bits);
}

/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
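
/*
 * Example of the arithmetic above (illustration only): with 64KiB chunks
 * (chunk_sectors = 128), a read at sector 100 that already carries 20
 * sectors has 128 - ((100 & 127) + 20) = 8 sectors left in the chunk, so
 * at most 8 << 9 = 4096 bytes of the candidate bio_vec may be merged.
 */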

static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bio_sectors(bio);

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	return chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}
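
/*
 * Example (illustration only): with chunk_sectors = 128, a 16-sector bio
 * at sector 120 gives (120 & 127) + 16 = 136 > 128, so it crosses a
 * chunk boundary and in_chunk_boundary() rejects it for an aligned read.
 */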

/*
 * add bio to the retry LIFO (in O(1) ... we are in interrupt)
 */
static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
}

static struct bio *remove_bio_from_retry(struct r5conf *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active stripe count to 1 and the processed
		 * stripe count to zero (the upper 16 bits)
		 */
		raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
	}

	return bi;
}

/*
 * The "raid5_align_endio" should check if the read succeeded and if it
 * did, call bio_endio on the original bio (having bio_put the new bio
 * first).  If the read failed, the bio is handed to add_bio_to_retry()
 * so it can be retried through the stripe cache.
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio *raid_bi = bi->bi_private;
	struct mddev *mddev;
	struct r5conf *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	struct md_rdev *rdev;

	bio_put(bi);

	rdev = (void *)raid_bi->bi_next;
	raid_bi->bi_next = NULL;
	mddev = rdev->mddev;
	conf = mddev->private;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
					 raid_bi, 0);
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}

static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if (bio_sectors(bi) > queue_max_sectors(q))
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > queue_max_segments(q))
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this
		 * stage, just give up
		 */
		return 0;

	return 1;
}

static int chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
{
	struct r5conf *conf = mddev->private;
	int dd_idx;
	struct bio *align_bi;
	struct md_rdev *rdev;
	sector_t end_sector;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
		return 0;
	}
	/*
	 * use bio_clone_mddev to make a copy of the bio
	 */
	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
	if (!align_bi)
		return 0;
	/*
	 * set bi_end_io to a new function, and set bi_private to the
	 * original bio structure.
	 */
	align_bi->bi_end_io = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 * compute position
	 */
	align_bi->bi_iter.bi_sector =
		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
				     0, &dd_idx, NULL);

	end_sector = bio_end_sector(align_bi);
	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
	if (!rdev || test_bit(Faulty, &rdev->flags) ||
	    rdev->recovery_offset < end_sector) {
		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
		if (rdev &&
		    (test_bit(Faulty, &rdev->flags) ||
		    !(test_bit(In_sync, &rdev->flags) ||
		      rdev->recovery_offset >= end_sector)))
			rdev = NULL;
	}
	if (rdev) {
		sector_t first_bad;
		int bad_sectors;

		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		raid_bio->bi_next = (void *)rdev;
		align_bi->bi_bdev = rdev->bdev;
		__clear_bit(BIO_SEG_VALID, &align_bi->bi_flags);

		if (!bio_fits_rdev(align_bi) ||
		    is_badblock(rdev, align_bi->bi_iter.bi_sector,
				bio_sectors(align_bi),
				&first_bad, &bad_sectors)) {
			/* too big in some way, or has a known bad block */
			bio_put(align_bi);
			rdev_dec_pending(rdev, mddev);
			return 0;
		}

		/* No reshape active, so we can trust rdev->data_offset */
		align_bi->bi_iter.bi_sector += rdev->data_offset;

		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);

		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
					      align_bi, disk_devt(mddev->gendisk),
					      raid_bio->bi_iter.bi_sector);
		generic_make_request(align_bi);
		return 1;
	} else {
		rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}
}

/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded.  In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however,
 * it will not be incremented when STRIPE_IO_STARTED is sampled set,
 * signifying a stripe with in-flight i/o.  The bypass_count will be reset
 * when the hold_list is handled before the handle_list.
 */
static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
{
	struct stripe_head *sh = NULL, *tmp;
	struct list_head *handle_list = NULL;
	struct r5worker_group *wg = NULL;

	if (conf->worker_cnt_per_group == 0) {
		handle_list = &conf->handle_list;
	} else if (group != ANY_GROUP) {
		handle_list = &conf->worker_groups[group].handle_list;
		wg = &conf->worker_groups[group];
	} else {
		int i;
		for (i = 0; i < conf->group_cnt; i++) {
			handle_list = &conf->worker_groups[i].handle_list;
			wg = &conf->worker_groups[i];
			if (!list_empty(handle_list))
				break;
		}
	}

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		 __func__,
		 list_empty(handle_list) ? "empty" : "busy",
		 list_empty(&conf->hold_list) ? "empty" : "busy",
		 atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(handle_list)) {
		sh = list_entry(handle_list->next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {

		list_for_each_entry(tmp, &conf->hold_list, lru) {
			if (conf->worker_cnt_per_group == 0 ||
			    group == ANY_GROUP ||
			    !cpu_online(tmp->cpu) ||
			    cpu_to_group(tmp->cpu) == group) {
				sh = tmp;
				break;
			}
		}

		if (sh) {
			conf->bypass_count -= conf->bypass_threshold;
			if (conf->bypass_count < 0)
				conf->bypass_count = 0;
		}
		wg = NULL;
	}

	if (!sh)
		return NULL;

	if (wg) {
		wg->stripes_cnt--;
		sh->group = NULL;
	}
	list_del_init(&sh->lru);
	BUG_ON(atomic_inc_return(&sh->count) != 1);
	return sh;
}
4417
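/*
 * State attached to a blk_plug callback: stripes queued on ->list while
 * the plug is held are released in one batch by raid5_unplug().
 */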
4418struct raid5_plug_cb {
4419 struct blk_plug_cb cb;
4420 struct list_head list;
4421 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
4422};
4423
4424static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
4425{
4426 struct raid5_plug_cb *cb = container_of(
4427 blk_cb, struct raid5_plug_cb, cb);
4428 struct stripe_head *sh;
4429 struct mddev *mddev = cb->cb.data;
4430 struct r5conf *conf = mddev->private;
4431 int cnt = 0;
4432 int hash;
4433
4434 if (cb->list.next && !list_empty(&cb->list)) {
4435 spin_lock_irq(&conf->device_lock);
4436 while (!list_empty(&cb->list)) {
4437 sh = list_first_entry(&cb->list, struct stripe_head, lru);
4438 list_del_init(&sh->lru);
			/*
			 * avoid race release_stripe_plug() sees
			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
			 * is still in our list
			 */
4444 smp_mb__before_atomic();
4445 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
			/*
			 * STRIPE_ON_RELEASE_LIST could be set here. In that
			 * case, the count is always > 1 here
			 */
4450 hash = sh->hash_lock_index;
4451 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
4452 cnt++;
4453 }
4454 spin_unlock_irq(&conf->device_lock);
4455 }
4456 release_inactive_stripe_list(conf, cb->temp_inactive_list,
4457 NR_STRIPE_HASH_LOCKS);
4458 if (mddev->queue)
4459 trace_block_unplug(mddev->queue, cnt, !from_schedule);
4460 kfree(cb);
4461}
4462
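/*
 * Release a stripe via the current task's blk_plug when one is active,
 * so stripes submitted together are batched in raid5_unplug(); fall back
 * to an immediate release_stripe() otherwise.
 */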
4463static void release_stripe_plug(struct mddev *mddev,
4464 struct stripe_head *sh)
4465{
4466 struct blk_plug_cb *blk_cb = blk_check_plugged(
4467 raid5_unplug, mddev,
4468 sizeof(struct raid5_plug_cb));
4469 struct raid5_plug_cb *cb;
4470
4471 if (!blk_cb) {
4472 release_stripe(sh);
4473 return;
4474 }
4475
4476 cb = container_of(blk_cb, struct raid5_plug_cb, cb);
4477
4478 if (cb->list.next == NULL) {
4479 int i;
4480 INIT_LIST_HEAD(&cb->list);
4481 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
4482 INIT_LIST_HEAD(cb->temp_inactive_list + i);
4483 }
4484
4485 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
4486 list_add_tail(&sh->lru, &cb->list);
4487 else
4488 release_stripe(sh);
4489}
4490
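/*
 * Handle a REQ_DISCARD bio: attach it as a full-stripe overwrite to all
 * data devices of every stripe it covers, so whole stripes can be
 * discarded without a read-modify-write of parity.
 */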
4491static void make_discard_request(struct mddev *mddev, struct bio *bi)
4492{
4493 struct r5conf *conf = mddev->private;
4494 sector_t logical_sector, last_sector;
4495 struct stripe_head *sh;
4496 int remaining;
4497 int stripe_sectors;
4498
4499 if (mddev->reshape_position != MaxSector)
		/* Skip discard while reshape is happening */
4501 return;
4502
4503 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4504 last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
4505
4506 bi->bi_next = NULL;
	bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
4508
4509 stripe_sectors = conf->chunk_sectors *
4510 (conf->raid_disks - conf->max_degraded);
4511 logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
4512 stripe_sectors);
4513 sector_div(last_sector, stripe_sectors);
4514
4515 logical_sector *= conf->chunk_sectors;
4516 last_sector *= conf->chunk_sectors;
4517
4518 for (; logical_sector < last_sector;
4519 logical_sector += STRIPE_SECTORS) {
4520 DEFINE_WAIT(w);
4521 int d;
4522 again:
4523 sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
4524 prepare_to_wait(&conf->wait_for_overlap, &w,
4525 TASK_UNINTERRUPTIBLE);
4526 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
4527 if (test_bit(STRIPE_SYNCING, &sh->state)) {
4528 release_stripe(sh);
4529 schedule();
4530 goto again;
4531 }
4532 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
4533 spin_lock_irq(&sh->stripe_lock);
4534 for (d = 0; d < conf->raid_disks; d++) {
4535 if (d == sh->pd_idx || d == sh->qd_idx)
4536 continue;
4537 if (sh->dev[d].towrite || sh->dev[d].toread) {
4538 set_bit(R5_Overlap, &sh->dev[d].flags);
4539 spin_unlock_irq(&sh->stripe_lock);
4540 release_stripe(sh);
4541 schedule();
4542 goto again;
4543 }
4544 }
4545 set_bit(STRIPE_DISCARD, &sh->state);
4546 finish_wait(&conf->wait_for_overlap, &w);
4547 for (d = 0; d < conf->raid_disks; d++) {
4548 if (d == sh->pd_idx || d == sh->qd_idx)
4549 continue;
4550 sh->dev[d].towrite = bi;
4551 set_bit(R5_OVERWRITE, &sh->dev[d].flags);
4552 raid5_inc_bi_active_stripes(bi);
4553 }
4554 spin_unlock_irq(&sh->stripe_lock);
4555 if (conf->mddev->bitmap) {
4556 for (d = 0;
4557 d < conf->raid_disks - conf->max_degraded;
4558 d++)
4559 bitmap_startwrite(mddev->bitmap,
4560 sh->sector,
4561 STRIPE_SECTORS,
4562 0);
4563 sh->bm_seq = conf->seq_flush + 1;
4564 set_bit(STRIPE_BIT_DELAY, &sh->state);
4565 }
4566
4567 set_bit(STRIPE_HANDLE, &sh->state);
4568 clear_bit(STRIPE_DELAYED, &sh->state);
4569 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4570 atomic_inc(&conf->preread_active_stripes);
4571 release_stripe_plug(mddev, sh);
4572 }
4573
4574 remaining = raid5_dec_bi_active_stripes(bi);
4575 if (remaining == 0) {
4576 md_write_end(mddev);
4577 bio_endio(bi, 0);
4578 }
4579}
4580
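/*
 * Main entry point for reads and writes: split the bio into
 * STRIPE_SECTORS-sized pieces, attach each piece to the stripe_head that
 * owns it, and leave the actual I/O to raid5d or the worker threads.
 */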
4581static void make_request(struct mddev *mddev, struct bio * bi)
4582{
4583 struct r5conf *conf = mddev->private;
4584 int dd_idx;
4585 sector_t new_sector;
4586 sector_t logical_sector, last_sector;
4587 struct stripe_head *sh;
4588 const int rw = bio_data_dir(bi);
4589 int remaining;
4590 DEFINE_WAIT(w);
4591 bool do_prepare;
4592
4593 if (unlikely(bi->bi_rw & REQ_FLUSH)) {
4594 md_flush_request(mddev, bi);
4595 return;
4596 }
4597
4598 md_write_start(mddev, bi);
4599
4600 if (rw == READ &&
4601 mddev->reshape_position == MaxSector &&
4602 chunk_aligned_read(mddev,bi))
4603 return;
4604
4605 if (unlikely(bi->bi_rw & REQ_DISCARD)) {
4606 make_discard_request(mddev, bi);
4607 return;
4608 }
4609
4610 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4611 last_sector = bio_end_sector(bi);
4612 bi->bi_next = NULL;
	bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
4614
4615 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
4616 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
4617 int previous;
4618 int seq;
4619
4620 do_prepare = false;
4621 retry:
4622 seq = read_seqcount_begin(&conf->gen_lock);
4623 previous = 0;
4624 if (do_prepare)
4625 prepare_to_wait(&conf->wait_for_overlap, &w,
4626 TASK_UNINTERRUPTIBLE);
4627 if (unlikely(conf->reshape_progress != MaxSector)) {
			/* spinlock is needed as reshape_progress may be
			 * 64bit on a 32bit platform, and so it might be
			 * possible to see a half-updated value.
			 * Of course reshape_progress could change after
			 * the lock is dropped, so once we get a reference
			 * to the stripe that we think it is, we will have
			 * to check again.
			 */
4636 spin_lock_irq(&conf->device_lock);
4637 if (mddev->reshape_backwards
4638 ? logical_sector < conf->reshape_progress
4639 : logical_sector >= conf->reshape_progress) {
4640 previous = 1;
4641 } else {
4642 if (mddev->reshape_backwards
4643 ? logical_sector < conf->reshape_safe
4644 : logical_sector >= conf->reshape_safe) {
4645 spin_unlock_irq(&conf->device_lock);
4646 schedule();
4647 do_prepare = true;
4648 goto retry;
4649 }
4650 }
4651 spin_unlock_irq(&conf->device_lock);
4652 }
4653
4654 new_sector = raid5_compute_sector(conf, logical_sector,
4655 previous,
4656 &dd_idx, NULL);
4657 pr_debug("raid456: make_request, sector %llu logical %llu\n",
4658 (unsigned long long)new_sector,
4659 (unsigned long long)logical_sector);
4660
4661 sh = get_active_stripe(conf, new_sector, previous,
4662 (bi->bi_rw&RWA_MASK), 0);
4663 if (sh) {
4664 if (unlikely(previous)) {
				/* expansion might have moved on while waiting for a
				 * stripe, so we must do the range check again.
				 * Expansion could still move past after this
				 * test, but as we are holding a reference to
				 * 'sh', we know that if that happens,
				 * STRIPE_EXPANDING will get set and the expansion
				 * won't proceed until we finish with the stripe.
				 */
4673 int must_retry = 0;
4674 spin_lock_irq(&conf->device_lock);
4675 if (mddev->reshape_backwards
4676 ? logical_sector >= conf->reshape_progress
4677 : logical_sector < conf->reshape_progress)
					/* mismatch, need to try again */
4679 must_retry = 1;
4680 spin_unlock_irq(&conf->device_lock);
4681 if (must_retry) {
4682 release_stripe(sh);
4683 schedule();
4684 do_prepare = true;
4685 goto retry;
4686 }
4687 }
4688 if (read_seqcount_retry(&conf->gen_lock, seq)) {
				/* Might have got the wrong stripe_head
				 * by accident
				 */
4692 release_stripe(sh);
4693 goto retry;
4694 }
4695
4696 if (rw == WRITE &&
4697 logical_sector >= mddev->suspend_lo &&
4698 logical_sector < mddev->suspend_hi) {
4699 release_stripe(sh);
				/* As the suspend_* range is controlled by
				 * userspace, we want an interruptible
				 * wait.
				 */
4704 flush_signals(current);
4705 prepare_to_wait(&conf->wait_for_overlap,
4706 &w, TASK_INTERRUPTIBLE);
4707 if (logical_sector >= mddev->suspend_lo &&
4708 logical_sector < mddev->suspend_hi) {
4709 schedule();
4710 do_prepare = true;
4711 }
4712 goto retry;
4713 }
4714
4715 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
4716 !add_stripe_bio(sh, bi, dd_idx, rw)) {
				/* Stripe is busy expanding or
				 * add failed due to overlap.  Flush everything
				 * and wait a while
				 */
4721 md_wakeup_thread(mddev->thread);
4722 release_stripe(sh);
4723 schedule();
4724 do_prepare = true;
4725 goto retry;
4726 }
4727 set_bit(STRIPE_HANDLE, &sh->state);
4728 clear_bit(STRIPE_DELAYED, &sh->state);
4729 if ((bi->bi_rw & REQ_SYNC) &&
4730 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4731 atomic_inc(&conf->preread_active_stripes);
4732 release_stripe_plug(mddev, sh);
4733 } else {
			/* cannot get stripe for read-ahead, just give-up */
4735 clear_bit(BIO_UPTODATE, &bi->bi_flags);
4736 break;
4737 }
4738 }
4739 finish_wait(&conf->wait_for_overlap, &w);
4740
4741 remaining = raid5_dec_bi_active_stripes(bi);
4742 if (remaining == 0) {
4743
		if (rw == WRITE)
4745 md_write_end(mddev);
4746
4747 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
4748 bi, 0);
4749 bio_endio(bi, 0);
4750 }
4751}
4752
4753static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
4754
4755static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
4756{
	/* reshaping is quite different to recovery/resync so it is
	 * handled quite separately ... here.
	 *
	 * On each call to sync_request, we gather one chunk worth of
	 * destination stripes and flag them as expanding.
	 * Then we find all the source stripes and request reads.
	 * As the reads complete, handle_stripe will copy the data
	 * into the destination stripe and release that stripe.
	 */
4766 struct r5conf *conf = mddev->private;
4767 struct stripe_head *sh;
4768 sector_t first_sector, last_sector;
4769 int raid_disks = conf->previous_raid_disks;
4770 int data_disks = raid_disks - conf->max_degraded;
4771 int new_data_disks = conf->raid_disks - conf->max_degraded;
4772 int i;
4773 int dd_idx;
4774 sector_t writepos, readpos, safepos;
4775 sector_t stripe_addr;
4776 int reshape_sectors;
4777 struct list_head stripes;
4778
4779 if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
4781 if (mddev->reshape_backwards &&
4782 conf->reshape_progress < raid5_size(mddev, 0, 0)) {
4783 sector_nr = raid5_size(mddev, 0, 0)
4784 - conf->reshape_progress;
4785 } else if (!mddev->reshape_backwards &&
4786 conf->reshape_progress > 0)
4787 sector_nr = conf->reshape_progress;
4788 sector_div(sector_nr, new_data_disks);
4789 if (sector_nr) {
4790 mddev->curr_resync_completed = sector_nr;
4791 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4792 *skipped = 1;
4793 return sector_nr;
4794 }
4795 }
4796
	/* We need to process a full chunk at a time.
	 * If old and new chunk sizes differ, we need to process the
	 * largest of these.
	 */
4801 if (mddev->new_chunk_sectors > mddev->chunk_sectors)
4802 reshape_sectors = mddev->new_chunk_sectors;
4803 else
4804 reshape_sectors = mddev->chunk_sectors;
4805
	/* We update the metadata at least every 10 seconds, or when
	 * the data about to be copied would over-write the source of
	 * the data at the front of the range.  i.e. one new_stripe
	 * along from reshape_progress new_maps to after where
	 * reshape_safe old_maps to.
	 */
4812 writepos = conf->reshape_progress;
4813 sector_div(writepos, new_data_disks);
4814 readpos = conf->reshape_progress;
4815 sector_div(readpos, data_disks);
4816 safepos = conf->reshape_safe;
4817 sector_div(safepos, data_disks);
4818 if (mddev->reshape_backwards) {
4819 writepos -= min_t(sector_t, reshape_sectors, writepos);
4820 readpos += reshape_sectors;
4821 safepos += reshape_sectors;
4822 } else {
4823 writepos += reshape_sectors;
4824 readpos -= min_t(sector_t, reshape_sectors, readpos);
4825 safepos -= min_t(sector_t, reshape_sectors, safepos);
4826 }
4827
	/* Having calculated the 'writepos' possibly use it
	 * to set 'stripe_addr' which is where we will write to.
	 */
4831 if (mddev->reshape_backwards) {
4832 BUG_ON(conf->reshape_progress == 0);
4833 stripe_addr = writepos;
4834 BUG_ON((mddev->dev_sectors &
4835 ~((sector_t)reshape_sectors - 1))
4836 - reshape_sectors - stripe_addr
4837 != sector_nr);
4838 } else {
4839 BUG_ON(writepos != sector_nr + reshape_sectors);
4840 stripe_addr = sector_nr;
4841 }
4842
	/* 'writepos' is the most advanced device address we might write.
	 * 'readpos' is the least advanced device address we might read.
	 * 'safepos' is the least address recorded in the metadata as having
	 *     been reshaped.
	 * If there is a min_offset_diff, these are adjusted either by
	 * increasing the safepos/readpos if diff is negative, or
	 * increasing writepos if diff is positive.
	 * If 'readpos' is then behind 'writepos', there is no way that we can
	 * ensure safety in the face of a crash - that must be done by userspace
	 * making a backup of the data.  So in that case there is no particular
	 * need to update the metadata.
	 * Otherwise we update the metadata whenever 'writepos' threatens to
	 * pass 'safepos', and in any case at least every 10 seconds, so that
	 * after a crash the reshape can restart from a safe checkpoint.
	 */
4863 if (conf->min_offset_diff < 0) {
4864 safepos += -conf->min_offset_diff;
4865 readpos += -conf->min_offset_diff;
4866 } else
4867 writepos += conf->min_offset_diff;
4868
4869 if ((mddev->reshape_backwards
4870 ? (safepos > writepos && readpos < writepos)
4871 : (safepos < writepos && readpos > writepos)) ||
4872 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Cannot proceed until we've updated the superblock... */
4874 wait_event(conf->wait_for_overlap,
4875 atomic_read(&conf->reshape_stripes)==0
4876 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4877 if (atomic_read(&conf->reshape_stripes) != 0)
4878 return 0;
4879 mddev->reshape_position = conf->reshape_progress;
4880 mddev->curr_resync_completed = sector_nr;
4881 conf->reshape_checkpoint = jiffies;
4882 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4883 md_wakeup_thread(mddev->thread);
4884 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4885 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4886 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4887 return 0;
4888 spin_lock_irq(&conf->device_lock);
4889 conf->reshape_safe = mddev->reshape_position;
4890 spin_unlock_irq(&conf->device_lock);
4891 wake_up(&conf->wait_for_overlap);
4892 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4893 }
4894
4895 INIT_LIST_HEAD(&stripes);
4896 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
4897 int j;
4898 int skipped_disk = 0;
4899 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
4900 set_bit(STRIPE_EXPANDING, &sh->state);
4901 atomic_inc(&conf->reshape_stripes);
		/* If any of this stripe is beyond the end of the old
		 * array, then we need to zero those blocks
		 */
4905 for (j=sh->disks; j--;) {
4906 sector_t s;
4907 if (j == sh->pd_idx)
4908 continue;
4909 if (conf->level == 6 &&
4910 j == sh->qd_idx)
4911 continue;
4912 s = compute_blocknr(sh, j, 0);
4913 if (s < raid5_size(mddev, 0, 0)) {
4914 skipped_disk = 1;
4915 continue;
4916 }
4917 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4918 set_bit(R5_Expanded, &sh->dev[j].flags);
4919 set_bit(R5_UPTODATE, &sh->dev[j].flags);
4920 }
4921 if (!skipped_disk) {
4922 set_bit(STRIPE_EXPAND_READY, &sh->state);
4923 set_bit(STRIPE_HANDLE, &sh->state);
4924 }
4925 list_add(&sh->lru, &stripes);
4926 }
4927 spin_lock_irq(&conf->device_lock);
4928 if (mddev->reshape_backwards)
4929 conf->reshape_progress -= reshape_sectors * new_data_disks;
4930 else
4931 conf->reshape_progress += reshape_sectors * new_data_disks;
4932 spin_unlock_irq(&conf->device_lock);
	/* Ok, those stripes are ready. We can start scheduling
	 * reads on the source stripes.
	 * The source stripes are determined by mapping the first and last
	 * block on the destination stripes.
	 */
4938 first_sector =
4939 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
4940 1, &dd_idx, NULL);
4941 last_sector =
4942 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
4943 * new_data_disks - 1),
4944 1, &dd_idx, NULL);
4945 if (last_sector >= mddev->dev_sectors)
4946 last_sector = mddev->dev_sectors - 1;
4947 while (first_sector <= last_sector) {
4948 sh = get_active_stripe(conf, first_sector, 1, 0, 1);
4949 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4950 set_bit(STRIPE_HANDLE, &sh->state);
4951 release_stripe(sh);
4952 first_sector += STRIPE_SECTORS;
4953 }
	/* Now that the sources are clearly marked, we can release
	 * the destination stripes
	 */
4957 while (!list_empty(&stripes)) {
4958 sh = list_entry(stripes.next, struct stripe_head, lru);
4959 list_del_init(&sh->lru);
4960 release_stripe(sh);
4961 }
	/* If this takes us to the resync_max point where we have to pause,
	 * then we need to write out the superblock.
	 */
4965 sector_nr += reshape_sectors;
4966 if ((sector_nr - mddev->curr_resync_completed) * 2
4967 >= mddev->resync_max - mddev->curr_resync_completed) {
		/* Cannot proceed until we've updated the superblock... */
4969 wait_event(conf->wait_for_overlap,
4970 atomic_read(&conf->reshape_stripes) == 0
4971 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4972 if (atomic_read(&conf->reshape_stripes) != 0)
4973 goto ret;
4974 mddev->reshape_position = conf->reshape_progress;
4975 mddev->curr_resync_completed = sector_nr;
4976 conf->reshape_checkpoint = jiffies;
4977 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4978 md_wakeup_thread(mddev->thread);
4979 wait_event(mddev->sb_wait,
4980 !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4981 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4982 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4983 goto ret;
4984 spin_lock_irq(&conf->device_lock);
4985 conf->reshape_safe = mddev->reshape_position;
4986 spin_unlock_irq(&conf->device_lock);
4987 wake_up(&conf->wait_for_overlap);
4988 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4989 }
4990ret:
4991 return reshape_sectors;
4992}
4993
4994
4995static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
4996{
4997 struct r5conf *conf = mddev->private;
4998 struct stripe_head *sh;
4999 sector_t max_sector = mddev->dev_sectors;
5000 sector_t sync_blocks;
5001 int still_degraded = 0;
5002 int i;
5003
5004 if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */

5007 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
5008 end_reshape(conf);
5009 return 0;
5010 }
5011
		if (mddev->curr_resync < max_sector) /* aborted */
5013 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
5014 &sync_blocks, 1);
		else /* completed sync */
5016 conf->fullsync = 0;
5017 bitmap_close_sync(mddev->bitmap);
5018
5019 return 0;
5020 }
5021
	/* Allow raid5_quiesce to complete */
5023 wait_event(conf->wait_for_overlap, conf->quiesce != 2);
5024
5025 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5026 return reshape_request(mddev, sector_nr, skipped);
5027
	/* No need to check resync_max as we never do more than one
	 * stripe, and as resync_max will always be on a chunk boundary,
	 * if the check in md_do_sync didn't fire, there is no chance
	 * of overstepping resync_max here.
	 */

	/* if there are too many failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
5038 if (mddev->degraded >= conf->max_degraded &&
5039 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5040 sector_t rv = mddev->dev_sectors - sector_nr;
5041 *skipped = 1;
5042 return rv;
5043 }
5044 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
5045 !conf->fullsync &&
5046 !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
5047 sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
5049 sync_blocks /= STRIPE_SECTORS;
5050 *skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
5052 }
5053
5054 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
5055
5056 sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
5057 if (sh == NULL) {
5058 sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
5062 schedule_timeout_uninterruptible(1);
5063 }
5064
	/* Need to check if array will still be degraded after recovery/resync.
	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
5068 for (i = 0; i < conf->raid_disks; i++)
5069 if (conf->disks[i].rdev == NULL)
5070 still_degraded = 1;
5071
5072 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
5073
5074 set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
5075 set_bit(STRIPE_HANDLE, &sh->state);
5076
5077 release_stripe(sh);
5078
5079 return STRIPE_SECTORS;
5080}
5081
5082static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
5083{
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we allow ever-larger chunks).
	 * So we do one stripe head at a time and record in
	 * ->bi_phys_segments how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will be only one 'dd_idx' and only need one call to
	 * raid5_compute_sector.
	 */
5094 struct stripe_head *sh;
5095 int dd_idx;
5096 sector_t sector, logical_sector, last_sector;
5097 int scnt = 0;
5098 int remaining;
5099 int handled = 0;
5100
5101 logical_sector = raid_bio->bi_iter.bi_sector &
5102 ~((sector_t)STRIPE_SECTORS-1);
5103 sector = raid5_compute_sector(conf, logical_sector,
5104 0, &dd_idx, NULL);
5105 last_sector = bio_end_sector(raid_bio);
5106
5107 for (; logical_sector < last_sector;
5108 logical_sector += STRIPE_SECTORS,
5109 sector += STRIPE_SECTORS,
5110 scnt++) {
5111
5112 if (scnt < raid5_bi_processed_stripes(raid_bio))
			/* already done this stripe */
5114 continue;
5115
5116 sh = get_active_stripe(conf, sector, 0, 1, 1);
5117
5118 if (!sh) {
			/* failed to get a stripe - must wait */
5120 raid5_set_bi_processed_stripes(raid_bio, scnt);
5121 conf->retry_read_aligned = raid_bio;
5122 return handled;
5123 }
5124
5125 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
5126 release_stripe(sh);
5127 raid5_set_bi_processed_stripes(raid_bio, scnt);
5128 conf->retry_read_aligned = raid_bio;
5129 return handled;
5130 }
5131
5132 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
5133 handle_stripe(sh);
5134 release_stripe(sh);
5135 handled++;
5136 }
5137 remaining = raid5_dec_bi_active_stripes(raid_bio);
5138 if (remaining == 0) {
5139 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
5140 raid_bio, 0);
5141 bio_endio(raid_bio, 0);
5142 }
5143 if (atomic_dec_and_test(&conf->active_aligned_reads))
5144 wake_up(&conf->wait_for_stripe);
5145 return handled;
5146}
5147
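/*
 * Take up to MAX_STRIPE_BATCH stripes off the priority lists and handle
 * them with conf->device_lock dropped.  Called and returns with the lock
 * held; returns the number of stripes handled.
 */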
5148static int handle_active_stripes(struct r5conf *conf, int group,
5149 struct r5worker *worker,
5150 struct list_head *temp_inactive_list)
5151{
5152 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
5153 int i, batch_size = 0, hash;
5154 bool release_inactive = false;
5155
5156 while (batch_size < MAX_STRIPE_BATCH &&
5157 (sh = __get_priority_stripe(conf, group)) != NULL)
5158 batch[batch_size++] = sh;
5159
5160 if (batch_size == 0) {
5161 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5162 if (!list_empty(temp_inactive_list + i))
5163 break;
5164 if (i == NR_STRIPE_HASH_LOCKS)
5165 return batch_size;
5166 release_inactive = true;
5167 }
5168 spin_unlock_irq(&conf->device_lock);
5169
5170 release_inactive_stripe_list(conf, temp_inactive_list,
5171 NR_STRIPE_HASH_LOCKS);
5172
5173 if (release_inactive) {
5174 spin_lock_irq(&conf->device_lock);
5175 return 0;
5176 }
5177
5178 for (i = 0; i < batch_size; i++)
5179 handle_stripe(batch[i]);
5180
5181 cond_resched();
5182
5183 spin_lock_irq(&conf->device_lock);
5184 for (i = 0; i < batch_size; i++) {
5185 hash = batch[i]->hash_lock_index;
5186 __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
5187 }
5188 return batch_size;
5189}
5190
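/*
 * Body of the per-group worker threads: keep handling stripes for this
 * worker's group until both its handle list and the released-stripes
 * list are drained.
 */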
5191static void raid5_do_work(struct work_struct *work)
5192{
5193 struct r5worker *worker = container_of(work, struct r5worker, work);
5194 struct r5worker_group *group = worker->group;
5195 struct r5conf *conf = group->conf;
5196 int group_id = group - conf->worker_groups;
5197 int handled;
5198 struct blk_plug plug;
5199
5200 pr_debug("+++ raid5worker active\n");
5201
5202 blk_start_plug(&plug);
5203 handled = 0;
5204 spin_lock_irq(&conf->device_lock);
5205 while (1) {
5206 int batch_size, released;
5207
5208 released = release_stripe_list(conf, worker->temp_inactive_list);
5209
5210 batch_size = handle_active_stripes(conf, group_id, worker,
5211 worker->temp_inactive_list);
5212 worker->working = false;
5213 if (!batch_size && !released)
5214 break;
5215 handled += batch_size;
5216 }
5217 pr_debug("%d stripes handled\n", handled);
5218
5219 spin_unlock_irq(&conf->device_lock);
5220 blk_finish_plug(&plug);
5221
5222 pr_debug("--- raid5worker inactive\n");
5223}
5224
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
5232static void raid5d(struct md_thread *thread)
5233{
5234 struct mddev *mddev = thread->mddev;
5235 struct r5conf *conf = mddev->private;
5236 int handled;
5237 struct blk_plug plug;
5238
5239 pr_debug("+++ raid5d active\n");
5240
5241 md_check_recovery(mddev);
5242
5243 blk_start_plug(&plug);
5244 handled = 0;
5245 spin_lock_irq(&conf->device_lock);
5246 while (1) {
5247 struct bio *bio;
5248 int batch_size, released;
5249
5250 released = release_stripe_list(conf, conf->temp_inactive_list);
5251
		if (!list_empty(&conf->bitmap_list)) {
			/* Now is a good time to flush some bitmap updates */
5255 conf->seq_flush++;
5256 spin_unlock_irq(&conf->device_lock);
5257 bitmap_unplug(mddev->bitmap);
5258 spin_lock_irq(&conf->device_lock);
5259 conf->seq_write = conf->seq_flush;
5260 activate_bit_delay(conf, conf->temp_inactive_list);
5261 }
5262 raid5_activate_delayed(conf);
5263
5264 while ((bio = remove_bio_from_retry(conf))) {
5265 int ok;
5266 spin_unlock_irq(&conf->device_lock);
5267 ok = retry_aligned_read(conf, bio);
5268 spin_lock_irq(&conf->device_lock);
5269 if (!ok)
5270 break;
5271 handled++;
5272 }
5273
5274 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
5275 conf->temp_inactive_list);
5276 if (!batch_size && !released)
5277 break;
5278 handled += batch_size;
5279
5280 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
5281 spin_unlock_irq(&conf->device_lock);
5282 md_check_recovery(mddev);
5283 spin_lock_irq(&conf->device_lock);
5284 }
5285 }
5286 pr_debug("%d stripes handled\n", handled);
5287
5288 spin_unlock_irq(&conf->device_lock);
5289
5290 async_tx_issue_pending_all();
5291 blk_finish_plug(&plug);
5292
5293 pr_debug("--- raid5d inactive\n");
5294}
5295
5296static ssize_t
5297raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
5298{
5299 struct r5conf *conf = mddev->private;
5300 if (conf)
5301 return sprintf(page, "%d\n", conf->max_nr_stripes);
5302 else
5303 return 0;
5304}
5305
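/*
 * Resize the stripe cache to 'size' entries, shrinking or growing one
 * stripe_head at a time and cycling through the hash locks so the
 * per-hash inactive lists stay balanced.
 */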
5306int
5307raid5_set_cache_size(struct mddev *mddev, int size)
5308{
5309 struct r5conf *conf = mddev->private;
5310 int err;
5311 int hash;
5312
5313 if (size <= 16 || size > 32768)
5314 return -EINVAL;
5315 hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
5316 while (size < conf->max_nr_stripes) {
5317 if (drop_one_stripe(conf, hash))
5318 conf->max_nr_stripes--;
5319 else
5320 break;
5321 hash--;
5322 if (hash < 0)
5323 hash = NR_STRIPE_HASH_LOCKS - 1;
5324 }
5325 err = md_allow_write(mddev);
5326 if (err)
5327 return err;
5328 hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
5329 while (size > conf->max_nr_stripes) {
5330 if (grow_one_stripe(conf, hash))
5331 conf->max_nr_stripes++;
5332 else break;
5333 hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
5334 }
5335 return 0;
5336}
5337EXPORT_SYMBOL(raid5_set_cache_size);
5338
5339static ssize_t
5340raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
5341{
5342 struct r5conf *conf = mddev->private;
5343 unsigned long new;
5344 int err;
5345
5346 if (len >= PAGE_SIZE)
5347 return -EINVAL;
5348 if (!conf)
5349 return -ENODEV;
5350
5351 if (kstrtoul(page, 10, &new))
5352 return -EINVAL;
5353 err = raid5_set_cache_size(mddev, new);
5354 if (err)
5355 return err;
5356 return len;
5357}
5358
5359static struct md_sysfs_entry
5360raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
5361 raid5_show_stripe_cache_size,
5362 raid5_store_stripe_cache_size);
5363
5364static ssize_t
5365raid5_show_preread_threshold(struct mddev *mddev, char *page)
5366{
5367 struct r5conf *conf = mddev->private;
5368 if (conf)
5369 return sprintf(page, "%d\n", conf->bypass_threshold);
5370 else
5371 return 0;
5372}
5373
5374static ssize_t
5375raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
5376{
5377 struct r5conf *conf = mddev->private;
5378 unsigned long new;
5379 if (len >= PAGE_SIZE)
5380 return -EINVAL;
5381 if (!conf)
5382 return -ENODEV;
5383
5384 if (kstrtoul(page, 10, &new))
5385 return -EINVAL;
5386 if (new > conf->max_nr_stripes)
5387 return -EINVAL;
5388 conf->bypass_threshold = new;
5389 return len;
5390}
5391
5392static struct md_sysfs_entry
5393raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
5394 S_IRUGO | S_IWUSR,
5395 raid5_show_preread_threshold,
5396 raid5_store_preread_threshold);
5397
5398static ssize_t
5399raid5_show_skip_copy(struct mddev *mddev, char *page)
5400{
5401 struct r5conf *conf = mddev->private;
5402 if (conf)
5403 return sprintf(page, "%d\n", conf->skip_copy);
5404 else
5405 return 0;
5406}
5407
5408static ssize_t
5409raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
5410{
5411 struct r5conf *conf = mddev->private;
5412 unsigned long new;
5413 if (len >= PAGE_SIZE)
5414 return -EINVAL;
5415 if (!conf)
5416 return -ENODEV;
5417
5418 if (kstrtoul(page, 10, &new))
5419 return -EINVAL;
5420 new = !!new;
5421 if (new == conf->skip_copy)
5422 return len;
5423
5424 mddev_suspend(mddev);
5425 conf->skip_copy = new;
5426 if (new)
5427 mddev->queue->backing_dev_info.capabilities |=
5428 BDI_CAP_STABLE_WRITES;
5429 else
5430 mddev->queue->backing_dev_info.capabilities &=
5431 ~BDI_CAP_STABLE_WRITES;
5432 mddev_resume(mddev);
5433 return len;
5434}
5435
5436static struct md_sysfs_entry
5437raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
5438 raid5_show_skip_copy,
5439 raid5_store_skip_copy);
5440
5441static ssize_t
5442stripe_cache_active_show(struct mddev *mddev, char *page)
5443{
5444 struct r5conf *conf = mddev->private;
5445 if (conf)
5446 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
5447 else
5448 return 0;
5449}
5450
5451static struct md_sysfs_entry
5452raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
5453
5454static ssize_t
5455raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
5456{
5457 struct r5conf *conf = mddev->private;
5458 if (conf)
5459 return sprintf(page, "%d\n", conf->worker_cnt_per_group);
5460 else
5461 return 0;
5462}
5463
5464static int alloc_thread_groups(struct r5conf *conf, int cnt,
5465 int *group_cnt,
5466 int *worker_cnt_per_group,
5467 struct r5worker_group **worker_groups);
5468static ssize_t
5469raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
5470{
5471 struct r5conf *conf = mddev->private;
5472 unsigned long new;
5473 int err;
5474 struct r5worker_group *new_groups, *old_groups;
5475 int group_cnt, worker_cnt_per_group;
5476
5477 if (len >= PAGE_SIZE)
5478 return -EINVAL;
5479 if (!conf)
5480 return -ENODEV;
5481
5482 if (kstrtoul(page, 10, &new))
5483 return -EINVAL;
5484
5485 if (new == conf->worker_cnt_per_group)
5486 return len;
5487
5488 mddev_suspend(mddev);
5489
5490 old_groups = conf->worker_groups;
5491 if (old_groups)
5492 flush_workqueue(raid5_wq);
5493
5494 err = alloc_thread_groups(conf, new,
5495 &group_cnt, &worker_cnt_per_group,
5496 &new_groups);
5497 if (!err) {
5498 spin_lock_irq(&conf->device_lock);
5499 conf->group_cnt = group_cnt;
5500 conf->worker_cnt_per_group = worker_cnt_per_group;
5501 conf->worker_groups = new_groups;
5502 spin_unlock_irq(&conf->device_lock);
5503
5504 if (old_groups)
5505 kfree(old_groups[0].workers);
5506 kfree(old_groups);
5507 }
5508
5509 mddev_resume(mddev);
5510
5511 if (err)
5512 return err;
5513 return len;
5514}
5515
5516static struct md_sysfs_entry
5517raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
5518 raid5_show_group_thread_cnt,
5519 raid5_store_group_thread_cnt);
5520
5521static struct attribute *raid5_attrs[] = {
5522 &raid5_stripecache_size.attr,
5523 &raid5_stripecache_active.attr,
5524 &raid5_preread_bypass_threshold.attr,
5525 &raid5_group_thread_cnt.attr,
5526 &raid5_skip_copy.attr,
5527 NULL,
5528};
5529static struct attribute_group raid5_attrs_group = {
5530 .name = NULL,
5531 .attrs = raid5_attrs,
5532};
5533
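/*
 * Allocate one r5worker_group per NUMA node, each with 'cnt' workers.
 * cnt == 0 disables the worker threads entirely and leaves all stripe
 * handling to raid5d.
 */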
5534static int alloc_thread_groups(struct r5conf *conf, int cnt,
5535 int *group_cnt,
5536 int *worker_cnt_per_group,
5537 struct r5worker_group **worker_groups)
5538{
5539 int i, j, k;
5540 ssize_t size;
5541 struct r5worker *workers;
5542
5543 *worker_cnt_per_group = cnt;
5544 if (cnt == 0) {
5545 *group_cnt = 0;
5546 *worker_groups = NULL;
5547 return 0;
5548 }
5549 *group_cnt = num_possible_nodes();
5550 size = sizeof(struct r5worker) * cnt;
5551 workers = kzalloc(size * *group_cnt, GFP_NOIO);
5552 *worker_groups = kzalloc(sizeof(struct r5worker_group) *
5553 *group_cnt, GFP_NOIO);
5554 if (!*worker_groups || !workers) {
5555 kfree(workers);
5556 kfree(*worker_groups);
5557 return -ENOMEM;
5558 }
5559
5560 for (i = 0; i < *group_cnt; i++) {
5561 struct r5worker_group *group;
5562
5563 group = &(*worker_groups)[i];
5564 INIT_LIST_HEAD(&group->handle_list);
5565 group->conf = conf;
5566 group->workers = workers + i * cnt;
5567
5568 for (j = 0; j < cnt; j++) {
5569 struct r5worker *worker = group->workers + j;
5570 worker->group = group;
5571 INIT_WORK(&worker->work, raid5_do_work);
5572
5573 for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
5574 INIT_LIST_HEAD(worker->temp_inactive_list + k);
5575 }
5576 }
5577
5578 return 0;
5579}
5580
5581static void free_thread_groups(struct r5conf *conf)
5582{
5583 if (conf->worker_groups)
5584 kfree(conf->worker_groups[0].workers);
5585 kfree(conf->worker_groups);
5586 conf->worker_groups = NULL;
5587}
5588
5589static sector_t
5590raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
5591{
5592 struct r5conf *conf = mddev->private;
5593
5594 if (!sectors)
5595 sectors = mddev->dev_sectors;
5596 if (!raid_disks)
		/* size is defined by the smallest of previous and new size */
5598 raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
5599
5600 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
5601 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
5602 return sectors * (raid_disks - conf->max_degraded);
5603}
5604
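/*
 * Per-cpu scratch space: a spare page used for RAID6 parity recovery and
 * a scribble buffer for the async XOR/PQ routines.
 */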
5605static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
5606{
5607 safe_put_page(percpu->spare_page);
5608 kfree(percpu->scribble);
5609 percpu->spare_page = NULL;
5610 percpu->scribble = NULL;
5611}
5612
5613static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
5614{
5615 if (conf->level == 6 && !percpu->spare_page)
5616 percpu->spare_page = alloc_page(GFP_KERNEL);
5617 if (!percpu->scribble)
5618 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
5619
5620 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
5621 free_scratch_buffer(conf, percpu);
5622 return -ENOMEM;
5623 }
5624
5625 return 0;
5626}
5627
5628static void raid5_free_percpu(struct r5conf *conf)
5629{
5630 unsigned long cpu;
5631
5632 if (!conf->percpu)
5633 return;
5634
5635#ifdef CONFIG_HOTPLUG_CPU
5636 unregister_cpu_notifier(&conf->cpu_notify);
5637#endif
5638
5639 get_online_cpus();
5640 for_each_possible_cpu(cpu)
5641 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
5642 put_online_cpus();
5643
5644 free_percpu(conf->percpu);
5645}
5646
5647static void free_conf(struct r5conf *conf)
5648{
5649 free_thread_groups(conf);
5650 shrink_stripes(conf);
5651 raid5_free_percpu(conf);
5652 kfree(conf->disks);
5653 kfree(conf->stripe_hashtbl);
5654 kfree(conf);
5655}
5656
5657#ifdef CONFIG_HOTPLUG_CPU
5658static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
5659 void *hcpu)
5660{
5661 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
5662 long cpu = (long)hcpu;
5663 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
5664
5665 switch (action) {
5666 case CPU_UP_PREPARE:
5667 case CPU_UP_PREPARE_FROZEN:
5668 if (alloc_scratch_buffer(conf, percpu)) {
5669 pr_err("%s: failed memory allocation for cpu%ld\n",
5670 __func__, cpu);
5671 return notifier_from_errno(-ENOMEM);
5672 }
5673 break;
5674 case CPU_DEAD:
5675 case CPU_DEAD_FROZEN:
5676 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
5677 break;
5678 default:
5679 break;
5680 }
5681 return NOTIFY_OK;
5682}
5683#endif
5684
5685static int raid5_alloc_percpu(struct r5conf *conf)
5686{
5687 unsigned long cpu;
5688 int err = 0;
5689
5690 conf->percpu = alloc_percpu(struct raid5_percpu);
5691 if (!conf->percpu)
5692 return -ENOMEM;
5693
5694#ifdef CONFIG_HOTPLUG_CPU
5695 conf->cpu_notify.notifier_call = raid456_cpu_notify;
5696 conf->cpu_notify.priority = 0;
5697 err = register_cpu_notifier(&conf->cpu_notify);
5698 if (err)
5699 return err;
5700#endif
5701
5702 get_online_cpus();
5703 for_each_present_cpu(cpu) {
5704 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
5705 if (err) {
5706 pr_err("%s: failed memory allocation for cpu%ld\n",
5707 __func__, cpu);
5708 break;
5709 }
5710 }
5711 put_online_cpus();
5712
5713 return err;
5714}
5715
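/*
 * Build the r5conf for this array: validate level, layout and chunk
 * size, allocate the stripe hash table, per-cpu buffers and the initial
 * stripe cache, and register the raid5d thread.
 */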
5716static struct r5conf *setup_conf(struct mddev *mddev)
5717{
5718 struct r5conf *conf;
5719 int raid_disk, memory, max_disks;
5720 struct md_rdev *rdev;
5721 struct disk_info *disk;
5722 char pers_name[6];
5723 int i;
5724 int group_cnt, worker_cnt_per_group;
5725 struct r5worker_group *new_group;
5726
5727 if (mddev->new_level != 5
5728 && mddev->new_level != 4
5729 && mddev->new_level != 6) {
5730 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
5731 mdname(mddev), mddev->new_level);
5732 return ERR_PTR(-EIO);
5733 }
5734 if ((mddev->new_level == 5
5735 && !algorithm_valid_raid5(mddev->new_layout)) ||
5736 (mddev->new_level == 6
5737 && !algorithm_valid_raid6(mddev->new_layout))) {
5738 printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
5739 mdname(mddev), mddev->new_layout);
5740 return ERR_PTR(-EIO);
5741 }
5742 if (mddev->new_level == 6 && mddev->raid_disks < 4) {
5743 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
5744 mdname(mddev), mddev->raid_disks);
5745 return ERR_PTR(-EINVAL);
5746 }
5747
5748 if (!mddev->new_chunk_sectors ||
5749 (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
5750 !is_power_of_2(mddev->new_chunk_sectors)) {
5751 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
5752 mdname(mddev), mddev->new_chunk_sectors << 9);
5753 return ERR_PTR(-EINVAL);
5754 }
5755
5756 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
5757 if (conf == NULL)
5758 goto abort;
5759
5760 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
5761 &new_group)) {
5762 conf->group_cnt = group_cnt;
5763 conf->worker_cnt_per_group = worker_cnt_per_group;
5764 conf->worker_groups = new_group;
5765 } else
5766 goto abort;
5767 spin_lock_init(&conf->device_lock);
5768 seqcount_init(&conf->gen_lock);
5769 init_waitqueue_head(&conf->wait_for_stripe);
5770 init_waitqueue_head(&conf->wait_for_overlap);
5771 INIT_LIST_HEAD(&conf->handle_list);
5772 INIT_LIST_HEAD(&conf->hold_list);
5773 INIT_LIST_HEAD(&conf->delayed_list);
5774 INIT_LIST_HEAD(&conf->bitmap_list);
5775 init_llist_head(&conf->released_stripes);
5776 atomic_set(&conf->active_stripes, 0);
5777 atomic_set(&conf->preread_active_stripes, 0);
5778 atomic_set(&conf->active_aligned_reads, 0);
5779 conf->bypass_threshold = BYPASS_THRESHOLD;
5780 conf->recovery_disabled = mddev->recovery_disabled - 1;
5781
5782 conf->raid_disks = mddev->raid_disks;
5783 if (mddev->reshape_position == MaxSector)
5784 conf->previous_raid_disks = mddev->raid_disks;
5785 else
5786 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
5787 max_disks = max(conf->raid_disks, conf->previous_raid_disks);
5788 conf->scribble_len = scribble_len(max_disks);
5789
5790 conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
5791 GFP_KERNEL);
5792 if (!conf->disks)
5793 goto abort;
5794
5795 conf->mddev = mddev;
5796
5797 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
5798 goto abort;
5799
	/* We init hash_locks[0] separately so that it can be used
	 * as the reference lock in the spin_lock_nest_lock() call
	 * in lock_all_device_hash_locks_irq in order to convince
	 * lockdep that we know what we are doing.
	 */
5805 spin_lock_init(conf->hash_locks);
5806 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
5807 spin_lock_init(conf->hash_locks + i);
5808
5809 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5810 INIT_LIST_HEAD(conf->inactive_list + i);
5811
5812 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5813 INIT_LIST_HEAD(conf->temp_inactive_list + i);
5814
5815 conf->level = mddev->new_level;
5816 if (raid5_alloc_percpu(conf) != 0)
5817 goto abort;
5818
5819 pr_debug("raid456: run(%s) called.\n", mdname(mddev));
5820
5821 rdev_for_each(rdev, mddev) {
5822 raid_disk = rdev->raid_disk;
5823 if (raid_disk >= max_disks
5824 || raid_disk < 0)
5825 continue;
5826 disk = conf->disks + raid_disk;
5827
5828 if (test_bit(Replacement, &rdev->flags)) {
5829 if (disk->replacement)
5830 goto abort;
5831 disk->replacement = rdev;
5832 } else {
5833 if (disk->rdev)
5834 goto abort;
5835 disk->rdev = rdev;
5836 }
5837
5838 if (test_bit(In_sync, &rdev->flags)) {
5839 char b[BDEVNAME_SIZE];
5840 printk(KERN_INFO "md/raid:%s: device %s operational as raid"
5841 " disk %d\n",
5842 mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
5843 } else if (rdev->saved_raid_disk != raid_disk)
			/* Cannot rely on bitmap to complete recovery */
5845 conf->fullsync = 1;
5846 }
5847
5848 conf->chunk_sectors = mddev->new_chunk_sectors;
5849 conf->level = mddev->new_level;
5850 if (conf->level == 6)
5851 conf->max_degraded = 2;
5852 else
5853 conf->max_degraded = 1;
5854 conf->algorithm = mddev->new_layout;
5855 conf->reshape_progress = mddev->reshape_position;
5856 if (conf->reshape_progress != MaxSector) {
5857 conf->prev_chunk_sectors = mddev->chunk_sectors;
5858 conf->prev_algo = mddev->layout;
5859 }
5860
5861 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
5862 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
5863 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
5864 if (grow_stripes(conf, NR_STRIPES)) {
5865 printk(KERN_ERR
5866 "md/raid:%s: couldn't allocate %dkB for buffers\n",
5867 mdname(mddev), memory);
5868 goto abort;
5869 } else
5870 printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
5871 mdname(mddev), memory);
5872
5873 sprintf(pers_name, "raid%d", mddev->new_level);
5874 conf->thread = md_register_thread(raid5d, mddev, pers_name);
5875 if (!conf->thread) {
5876 printk(KERN_ERR
5877 "md/raid:%s: couldn't allocate thread.\n",
5878 mdname(mddev));
5879 goto abort;
5880 }
5881
5882 return conf;
5883
5884 abort:
5885 if (conf) {
5886 free_conf(conf);
5887 return ERR_PTR(-EIO);
5888 } else
5889 return ERR_PTR(-ENOMEM);
5890}
5891
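/*
 * Return 1 if the device in slot 'raid_disk' only ever holds parity
 * blocks in the given layout, so a not-in-sync device there cannot hold
 * stale data.
 */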
5892static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
5893{
5894 switch (algo) {
5895 case ALGORITHM_PARITY_0:
5896 if (raid_disk < max_degraded)
5897 return 1;
5898 break;
5899 case ALGORITHM_PARITY_N:
5900 if (raid_disk >= raid_disks - max_degraded)
5901 return 1;
5902 break;
5903 case ALGORITHM_PARITY_0_6:
5904 if (raid_disk == 0 ||
5905 raid_disk == raid_disks - 1)
5906 return 1;
5907 break;
5908 case ALGORITHM_LEFT_ASYMMETRIC_6:
5909 case ALGORITHM_RIGHT_ASYMMETRIC_6:
5910 case ALGORITHM_LEFT_SYMMETRIC_6:
5911 case ALGORITHM_RIGHT_SYMMETRIC_6:
5912 if (raid_disk == raid_disks - 1)
5913 return 1;
5914 }
5915 return 0;
5916}
5917
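/*
 * Assemble and start the array: check that any interrupted reshape can
 * safely continue, set up the configuration, count working and
 * dirty-parity devices, and configure the request queue limits.
 */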
5918static int run(struct mddev *mddev)
5919{
5920 struct r5conf *conf;
5921 int working_disks = 0;
5922 int dirty_parity_disks = 0;
5923 struct md_rdev *rdev;
5924 sector_t reshape_offset = 0;
5925 int i;
5926 long long min_offset_diff = 0;
5927 int first = 1;
5928
5929 if (mddev->recovery_cp != MaxSector)
5930 printk(KERN_NOTICE "md/raid:%s: not clean"
5931 " -- starting background reconstruction\n",
5932 mdname(mddev));
5933
5934 rdev_for_each(rdev, mddev) {
5935 long long diff;
5936 if (rdev->raid_disk < 0)
5937 continue;
5938 diff = (rdev->new_data_offset - rdev->data_offset);
5939 if (first) {
5940 min_offset_diff = diff;
5941 first = 0;
5942 } else if (mddev->reshape_backwards &&
5943 diff < min_offset_diff)
5944 min_offset_diff = diff;
5945 else if (!mddev->reshape_backwards &&
5946 diff > min_offset_diff)
5947 min_offset_diff = diff;
5948 }
5949
5950 if (mddev->reshape_position != MaxSector) {
		/* Check that we can continue the reshape.
		 * Difficulties arise if the stripe we would write to
		 * next is at or after the stripe we would read from next.
		 * For a reshape that changes the number of devices, this
		 * is only possible for a very short time, and mdadm makes
		 * sure that time appears to have passed before assembling
		 * the array.  So we fail if that time hasn't passed.
		 * For a reshape that keeps the number of devices the same,
		 * mdadm must be monitoring the reshape and keeping the
		 * critical areas read-only and backed up.  It will start
		 * the array in read-only mode, so we check for that.
		 */
5963 sector_t here_new, here_old;
5964 int old_disks;
5965 int max_degraded = (mddev->level == 6 ? 2 : 1);
5966
5967 if (mddev->new_level != mddev->level) {
5968 printk(KERN_ERR "md/raid:%s: unsupported reshape "
5969 "required - aborting.\n",
5970 mdname(mddev));
5971 return -EINVAL;
5972 }
5973 old_disks = mddev->raid_disks - mddev->delta_disks;
		/* reshape_position must be on a new-stripe boundary, and one
		 * further up in new geometry must map after here in old
		 * geometry.
		 */
5978 here_new = mddev->reshape_position;
5979 if (sector_div(here_new, mddev->new_chunk_sectors *
5980 (mddev->raid_disks - max_degraded))) {
5981 printk(KERN_ERR "md/raid:%s: reshape_position not "
5982 "on a stripe boundary\n", mdname(mddev));
5983 return -EINVAL;
5984 }
5985 reshape_offset = here_new * mddev->new_chunk_sectors;
		/* here_new is the stripe we will write to */
5987 here_old = mddev->reshape_position;
5988 sector_div(here_old, mddev->chunk_sectors *
5989 (old_disks-max_degraded));
		/* here_old is the first stripe that we might need to read
		 * from */
5992 if (mddev->delta_disks == 0) {
5993 if ((here_new * mddev->new_chunk_sectors !=
5994 here_old * mddev->chunk_sectors)) {
5995 printk(KERN_ERR "md/raid:%s: reshape position is"
5996 " confused - aborting\n", mdname(mddev));
5997 return -EINVAL;
5998 }
			/* We cannot be sure it is safe to start an in-place
			 * reshape.  It is only safe if user-space is monitoring
			 * and taking constant backups.
			 * mdadm always starts a situation like this in
			 * readonly mode so it can take control before
			 * allowing any writes.  So just check for that.
			 */
6006 if (abs(min_offset_diff) >= mddev->chunk_sectors &&
6007 abs(min_offset_diff) >= mddev->new_chunk_sectors)
				/* not really in-place - so OK */;
6009 else if (mddev->ro == 0) {
6010 printk(KERN_ERR "md/raid:%s: in-place reshape "
6011 "must be started in read-only mode "
6012 "- aborting\n",
6013 mdname(mddev));
6014 return -EINVAL;
6015 }
6016 } else if (mddev->reshape_backwards
6017 ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
6018 here_old * mddev->chunk_sectors)
6019 : (here_new * mddev->new_chunk_sectors >=
6020 here_old * mddev->chunk_sectors + (-min_offset_diff))) {
			/* Reading from the same stripe as writing to - bad */
6022 printk(KERN_ERR "md/raid:%s: reshape_position too early for "
6023 "auto-recovery - aborting.\n",
6024 mdname(mddev));
6025 return -EINVAL;
6026 }
6027 printk(KERN_INFO "md/raid:%s: reshape will continue\n",
6028 mdname(mddev));
		/* OK, we should be able to continue; */
6030 } else {
6031 BUG_ON(mddev->level != mddev->new_level);
6032 BUG_ON(mddev->layout != mddev->new_layout);
6033 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
6034 BUG_ON(mddev->delta_disks != 0);
6035 }
6036
6037 if (mddev->private == NULL)
6038 conf = setup_conf(mddev);
6039 else
6040 conf = mddev->private;
6041
6042 if (IS_ERR(conf))
6043 return PTR_ERR(conf);
6044
6045 conf->min_offset_diff = min_offset_diff;
6046 mddev->thread = conf->thread;
6047 conf->thread = NULL;
6048 mddev->private = conf;
6049
6050 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
6051 i++) {
6052 rdev = conf->disks[i].rdev;
6053 if (!rdev && conf->disks[i].replacement) {
			/* The replacement is all we have yet */
6055 rdev = conf->disks[i].replacement;
6056 conf->disks[i].replacement = NULL;
6057 clear_bit(Replacement, &rdev->flags);
6058 conf->disks[i].rdev = rdev;
6059 }
6060 if (!rdev)
6061 continue;
6062 if (conf->disks[i].replacement &&
6063 conf->reshape_progress != MaxSector) {
			/* replacements and reshape simply do not mix. */
6065 printk(KERN_ERR "md: cannot handle concurrent "
6066 "replacement and reshape.\n");
6067 goto abort;
6068 }
6069 if (test_bit(In_sync, &rdev->flags)) {
6070 working_disks++;
6071 continue;
6072 }
		/* This disc is not fully in-sync.  However if it
		 * just stored parity (beyond the recovery_offset),
		 * then we don't need to be concerned about the
		 * array being dirty.
		 * When reshape goes 'backwards', we never have
		 * partially completed devices, so we only need
		 * to worry about reshape going forwards.
		 */
		/* Hack because v0.91 doesn't store recovery_offset properly. */
6082 if (mddev->major_version == 0 &&
6083 mddev->minor_version > 90)
6084 rdev->recovery_offset = reshape_offset;
6085
6086 if (rdev->recovery_offset < reshape_offset) {
			/* We need to check old and new layout */
6088 if (!only_parity(rdev->raid_disk,
6089 conf->algorithm,
6090 conf->raid_disks,
6091 conf->max_degraded))
6092 continue;
6093 }
6094 if (!only_parity(rdev->raid_disk,
6095 conf->prev_algo,
6096 conf->previous_raid_disks,
6097 conf->max_degraded))
6098 continue;
6099 dirty_parity_disks++;
6100 }
6101
	/*
	 * 0 for a fully functional array, 1 or 2 for a degraded array.
	 */
6105 mddev->degraded = calc_degraded(conf);
6106
6107 if (has_failed(conf)) {
6108 printk(KERN_ERR "md/raid:%s: not enough operational devices"
6109 " (%d/%d failed)\n",
6110 mdname(mddev), mddev->degraded, conf->raid_disks);
6111 goto abort;
6112 }
6113
	/* device size must be a multiple of chunk size */
6115 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
6116 mddev->resync_max_sectors = mddev->dev_sectors;
6117
6118 if (mddev->degraded > dirty_parity_disks &&
6119 mddev->recovery_cp != MaxSector) {
6120 if (mddev->ok_start_degraded)
6121 printk(KERN_WARNING
6122 "md/raid:%s: starting dirty degraded array"
6123 " - data corruption possible.\n",
6124 mdname(mddev));
6125 else {
6126 printk(KERN_ERR
6127 "md/raid:%s: cannot start dirty degraded array.\n",
6128 mdname(mddev));
6129 goto abort;
6130 }
6131 }
6132
6133 if (mddev->degraded == 0)
6134 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
6135 " devices, algorithm %d\n", mdname(mddev), conf->level,
6136 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
6137 mddev->new_layout);
6138 else
6139 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
6140 " out of %d devices, algorithm %d\n",
6141 mdname(mddev), conf->level,
6142 mddev->raid_disks - mddev->degraded,
6143 mddev->raid_disks, mddev->new_layout);
6144
6145 print_raid5_conf(conf);
6146
6147 if (conf->reshape_progress != MaxSector) {
6148 conf->reshape_safe = conf->reshape_progress;
6149 atomic_set(&conf->reshape_stripes, 0);
6150 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6151 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6152 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
6153 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6154 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
6155 "reshape");
6156 }
6157
	/* Ok, everything is just fine now */
6159 if (mddev->to_remove == &raid5_attrs_group)
6160 mddev->to_remove = NULL;
6161 else if (mddev->kobj.sd &&
6162 sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
6163 printk(KERN_WARNING
6164 "raid5: failed to create sysfs attributes for %s\n",
6165 mdname(mddev));
6166 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
6167
6168 if (mddev->queue) {
6169 int chunk_size;
6170 bool discard_supported = true;
6171
		/* read-ahead size must cover two whole stripes, which
		 * is 2 * (number of data disks) * chunksize
		 */
6175 int data_disks = conf->previous_raid_disks - conf->max_degraded;
6176 int stripe = data_disks *
6177 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
6178 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
6179 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
6180
6181 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
6182
6183 mddev->queue->backing_dev_info.congested_data = mddev;
6184 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
6185
6186 chunk_size = mddev->chunk_sectors << 9;
6187 blk_queue_io_min(mddev->queue, chunk_size);
6188 blk_queue_io_opt(mddev->queue, chunk_size *
6189 (conf->raid_disks - conf->max_degraded));
6190 mddev->queue->limits.raid_partial_stripes_expensive = 1;
		/*
		 * We can only discard a whole stripe. It doesn't make sense to
		 * discard the data disks but write the parity disk.
		 */
6195 stripe = stripe * PAGE_SIZE;
		/* Round up to power of 2, as discard_granularity
		 * must be a power of 2
		 */
6198 while ((stripe-1) & stripe)
6199 stripe = (stripe | (stripe-1)) + 1;
6200 mddev->queue->limits.discard_alignment = stripe;
6201 mddev->queue->limits.discard_granularity = stripe;
6202
		/*
		 * unaligned part of discard request will be ignored, so can't
		 * guarantee discard_zeroes_data
		 */
6206 mddev->queue->limits.discard_zeroes_data = 0;
6207
6208 blk_queue_max_write_same_sectors(mddev->queue, 0);
6209
6210 rdev_for_each(rdev, mddev) {
6211 disk_stack_limits(mddev->gendisk, rdev->bdev,
6212 rdev->data_offset << 9);
6213 disk_stack_limits(mddev->gendisk, rdev->bdev,
6214 rdev->new_data_offset << 9);
			/*
			 * discard_zeroes_data is required, otherwise data
			 * could be lost. Consider a scenario: discard a stripe
			 * (the stripe could be inconsistent if
			 * discard_zeroes_data is 0); write one disk of the
			 * stripe (the stripe could be inconsistent again
			 * depending on which disks are used to calculate
			 * parity); the disk is broken; the stripe data of this
			 * disk is lost.
			 */
6225 if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
6226 !bdev_get_queue(rdev->bdev)->
6227 limits.discard_zeroes_data)
6228 discard_supported = false;
6229
			/*
			 * Unfortunately, discard_zeroes_data is not currently
			 * a guarantee - just a hint.  So we only allow DISCARD
			 * if the sysadmin has confirmed that only safe devices
			 * are in use by setting a module parameter.
			 */
6234 if (!devices_handle_discard_safely) {
6235 if (discard_supported) {
6236 pr_info("md/raid456: discard support disabled due to uncertainty.\n");
6237 pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
6238 }
6239 discard_supported = false;
6240 }
6241 }
6242
6243 if (discard_supported &&
6244 mddev->queue->limits.max_discard_sectors >= stripe &&
6245 mddev->queue->limits.discard_granularity >= stripe)
6246 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
6247 mddev->queue);
6248 else
6249 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
6250 mddev->queue);
6251 }
6252
6253 return 0;
6254abort:
6255 md_unregister_thread(&mddev->thread);
6256 print_raid5_conf(conf);
6257 free_conf(conf);
6258 mddev->private = NULL;
6259 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
6260 return -EIO;
6261}
6262
6263static int stop(struct mddev *mddev)
6264{
6265 struct r5conf *conf = mddev->private;
6266
6267 md_unregister_thread(&mddev->thread);
6268 if (mddev->queue)
6269 mddev->queue->backing_dev_info.congested_fn = NULL;
6270 free_conf(conf);
6271 mddev->private = NULL;
6272 mddev->to_remove = &raid5_attrs_group;
6273 return 0;
6274}
6275
6276static void status(struct seq_file *seq, struct mddev *mddev)
6277{
6278 struct r5conf *conf = mddev->private;
6279 int i;
6280
6281 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
6282 mddev->chunk_sectors / 2, mddev->layout);
6283 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
6284 for (i = 0; i < conf->raid_disks; i++)
6285 seq_printf (seq, "%s",
6286 conf->disks[i].rdev &&
6287 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
6288 seq_printf (seq, "]");
6289}
6290
6291static void print_raid5_conf (struct r5conf *conf)
6292{
6293 int i;
6294 struct disk_info *tmp;
6295
6296 printk(KERN_DEBUG "RAID conf printout:\n");
6297 if (!conf) {
6298 printk("(conf==NULL)\n");
6299 return;
6300 }
6301 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
6302 conf->raid_disks,
6303 conf->raid_disks - conf->mddev->degraded);
6304
6305 for (i = 0; i < conf->raid_disks; i++) {
6306 char b[BDEVNAME_SIZE];
6307 tmp = conf->disks + i;
6308 if (tmp->rdev)
6309 printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
6310 i, !test_bit(Faulty, &tmp->rdev->flags),
6311 bdevname(tmp->rdev->bdev, b));
6312 }
6313}
6314
6315static int raid5_spare_active(struct mddev *mddev)
6316{
6317 int i;
6318 struct r5conf *conf = mddev->private;
6319 struct disk_info *tmp;
6320 int count = 0;
6321 unsigned long flags;
6322
6323 for (i = 0; i < conf->raid_disks; i++) {
6324 tmp = conf->disks + i;
6325 if (tmp->replacement
6326 && tmp->replacement->recovery_offset == MaxSector
6327 && !test_bit(Faulty, &tmp->replacement->flags)
6328 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
			/* Replacement has just become active. */
6330 if (!tmp->rdev
6331 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
6332 count++;
6333 if (tmp->rdev) {
				/* Replaced device is not technically faulty,
				 * but we need to be sure it gets removed
				 * and never re-added.
				 */
6338 set_bit(Faulty, &tmp->rdev->flags);
6339 sysfs_notify_dirent_safe(
6340 tmp->rdev->sysfs_state);
6341 }
6342 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
6343 } else if (tmp->rdev
6344 && tmp->rdev->recovery_offset == MaxSector
6345 && !test_bit(Faulty, &tmp->rdev->flags)
6346 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
6347 count++;
6348 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
6349 }
6350 }
6351 spin_lock_irqsave(&conf->device_lock, flags);
6352 mddev->degraded = calc_degraded(conf);
6353 spin_unlock_irqrestore(&conf->device_lock, flags);
6354 print_raid5_conf(conf);
6355 return count;
6356}
6357
6358static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
6359{
6360 struct r5conf *conf = mddev->private;
6361 int err = 0;
6362 int number = rdev->raid_disk;
6363 struct md_rdev **rdevp;
6364 struct disk_info *p = conf->disks + number;
6365
6366 print_raid5_conf(conf);
6367 if (rdev == p->rdev)
6368 rdevp = &p->rdev;
6369 else if (rdev == p->replacement)
6370 rdevp = &p->replacement;
6371 else
6372 return 0;
6373
6374 if (number >= conf->raid_disks &&
6375 conf->reshape_progress == MaxSector)
6376 clear_bit(In_sync, &rdev->flags);
6377
6378 if (test_bit(In_sync, &rdev->flags) ||
6379 atomic_read(&rdev->nr_pending)) {
6380 err = -EBUSY;
6381 goto abort;
6382 }
6383
	/* Only remove non-faulty devices if recovery
	 * isn't possible.
	 */
6386 if (!test_bit(Faulty, &rdev->flags) &&
6387 mddev->recovery_disabled != conf->recovery_disabled &&
6388 !has_failed(conf) &&
6389 (!p->replacement || p->replacement == rdev) &&
6390 number < conf->raid_disks) {
6391 err = -EBUSY;
6392 goto abort;
6393 }
6394 *rdevp = NULL;
6395 synchronize_rcu();
6396 if (atomic_read(&rdev->nr_pending)) {
		/* lost the race, try later */
6398 err = -EBUSY;
6399 *rdevp = rdev;
6400 } else if (p->replacement) {
		/* We must have just cleared 'rdev' */
6402 p->rdev = p->replacement;
6403 clear_bit(Replacement, &p->replacement->flags);
		smp_mb(); /* Make sure other CPUs may see both as identical
			   * but will never see neither - if they are careful
			   */
6407 p->replacement = NULL;
6408 clear_bit(WantReplacement, &rdev->flags);
6409 } else
		/* We might have just removed the Replacement as faulty -
		 * clear the bit just in case
		 */
6413 clear_bit(WantReplacement, &rdev->flags);
6414abort:
6415
6416 print_raid5_conf(conf);
6417 return err;
6418}
6419
6420static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
6421{
6422 struct r5conf *conf = mddev->private;
6423 int err = -EEXIST;
6424 int disk;
6425 struct disk_info *p;
6426 int first = 0;
6427 int last = conf->raid_disks - 1;
6428
6429 if (mddev->recovery_disabled == conf->recovery_disabled)
6430 return -EBUSY;
6431
6432 if (rdev->saved_raid_disk < 0 && has_failed(conf))
		/* no point adding a device */
6434 return -EINVAL;
6435
6436 if (rdev->raid_disk >= 0)
6437 first = last = rdev->raid_disk;
6438
	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
6443 if (rdev->saved_raid_disk >= 0 &&
6444 rdev->saved_raid_disk >= first &&
6445 conf->disks[rdev->saved_raid_disk].rdev == NULL)
6446 first = rdev->saved_raid_disk;
6447
6448 for (disk = first; disk <= last; disk++) {
6449 p = conf->disks + disk;
6450 if (p->rdev == NULL) {
6451 clear_bit(In_sync, &rdev->flags);
6452 rdev->raid_disk = disk;
6453 err = 0;
6454 if (rdev->saved_raid_disk != disk)
6455 conf->fullsync = 1;
6456 rcu_assign_pointer(p->rdev, rdev);
6457 goto out;
6458 }
6459 }
6460 for (disk = first; disk <= last; disk++) {
6461 p = conf->disks + disk;
6462 if (test_bit(WantReplacement, &p->rdev->flags) &&
6463 p->replacement == NULL) {
6464 clear_bit(In_sync, &rdev->flags);
6465 set_bit(Replacement, &rdev->flags);
6466 rdev->raid_disk = disk;
6467 err = 0;
6468 conf->fullsync = 1;
6469 rcu_assign_pointer(p->replacement, rdev);
6470 break;
6471 }
6472 }
6473out:
6474 print_raid5_conf(conf);
6475 return err;
6476}
6477
6478static int raid5_resize(struct mddev *mddev, sector_t sectors)
6479{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
6487 sector_t newsize;
6488 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
6489 newsize = raid5_size(mddev, sectors, mddev->raid_disks);
6490 if (mddev->external_size &&
6491 mddev->array_sectors > newsize)
6492 return -EINVAL;
6493 if (mddev->bitmap) {
6494 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
6495 if (ret)
6496 return ret;
6497 }
6498 md_set_array_sectors(mddev, newsize);
6499 set_capacity(mddev->gendisk, mddev->array_sectors);
6500 revalidate_disk(mddev->gendisk);
6501 if (sectors > mddev->dev_sectors &&
6502 mddev->recovery_cp > mddev->dev_sectors) {
6503 mddev->recovery_cp = mddev->dev_sectors;
6504 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6505 }
6506 mddev->dev_sectors = sectors;
6507 mddev->resync_max_sectors = sectors;
6508 return 0;
6509}
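/* Example of the chunk rounding above: with chunk_sectors == 1024
 * (512 KiB chunks), a requested size of 1000000 sectors is masked
 * down to 999424 sectors (1000000 & ~1023), the largest multiple of
 * the chunk size that fits.
 */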

static int check_stripe_cache(struct mddev *mddev)
{
	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If the cache is too small the reshape is refused; the user can
	 * enlarge it through the stripe_cache_size sysfs attribute.
	 */
	struct r5conf *conf = mddev->private;
	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes ||
	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes) {
		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
		       mdname(mddev),
		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
			/ STRIPE_SIZE) * 4);
		return 0;
	}
	return 1;
}
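/* Worked example, assuming 4 KiB pages (STRIPE_SIZE == 4096): with
 * 512 KiB chunks (chunk_sectors == 1024) one chunk spans
 * (1024 << 9) / 4096 == 128 stripe_heads, so max_nr_stripes must be
 * at least 4 * 128 == 512 - above the default of 256.
 */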

static int check_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (mddev->delta_disks == 0 &&
	    mddev->new_layout == mddev->layout &&
	    mddev->new_chunk_sectors == mddev->chunk_sectors)
		return 0; /* nothing to do */
	if (has_failed(conf))
		return -EINVAL;
	if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
		/* We might be able to shrink, but the devices must
		 * be made bigger first.
		 * For raid6, 4 is the minimum size.
		 * Otherwise 2 is the minimum.
		 */
		int min = 2;
		if (mddev->level == 6)
			min = 4;
		if (mddev->raid_disks + mddev->delta_disks < min)
			return -EINVAL;
	}

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	return resize_stripes(conf, (conf->previous_raid_disks
				     + mddev->delta_disks));
}

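/* Begin a reshape: commit the new geometry under device_lock and the
 * gen_lock seqcount, absorb any usable spares, and hand the actual
 * data relocation off to a freshly registered "reshape" sync thread.
 */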
static int raid5_start_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	struct md_rdev *rdev;
	int spares = 0;
	unsigned long flags;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	if (has_failed(conf))
		return -EINVAL;

	rdev_for_each(rdev, mddev) {
		if (!test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))
			spares++;
	}

	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 * of that size
		 */
		return -EINVAL;

	/* Refuse to reduce size of the array.  Any reductions in
	 * array size must be through explicit setting of the
	 * array_size attribute.
	 */
	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
	    < mddev->array_sectors) {
		printk(KERN_ERR "md/raid:%s: array size must be reduced "
		       "before number of disks\n", mdname(mddev));
		return -EINVAL;
	}

	atomic_set(&conf->reshape_stripes, 0);
	spin_lock_irq(&conf->device_lock);
	write_seqcount_begin(&conf->gen_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->prev_chunk_sectors = conf->chunk_sectors;
	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->prev_algo = conf->algorithm;
	conf->algorithm = mddev->new_layout;
	conf->generation++;
	/* Code that selects data_offset needs to see the generation update
	 * if reshape_progress has been set - so a memory barrier is needed.
	 */
	smp_mb();
	if (mddev->reshape_backwards)
		conf->reshape_progress = raid5_size(mddev, 0, 0);
	else
		conf->reshape_progress = 0;
	conf->reshape_safe = conf->reshape_progress;
	write_seqcount_end(&conf->gen_lock);
	spin_unlock_irq(&conf->device_lock);

	/* Now make sure any requests that proceeded on the assumption
	 * the reshape wasn't running - like Discard or Read - have
	 * completed.
	 */
	mddev_suspend(mddev);
	mddev_resume(mddev);

	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 * Don't add devices if we are reducing the number of
	 * devices in the array.  This is because it is not possible
	 * to correctly record the "end" of the old space, so new
	 * devices could not be made fully usable.
	 */
	if (mddev->delta_disks >= 0) {
		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk < 0 &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (raid5_add_disk(mddev, rdev) == 0) {
					if (rdev->raid_disk
					    >= conf->previous_raid_disks)
						set_bit(In_sync, &rdev->flags);
					else
						rdev->recovery_offset = 0;

					if (sysfs_link_rdev(mddev, rdev))
						/* Failure here is OK */;
				}
			} else if (rdev->raid_disk >= conf->previous_raid_disks
				   && !test_bit(Faulty, &rdev->flags)) {
				/* This is a spare that was manually added */
				set_bit(In_sync, &rdev->flags);
			}

		/* When a reshape changes the number of devices,
		 * ->degraded is measured against the larger of the
		 * pre and post number of devices.
		 */
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded = calc_degraded(conf);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"reshape");
	if (!mddev->sync_thread) {
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		write_seqcount_begin(&conf->gen_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		mddev->new_chunk_sectors =
			conf->chunk_sectors = conf->prev_chunk_sectors;
		mddev->new_layout = conf->algorithm = conf->prev_algo;
		rdev_for_each(rdev, mddev)
			rdev->new_data_offset = rdev->data_offset;
		smp_wmb();
		conf->generation--;
		conf->reshape_progress = MaxSector;
		mddev->reshape_position = MaxSector;
		write_seqcount_end(&conf->gen_lock);
		spin_unlock_irq(&conf->device_lock);
		return -EAGAIN;
	}
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;
}

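/* Note on the error path above: if the reshape thread cannot be
 * registered, the geometry written under gen_lock is rolled back to
 * the previous values and -EAGAIN is returned, so the caller may
 * safely retry.
 */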
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'
 */
static void end_reshape(struct r5conf *conf)
{
	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		struct md_rdev *rdev;

		spin_lock_irq(&conf->device_lock);
		conf->previous_raid_disks = conf->raid_disks;
		rdev_for_each(rdev, conf->mddev)
			rdev->data_offset = rdev->new_data_offset;
		smp_wmb();
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);

		/* read-ahead size must cover two whole stripes, which is
		 * 2 * data_disks * chunksize
		 */
		if (conf->mddev->queue) {
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}
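/* Example, assuming 4 KiB pages: an 8-device raid6 (6 data disks)
 * with 512 KiB chunks gives stripe = 6 * 128 == 768 pages, so
 * read-ahead is raised to at least 1536 pages (6 MiB).
 */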

/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded = calc_degraded(conf);
			spin_unlock_irq(&conf->device_lock);
			for (d = conf->raid_disks;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				struct md_rdev *rdev = conf->disks[d].rdev;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
				rdev = conf->disks[d].replacement;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
}

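/* Quiesce handler.  state 1 drains all active stripes and blocks new
 * writes (conf->quiesce is set to 2 while draining so resync/reshape
 * waiters back off, then to 1); state 0 resumes normal operation;
 * state 2 only wakes wait_for_overlap so that a paused resync or
 * reshape can make progress while the array stays suspended.
 */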
static void raid5_quiesce(struct mddev *mddev, int state)
{
	struct r5conf *conf = mddev->private;

	switch (state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		lock_all_device_hash_locks_irq(conf);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_cmd(conf->wait_for_stripe,
			       atomic_read(&conf->active_stripes) == 0 &&
			       atomic_read(&conf->active_aligned_reads) == 0,
			       unlock_all_device_hash_locks_irq(conf),
			       lock_all_device_hash_locks_irq(conf));
		conf->quiesce = 1;
		unlock_all_device_hash_locks_irq(conf);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		lock_all_device_hash_locks_irq(conf);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		unlock_all_device_hash_locks_irq(conf);
		break;
	}
}

static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
	struct r0conf *raid0_conf = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_conf->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_conf->strip_zone[0].zone_end;
	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}
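/* The takeover treats the raid0 members as the data disks of a
 * degraded raid4/5 whose parity disk is missing.  zone_end is the
 * total sector count of zone 0, so dividing by nb_dev recovers the
 * per-device size: e.g. two equally sized 1 TiB members give a
 * zone_end of roughly 2 TiB and dev_sectors of roughly 1 TiB each.
 */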

static void *raid5_takeover_raid1(struct mddev *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize. */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}
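/* Example: for an array of 500000 sectors the loop halves chunksect
 * from 128 until it divides the size: 500000 is a multiple of 32 but
 * not of 64, so chunksect becomes 32 (a 16 KiB chunk), which still
 * satisfies the STRIPE_SIZE minimum on 4 KiB pages.
 */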

/* raid5 <- raid6 takeover: map each *_6 layout (a raid5-compatible
 * layout with Q on the last device) back to the plain raid5 layout
 * and drop the Q disk.
 */
static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}

static int raid5_check_reshape(struct mddev *mddev)
{
	/* For a 2-disk array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	struct r5conf *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}

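/* Same validation as the raid5 variant above, but against the raid6
 * layouts; raid6 always needs a restriping pass, so nothing is
 * applied immediately here.
 */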
static int raid6_check_reshape(struct mddev *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}

static void *raid5_takeover(struct mddev *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}

static void *raid4_takeover(struct mddev *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid5_personality;

static void *raid6_takeover(struct mddev *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}

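/* The method tables below register the raid4, raid5 and raid6
 * personalities with the md core.  All three levels share the same
 * request, run/stop and hot-plug handlers and differ only in their
 * takeover and check_reshape implementations.
 */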
static struct md_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
};
static struct md_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
};

static struct md_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
};

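/* Module init: allocate the shared raid5 workqueue used to offload
 * stripe handling, then register all three personalities; raid5_exit()
 * mirrors this in reverse order.
 */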
static int __init raid5_init(void)
{
	raid5_wq = alloc_workqueue("raid5wq",
		WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
	if (!raid5_wq)
		return -ENOMEM;
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
	destroy_workqueue(raid5_wq);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID4 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");