/*
 * RAID5/6 support for btrfs.
 *
 * This file implements the stripe cache, the read/modify/write (RMW)
 * cycle, parity recovery and parity scrub for btrfs RAID5/6 block
 * groups.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/vmalloc.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/*
	 * while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	/* number of stripes we're actually using, not counting any replace target */
	int real_stripes;

	/* number of pages per stripe */
	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	/* stripe number that we're scrubbing */
	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	atomic_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!table) {
		table = vzalloc(table_size);
		if (!table)
			return -ENOMEM;
	}

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
		init_waitqueue_head(&cur->wait);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}
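
/*
 * A minimal sketch of the allocation pattern above, assuming a kernel
 * that provides kvzalloc() (this file open-codes the same kzalloc()
 * then vzalloc() fallback):
 *
 *	table = kvzalloc(table_size, GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *
 * Either way, the loser of the cmpxchg() race frees its copy with
 * kvfree(), which handles both kmalloc and vmalloc memory.
 */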

/*
 * caching an rbio means to copy anything from the
 * bio_list into the stripe_pages array.  The cache
 * lets later reads or rmw cycles on the same stripe
 * find uptodate pages without going back to the disk.
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}

/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held whenever the
 * destination is reachable through the hash table
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * if this rbio isn't in the cache, there is nothing to do
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * the bio list lock to protect the hash list
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_list, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				atomic_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		atomic_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
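
/*
 * For reference, what run_xor() computes: the destination page starts
 * out holding the first data block (finish_rmw memcpy()s D0 into the
 * parity page before calling run_xor(pointers + 1, nr_data - 1, ...)),
 * and every pass xors another batch of sources into it, ending with
 *
 *	P = D0 ^ D1 ^ ... ^ D(n-1)
 *
 * The loop exists only because xor_blocks() takes at most
 * MAX_XOR_BLOCKS sources per call.
 */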

/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a point where you know no other threads
 * can add new bios to the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
	return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;

	/*
	 * a parity scrub reads the whole stripe, then checks and
	 * repairs the parity.  Nobody else is allowed to add bios
	 * into it, and the same goes for rebuilds of a missing
	 * device.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	return 1;
}

static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}
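
/*
 * Layout implied by the indexing helpers above: stripe_pages is one
 * flat array with stripe_npages pages per stripe, data stripes first,
 * then parity:
 *
 *	pages [0, stripe_npages)                   data stripe 0
 *	pages [stripe_npages, 2 * stripe_npages)   data stripe 1
 *	...
 *	pages starting at nr_data * stripe_npages        P stripe
 *	pages starting at (nr_data + 1) * stripe_npages  Q stripe (raid6)
 */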

/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * our bio list into theirs.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * the IO submission.  If we return 1, the caller must assume the rbio has
 * been freed or merged into another rbio.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	DEFINE_WAIT(wait);
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;
	int walk = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		walk++;
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				atomic_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}

			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	atomic_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}

/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		atomic_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			atomic_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				async_read_rebuild(next);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
			/*
			 * The barrier for this waitqueue_active is not needed,
			 * we're protected by h->lock and can't miss a wakeup.
			 */
		} else if (waitqueue_active(&h->wait)) {
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);
			wake_up(&h->wait);
			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	WARN_ON(atomic_read(&rbio->refs) < 0);
	if (!atomic_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	unlock_stripe(rbio);
	__free_raid_bio(rbio);
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *next;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	free_raid_bio(rbio);

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_error = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	int err = bio->bi_error;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = 0;

	/*
	 * a scrub rewrite can't tolerate any write error, normal
	 * writes can take up to max_errors failures
	 */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = -EIO;

	rbio_orig_end_io(rbio, err);
}

/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the indexes in this rbio
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
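
/*
 * A worked example, assuming the usual 64K btrfs stripe length and 4K
 * pages: DIV_ROUND_UP(64K, 4K) = 16 pages per stripe, so a 3 device
 * raid5 full stripe (2 data + 1 parity) needs 16 * 3 = 48 pages.
 */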

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note
 * this does not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
					 struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
		       sizeof(long), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = root->fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	atomic_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages, bio_pages and dbitmap point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
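
/*
 * The single allocation made in alloc_rbio() is laid out like this
 * (follows directly from the pointer math above):
 *
 *	[ struct btrfs_raid_bio                ]
 *	[ stripe_pages: num_pages pointers     ]
 *	[ bio_pages:    num_pages pointers     ]
 *	[ dbitmap: stripe_npages bits, rounded ]
 *	[          up to whole longs           ]
 */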

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_error &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_size = 0;
	bio->bi_bdev = stripe->dev->bdev;
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;
	struct page *p;
	int i;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		for (i = 0; i < bio->bi_vcnt; i++) {
			p = bio->bi_io_vec[i].bv_page;
			rbio->bio_pages[page_index + i] = p;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}

/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	/* also send the same pages down to any dev-replace target */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
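
/*
 * The parity math used by finish_rmw(), for reference.  With data
 * blocks D0..D(n-1), raid5 keeps one parity block:
 *
 *	P = D0 ^ D1 ^ ... ^ D(n-1)
 *
 * raid6 also keeps a Q syndrome over GF(2^8), which is what
 * raid6_call.gen_syndrome() fills in:
 *
 *	Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*D(n-1)
 *
 * where g is the generator of the field and * is GF(2^8)
 * multiplication.  gen_syndrome() expects the pointers array to be
 * laid out as [D0 .. D(n-1), P, Q], which is exactly how the kmap
 * loop above fills it.
 */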

/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    bio->bi_bdev == stripe->dev->bdev) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}

/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}

/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	int i;
	struct page *p;

	for (i = 0; i < bio->bi_vcnt; i++) {
		p = bio->bi_io_vec[i].bv_page;
		SetPageUptodate(p);
	}
}

/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}

static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			rmw_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to write everything, but the
			 * page_in_rbio finds any pages in the bio list
			 * we don't need to read
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}

/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}

/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block and run all the IO.
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last)
		__raid56_parity_write(last);
	kfree(plug);
}
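
/*
 * A small worked example of the unplug path, assuming three partial
 * writes at logical offsets 128K, 0 and 64K were collected while
 * plugged: list_sort() orders them 0, 64K, 128K, neighbours that hit
 * the same stripe are merged by rbio_can_merge()/merge_rbio(), and
 * anything that now covers a full stripe skips the read side of the
 * rmw cycle entirely via full_stripe_write().
 */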

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}

/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(root->fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
			       sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = root->fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
	}
	return ret;
}

/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	int err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = -ENOMEM;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = -EIO;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = -EIO;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}

		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = 0;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		if (err == 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == 0) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
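
/*
 * Recovery math notes for the function above.  With a single data
 * failure (raid5, or raid6 with failb < 0), the missing block is just
 * the xor of P with the surviving data blocks, since
 * P = D0 ^ ... ^ D(n-1); that is what the pstripe: path computes.
 * The raid6 library covers the two-failure cases:
 *
 *	raid6_datap_recov()  - one data block and P are bad; the data
 *	                       block is rebuilt from Q, then P is redone.
 *	raid6_2data_recov()  - two data blocks are bad; both are solved
 *	                       for using P and Q together.
 *
 * Losing P and Q at once leaves the data intact, so that combination
 * only reaches here when a crc mismatch already proved the data bad,
 * and the code above gives up with -EIO.
 */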

/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, -EIO);
	else
		__raid_recover_end_io(rbio);
}

/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, -EIO);
	return -EIO;
}

/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(root->fs_info,
	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(root->fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * reconstruct from the q stripe if they are
	 * asking for mirror 3
	 */
	if (mirror_num == 3)
		rbio->failb = rbio->real_stripes - 2;

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its return value to the caller
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * otherwise our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}
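
/*
 * How mirror_num maps onto recovery here: mirror 1 is the normal read
 * path, so by the time this function runs, faila points at the data
 * stripe the failed read landed in.  mirror 2 rebuilds that stripe
 * from the remaining data plus P, and mirror 3 (raid6 only) also marks
 * the P stripe (real_stripes - 2) as failed, forcing a rebuild that
 * uses the Q syndrome instead.
 */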

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Note: We need to make sure all the pages that are added into the
 * scrub/replace raid bio are correct and will not be changed during the
 * scrub/replace.  That is, those pages just hold metadata or file data
 * with checksum.
 */
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * so the scrub rbio looks like the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	for (i = 0; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}

	/* Now we just support the sectorsize equals to page size */
	ASSERT(root->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
	       rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}

/*
 * We only scrub the parity for the horizontal stripes where we have
 * correct data, so we don't need to allocate pages for every stripe.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * Because the higher layers (the scrubber) are unlikely to
	 * use this area of the disk again soon, don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, 0);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
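
/*
 * Flow summary for finish_parity_scrub(): with need_check set, the
 * parity of every horizontal stripe marked in dbitmap is recomputed
 * into scratch pages (p_page/q_page) and memcmp()ed against the pages
 * read from disk.  Bits whose parity already matches are cleared from
 * dbitmap, so only genuinely bad parity pages are rewritten.  When a
 * dev-replace target is configured, every page originally marked
 * (saved in pbitmap before bits are cleared) is also written to the
 * target via bbio->tgtdev_map.
 */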

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we can not use the parity being scrubbed to
		 * repair data, the number of data failures we can repair
		 * drops by one.  (For RAID5 we can not repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good, the parity is the only bad
		 * stripe; just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6.  If the corrupted parity is the one
		 * being scrubbed, the other parity can still rebuild the
		 * data; otherwise the data stripe can not be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}

/*
 * end io for the read phase of the parity scrub.  All the bios here are
 * physical stripe bios we've read from the disk so we can recalculate
 * the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are
 * read in, but it may trigger parity reconstruction if we had any errors
 * along the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our
	 * write, but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bio_list_init(&bio_list);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to write everything, but the
			 * page_in_rbio finds any pages in the bio list
			 * we don't need to read
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}

	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(root, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * so this rbio looks like the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	return rbio;
}

static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}