// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS	11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	/* number of stripes in the full stripe, not counting replace targets */
	int real_stripes;

	/* number of pages per stripe */
	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	/* stripe index that we're scrubbing */
	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	refcount_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;

	/* allocated with real_stripes-many pointers for finish_*() calls */
	void **finish_pointers;

	/* allocated with stripe_npages-many bits for finish_*() calls */
	unsigned long *finish_pbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void scrub_parity_work(struct btrfs_work *work);

static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
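
/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */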
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned off.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kvzalloc(table_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}
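
/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */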
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		copy_page(d, s);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
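
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */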
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}
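
/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */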
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}
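
/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */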
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_list, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}
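
/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */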
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}
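
/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */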
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

/*
 * Returns 1 if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
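
/*
 * returns 1 if it is safe to merge two rbios: they must target the
 * same full stripe, be the same operation, and neither may already
 * be locked for final IO or sitting in the stripe cache.
 */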
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;

	/*
	 * Parity scrub needs to read the full stripe from the drive to
	 * check and repair the parity, and then write the new results.
	 *
	 * We're not allowed to add any new bios to the bio list here,
	 * anyone else that wants to change this stripe needs to do
	 * their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}

static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}
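
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * our bio with the existing one.  This is called
 * merging and 1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned.
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * the IO submission.  If we return 1, the caller must assume the rbio has
 * been freed or merged into another rbio.
 */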
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				refcount_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}


			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
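
/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */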
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, read_rebuild_work);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, read_rebuild_work);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_work);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}
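
/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */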
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have written all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}
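
/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */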
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) +
		       sizeof(*rbio->stripe_pages) * num_pages +
		       sizeof(*rbio->bio_pages) * num_pages +
		       sizeof(*rbio->finish_pointers) * real_stripes +
		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
		       sizeof(*rbio->finish_pbitmap) *
				BITS_TO_LONGS(stripe_npages),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages, bio_pages, etc arrays point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
#define CONSUME_ALLOC(ptr, count)	do {				\
		ptr = p;						\
		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
	} while (0)
	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_pages, num_pages);
	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
#undef CONSUME_ALLOC

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
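
/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */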
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_status &&
		    last->bi_disk == stripe->dev->bdev->bd_disk &&
		    last->bi_partno == stripe->dev->bdev->bd_partno) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		int i = 0;

		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		if (bio_flagged(bio, BIO_CLONED))
			bio->bi_iter = btrfs_io_bio(bio)->iter;

		bio_for_each_segment(bvec, bio, iter) {
			rbio->bio_pages[page_index + i] = bvec.bv_page;
			i++;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
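
/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */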
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}

/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    stripe->dev->bdev &&
		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}

/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}

/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, iter_all)
		SetPageUptodate(bvec->bv_page);
}

/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:

	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}

/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		start_async_work(rbio, rmw_work);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}
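
/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block and run the parity for each
 * stripe hoping we've got enough raw bits to cover a full stripe.
 */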
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			int ret;

			/* we have a full stripe, send it down */
			ret = full_stripe_write(cur);
			BUG_ON(ret);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last) {
		__raid56_parity_write(last);
	}
	kfree(plug);
}

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}

/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}
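
/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * recalculating the missing data.
 */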
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	blk_status_t err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = BLK_STS_RESOURCE;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			copy_page(pointers[faila], pointers[rbio->nr_data]);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	/*
	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
	 * valid rbio which is consistent with ondisk content, thus such a
	 * valid rbio can be cached to avoid further disk reads.
	 */
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		/*
		 * - In case of two failures, where rbio->failb != -1:
		 *
		 *   Do not cache this rbio since the above read reconstruction
		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
		 *   changed some content of stripes which are not identical to
		 *   on-disk content any more, otherwise, a later write/recover
		 *   may steal stripe_pages from this rbio and end up with
		 *   corruptions or rebuild failures.
		 *
		 * - In case of single failure, where rbio->failb == -1:
		 *
		 *   Cache this rbio iff the above read reconstruction is
		 *   executed without problems.
		 */
		if (err == BLK_STS_OK && rbio->failb < 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}

/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}
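
/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */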
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;
}

/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror == 3' is to fail the p stripe and
		 * reconstruct from the q stripe.  'mirror > 3' is to
		 * fail a data stripe and reconstruct from p+q stripe.
		 */
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its return value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}
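
/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We must make sure all the pages that are added into the
 * scrub/replace raid bio are correct and are not changed during the
 * scrub/replace; that is, those pages hold only metadata or file data
 * with checksums.
 */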
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	/* Now we just support the sectorsize equals to page size */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}

/*
 * We just scrub the parity that we have correct data on the same horizontal,
 * so we needn't allocate all pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * Because the higher layers (the scrubber) are unlikely to
	 * use this area of the disk again soon, don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			copy_page(parity, pointers[rbio->scrubp]);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < nr_data; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
		kunmap(p_page);
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
2516
2517static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2518{
2519 if (stripe >= 0 && stripe < rbio->nr_data)
2520 return 1;
2521 return 0;
2522}

/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we cannot use a scrubbing parity to repair
		 * the data, the repair capability is reduced by one.
		 * (In the case of RAID5, we cannot repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good but the parity is bad, just
		 * repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6.  If the corrupted parity is the one being
		 * scrubbed, we can use the other parity to repair the data;
		 * otherwise we cannot repair the data stripe.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

/*
 * end io for the read phase of the scrub.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of
 * the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_parity_work);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, read_rebuild_work);
}