// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024
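
/*
 * The stripe cache keeps up to RBIO_CACHE_SIZE recently finished rbios
 * around on an LRU.  A later sub-stripe write to the same full stripe
 * can then steal the cached uptodate pages (see steal_rbio()) instead
 * of re-reading the untouched data stripes off disk for the
 * read/modify/write cycle.
 */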

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/*
	 * while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/*
	 * also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	/* number of stripes including the parity stripes */
	int real_stripes;

	/* number of pages in a single stripe */
	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	/* stripe number of the device we're scrubbing */
	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	refcount_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/*
	 * pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned off.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kvzalloc(table_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
		init_waitqueue_head(&cur->wait);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returned just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}

/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
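
/*
 * Example of how run_xor() is used below: to compute RAID5 parity over
 * three data pages d0-d2, the callers seed the parity page with the
 * first source and XOR in the rest:
 *
 *	void *pointers[] = { d0, d1, d2, p };
 *
 *	memcpy(p, d0, PAGE_SIZE);
 *	run_xor(pointers + 1, 2, PAGE_SIZE);
 *
 * which leaves p = d0 ^ d1 ^ d2.  Because XOR is its own inverse,
 * rebuilding a lost d1 is the same operation: d1 = d0 ^ d2 ^ p.
 */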

/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
	return ret;
}
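
/*
 * Example: with a 64K stripe_len and two data stripes, the rbio is full
 * once the queued bios cover 2 * 64K = 128K.  At that point every data
 * page of the horizontal stripe is being overwritten, so the parity can
 * be computed from the new data alone and no read/modify/write cycle
 * is needed.
 */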

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] != cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;

	/*
	 * a parity scrub reads the full stripe from the drive, then
	 * checks and repairs the parity and writes the new results.
	 *
	 * We're not allowed to add any new bios to its bio list,
	 * anyone else that wants to change this stripe needs to do
	 * their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	return 1;
}

static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}

/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * our rbio into an existing write.  The rbio is freed and the IO is
 * started automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned.
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * the IO submission.  If we return 1, the caller must assume the rbio has
 * been freed or merged into another rbio.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	DEFINE_WAIT(wait);
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				refcount_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}

			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/*
			 * no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}

/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				async_read_rebuild(next);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
			/*
			 * The barrier for this waitqueue_active is not needed,
			 * we're protected by h->lock and can't miss a wakeup.
			 */
		} else if (waitqueue_active(&h->wait)) {
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);
			wake_up(&h->wait);
			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	unlock_stripe(rbio);
	__free_raid_bio(rbio);
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *next;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	free_raid_bio(rbio);

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have all the completions, check for errors */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}

/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * is in the original bio or the rbio.
 *
 * if there is no bio for this proper stripe/page combination, NULL
 * is returned.  With bio_list_only set we only look at the pages
 * from the bio list, otherwise we fall back to the pages the rbio
 * allocated itself.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
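
/*
 * Example: stripe_len = 64K and PAGE_SIZE = 4K give 16 pages per stripe,
 * so a 3 device RAID5 full stripe (2 data + 1 parity) needs 48 pages.
 */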

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
		       sizeof(long), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages array point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
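
/*
 * Layout of the single allocation made by alloc_rbio(): the two page
 * pointer arrays and the dbitmap live directly behind the struct
 * itself:
 *
 *	[struct btrfs_raid_bio][stripe_pages][bio_pages][dbitmap]
 *	                        num_pages     num_pages  stripe_npages bits
 */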

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_status &&
		    last->bi_disk == stripe->dev->bdev->bd_disk &&
		    last->bi_partno == stripe->dev->bdev->bd_partno) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result of each page
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		int i = 0;

		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		if (bio_flagged(bio, BIO_CLONED))
			bio->bi_iter = btrfs_io_bio(bio)->iter;

		bio_for_each_segment(bvec, bio, iter) {
			rbio->bio_pages[page_index + i] = bvec.bv_page;
			i++;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}

/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}

/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}

/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	if (bio_flagged(bio, BIO_CLONED))
		bio->bi_iter = btrfs_io_bio(bio)->iter;

	bio_for_each_segment(bvec, bio, iter)
		SetPageUptodate(bvec.bv_page);
}

/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:

	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);

	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;

			/*
			 * if the page is in the bio list we don't need to
			 * read it off the disk, the higher layers already
			 * gave us the data
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}

/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}

/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block and run it.
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last)
		__raid56_parity_write(last);
	kfree(plug);
}

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}

/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}

/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	blk_status_t err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = BLK_STS_RESOURCE;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}

		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		if (err == BLK_STS_OK)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}

/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}

/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	return -EIO;
}

/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * reconstruct from the q stripe if they are
	 * asking for mirror 3
	 */
	if (mirror_num == 3)
		rbio->failb = rbio->real_stripes - 2;

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}
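
/*
 * Mirror numbers for RAID56 map to recovery strategies rather than to
 * extra copies: mirror 1 is the plain stripe read, mirror 2 rebuilds
 * the bad stripe from the remaining stripes and P, and mirror 3 on
 * RAID6 additionally marks the P stripe (index real_stripes - 2) as
 * failed, forcing reconstruction from the Q stripe.
 */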

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure all the pages that are added into the
 * scrub/replace raid bio are correct and will not be changed during the
 * scrub/replace; that is, those pages just hold metadata or file data
 * with checksum.
 */
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion
	 * handler so the scrub rbio can be waited on
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	for (i = 0; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}

	/* Now we just support the sectorsize equals to page size */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
	       rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}

/*
 * We just scrub the parity that we have correct data on the same horizontal,
 * so we needn't allocate all pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * Because the higher layers (the scrubber) are unlikely to
	 * use this area of the disk again soon, don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

/*
 * while we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * we can not use a scrubbing parity to repair data,
		 * so the repair capability is reduced by one.  (In the
		 * RAID5 case we can not repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * if all the data stripes are good, just rewrite
		 * the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * here we have one bad data stripe and one bad parity
		 * on RAID6.  If the bad parity is the one being
		 * scrubbed, we can still rebuild the data from the
		 * other parity; otherwise the data stripe can not be
		 * repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

/*
 * end io for the read phase of the parity scrub.  All the bios here are
 * physical stripe bios we've read from the disk so we can recalculate
 * the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are
 * read in, but it may trigger parity reconstruction if we had any
 * errors along the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally finish the scrub, but if there are
	 * any failed stripes we'll reconstruct from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bio_list_init(&bio_list);

	atomic_set(&rbio->error, 0);

	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;

			/*
			 * if the page is in the bio list we don't need to
			 * read it off the disk, the higher layers already
			 * gave us the data
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}

	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}

/*
 * The following code is used for dev replace of a missing RAID 5/6 device.
 */
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion
	 * handler so the rbio can be waited on
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}