// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS 11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/*
	 * while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	/* number of stripes (including P/Q), not counting any replace targets */
	int real_stripes;

	/* number of pages per stripe */
	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	/* stripe number that we're scrubbing */
	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the stripe cache is full
	 * or not
	 */
	int bio_list_bytes;

	/* bio counter references held for this rbio, dropped at end io */
	int generic_bio_cnt;

	refcount_t refs;

	/* number of stripe bios still in flight */
	atomic_t stripes_pending;

	/* number of IO errors seen on the stripe bios */
	atomic_t error;

	/*
	 * these are arrays of pointers.  We allocate the
	 * rbio big enough to hold them all and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;

	/* allocated with real_stripes-many pointers for finish_*() calls */
	void **finish_pointers;

	/* allocated with stripe_npages-many bits for finish_*() calls */
	unsigned long *finish_pbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void scrub_parity_work(struct btrfs_work *work);

static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
	btrfs_init_work(&rbio->work, work_func, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

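/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */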
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned off.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}

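/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */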
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		copy_page(d, s);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

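/*
 * we hash on the first logical address of the stripe
 */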
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}

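/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */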
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

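/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */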
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}

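/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */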
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_list, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

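/*
 * prune a given rbio from the cache
 */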
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

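/*
 * remove everything in the cache
 */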
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

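/*
 * remove all cached entries and free the hash table
 * used by unmount
 */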
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

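/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */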
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

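/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */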
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

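/*
 * returns 1 if the bio list inside this rbio covers an entire stripe (no
 * rmw required)
 */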
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

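/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */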
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;

	/*
	 * a parity scrub has read the full stripe and is checking and
	 * repairing the parity.  We're not allowed to add any new bios to
	 * its bio list, so it can never be merged with.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		/* rebuilds with different failed stripes can't be merged */
		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}

static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}

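/*
 * lock the stripe this rbio covers.  We look the rbio up in the stripe
 * hash table:
 *
 * If a matching rbio is found and it is only cached, we steal its pages
 * and take the lock ourselves.  If a matching rbio is actively under IO,
 * we try to merge into it; failing that, we queue this rbio on its
 * plug_list so the lock is handed to us when the current owner finishes.
 *
 * returns 1 if the rbio was merged or queued (someone else holds the
 * lock), and 0 if we got the lock and should continue with the IO.
 */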
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
			continue;

		spin_lock(&cur->bio_list_lock);

		/* can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * we couldn't merge with the running
		 * rbio, see if we can merge with the
		 * pending ones.  We don't have to
		 * check for rmw_locked because there
		 * is no way they are inside finish_rmw
		 * right now
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}

		/* no merging, put us on the tail of the plug list,
		 * our rbio will be started when the currently
		 * running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}

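/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */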
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, read_rebuild_work);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, read_rebuild_work);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_work);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

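/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */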
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

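/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */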
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* writes done as part of a parity scrub are not allowed to fail */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}

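/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */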
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

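/*
 * number of pages we need for the entire stripe across all the
 * drives
 */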
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}

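/*
 * allocation and initial setup for the btrfs_raid_bio.  Note
 * that this does not allocate any pages for rbio->pages.
 */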
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) +
		       sizeof(*rbio->stripe_pages) * num_pages +
		       sizeof(*rbio->bio_pages) * num_pages +
		       sizeof(*rbio->finish_pointers) * real_stripes +
		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
		       sizeof(*rbio->finish_pbitmap) *
				BITS_TO_LONGS(stripe_npages),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages, bio_pages, etc arrays point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
#define CONSUME_ALLOC(ptr, count)	do {				\
		ptr = p;						\
		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
	} while (0)
	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_pages, num_pages);
	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
#undef CONSUME_ALLOC

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}

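/* allocate pages for all the stripes in the bio, including parity */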
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

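/* only allocate pages for p/q stripes */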
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

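/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */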
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_disk == stripe->dev->bdev->bd_disk &&
		    last->bi_partno == stripe->dev->bdev->bd_partno) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	btrfs_io_bio(bio)->device = stripe->dev;
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}

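/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */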
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

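/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */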
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		int i = 0;

		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		if (bio_flagged(bio, BIO_CLONED))
			bio->bi_iter = btrfs_io_bio(bio)->iter;

		bio_for_each_segment(bvec, bio, iter) {
			rbio->bio_pages[page_index + i] = bvec.bv_page;
			i++;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}

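/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */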
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	bool has_qstripe;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (has_qstripe) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}

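/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */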
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		if (in_range(physical, stripe->physical, rbio->stripe_len) &&
		    stripe->dev->bdev &&
		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
			return i;
		}
	}
	return -1;
}

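/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */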
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	int i;

	for (i = 0; i < rbio->nr_data; i++) {
		u64 stripe_start = rbio->bbio->raid_map[i];

		if (in_range(logical, stripe_start, rbio->stripe_len))
			return i;
	}
	return -1;
}

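/*
 * returns -EIO if we had too many failures
 */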
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

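/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */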
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}

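/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */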
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, iter_all)
		SetPageUptodate(bvec->bv_page);
}

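/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */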
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

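/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */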
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}

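/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */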
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

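/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */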
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		start_async_work(rbio, rmw_work);
	return 0;
}

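/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */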
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}

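/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */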
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

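/*
 * rbios on the plug list are sorted for easier merging.
 */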
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			int ret;

			/* we have a full stripe, send it down */
			ret = full_stripe_write(cur);
			BUG_ON(ret);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last)
		__raid56_parity_write(last);
	kfree(plug);
}

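/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */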
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}

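/*
 * our main entry point for writes from the rest of the FS.
 */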
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}

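/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */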
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	blk_status_t err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = BLK_STS_RESOURCE;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb)
				swap(faila, failb);

			/* if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			copy_page(pointers[faila], pointers[rbio->nr_data]);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}

		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	/*
	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
	 * valid rbio which is consistent with ondisk content, thus such a
	 * valid rbio can be cached to avoid further disk reads.
	 */
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		/*
		 * - In case of two failures, where rbio->failb != -1:
		 *
		 *   Do not cache this rbio since the above read reconstruction
		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
		 *   changed some content of stripes which are not identical to
		 *   on-disk content any more, otherwise, a later write/recover
		 *   may steal stripe_pages from this rbio and end up with
		 *   corruptions or rebuild failures.
		 *
		 * - In case of single failure, where rbio->failb == -1:
		 *
		 *   Cache this rbio iff the above read reconstruction is
		 *   executed without problems.
		 */
		if (err == BLK_STS_OK && rbio->failb < 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}

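/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */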
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}

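/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */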
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			return 0;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}

	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;
}

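/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */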
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror == 2', reconstruct from all other stripes.
	 *
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror == 3' is to fail the p stripe and
		 * reconstruct from the q stripe.  'mirror > 3' is to
		 * fail a data stripe and reconstruct from p+q stripe.
		 */
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * currently lock owner is done
	 */
	return 0;
}

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

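/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure all the pages that are added to the
 * scrub/replace raid bio are correct and will not be changed during the
 * scrub/replace, i.e. those pages just hold metadata or file data
 * protected by a checksum.
 */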
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	/* for now we only support sectorsize equal to page size */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

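/* Used for both parity scrub and missing. */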
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}

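/*
 * We just scrub the parity that we have correct data on the same horizontal,
 * so we needn't allocate all pages for all the stripes.
 */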
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	bool has_qstripe;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (scrubber) are unlikely to use this area of
	 * the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (has_qstripe) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (has_qstripe) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			copy_page(parity, pointers[rbio->scrubp]);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < nr_data; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
		kunmap(p_page);
		/* balance the kmap of q_page done above for this page */
		if (has_qstripe)
			kunmap(q_page);
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

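/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */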
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * we cannot use the parity being scrubbed to repair data,
		 * so our repair capability is reduced by one.  (In the case
		 * of RAID5, we cannot repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good, only the parity is bad; just
		 * repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6.  If the corrupted parity is the one being
		 * scrubbed, we can luckily use the other parity to repair the
		 * data; otherwise the data stripe cannot be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

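/*
 * end io for the read phase of the scrub.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of
 * the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */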
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write,
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}

	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_parity_work);
}

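/* The following code is used for dev replace of a missing RAID 5/6 device. */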
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, read_rebuild_work);
}