linux/fs/btrfs/raid56.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2012 Fusion-io  All rights reserved.
   4 * Copyright (C) 2012 Intel Corp. All rights reserved.
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/bio.h>
   9#include <linux/slab.h>
  10#include <linux/blkdev.h>
  11#include <linux/raid/pq.h>
  12#include <linux/hash.h>
  13#include <linux/list_sort.h>
  14#include <linux/raid/xor.h>
  15#include <linux/mm.h>
  16#include "ctree.h"
  17#include "disk-io.h"
  18#include "volumes.h"
  19#include "raid56.h"
  20#include "async-thread.h"
  21
  22/* set when additional merges to this rbio are not allowed */
  23#define RBIO_RMW_LOCKED_BIT     1
  24
  25/*
  26 * set when this rbio is sitting in the hash, but it is just a cache
  27 * of past RMW
  28 */
  29#define RBIO_CACHE_BIT          2
  30
  31/*
  32 * set when it is safe to trust the stripe_pages for caching
  33 */
  34#define RBIO_CACHE_READY_BIT    3
  35
  36#define RBIO_CACHE_SIZE 1024
  37
  38#define BTRFS_STRIPE_HASH_TABLE_BITS                            11
  39
  40/* Used by the raid56 code to lock stripes for read/modify/write */
  41struct btrfs_stripe_hash {
  42        struct list_head hash_list;
  43        spinlock_t lock;
  44};
  45
  46/* Used by the raid56 code to lock stripes for read/modify/write */
  47struct btrfs_stripe_hash_table {
  48        struct list_head stripe_cache;
  49        spinlock_t cache_lock;
  50        int cache_size;
  51        struct btrfs_stripe_hash table[];
  52};
  53
  54enum btrfs_rbio_ops {
  55        BTRFS_RBIO_WRITE,
  56        BTRFS_RBIO_READ_REBUILD,
  57        BTRFS_RBIO_PARITY_SCRUB,
  58        BTRFS_RBIO_REBUILD_MISSING,
  59};
  60
  61struct btrfs_raid_bio {
  62        struct btrfs_fs_info *fs_info;
  63        struct btrfs_bio *bbio;
  64
  65        /* while we're doing rmw on a stripe
  66         * we put it into a hash table so we can
  67         * lock the stripe and merge more rbios
  68         * into it.
  69         */
  70        struct list_head hash_list;
  71
  72        /*
  73         * LRU list for the stripe cache
  74         */
  75        struct list_head stripe_cache;
  76
  77        /*
  78         * for scheduling work in the helper threads
  79         */
  80        struct btrfs_work work;
  81
  82        /*
  83         * bio list and bio_list_lock are used
  84         * to add more bios into the stripe
  85         * in hopes of avoiding the full rmw
  86         */
  87        struct bio_list bio_list;
  88        spinlock_t bio_list_lock;
  89
  90        /* also protected by the bio_list_lock, the
  91         * plug list is used by the plugging code
  92         * to collect partial bios while plugged.  The
  93         * stripe locking code also uses it to hand off
  94         * the stripe lock to the next pending IO
  95         */
  96        struct list_head plug_list;
  97
  98        /*
  99         * flags that tell us if it is safe to
 100         * merge with this bio
 101         */
 102        unsigned long flags;
 103
 104        /* size of each individual stripe on disk */
 105        int stripe_len;
 106
 107        /* number of data stripes (no p/q) */
 108        int nr_data;
 109
 110        int real_stripes;
 111
 112        int stripe_npages;
 113        /*
 114         * set if we're doing a parity rebuild
 115         * for a read from higher up, which is handled
 116         * differently from a parity rebuild as part of
 117         * rmw
 118         */
 119        enum btrfs_rbio_ops operation;
 120
 121        /* first bad stripe */
 122        int faila;
 123
 124        /* second bad stripe (for raid6 use) */
 125        int failb;
 126
 127        int scrubp;
 128        /*
 129         * number of pages needed to represent the full
 130         * stripe
 131         */
 132        int nr_pages;
 133
 134        /*
 135         * size of all the bios in the bio_list.  This
 136         * helps us decide if the rbio maps to a full
 137         * stripe or not
 138         */
 139        int bio_list_bytes;
 140
 141        int generic_bio_cnt;
 142
 143        refcount_t refs;
 144
 145        atomic_t stripes_pending;
 146
 147        atomic_t error;
 148        /*
 149         * these are two arrays of pointers.  We allocate the
  150         * rbio big enough to hold them both and set up their
 151         * locations when the rbio is allocated
 152         */
 153
 154        /* pointers to pages that we allocated for
 155         * reading/writing stripes directly from the disk (including P/Q)
 156         */
 157        struct page **stripe_pages;
 158
 159        /*
 160         * pointers to the pages in the bio_list.  Stored
 161         * here for faster lookup
 162         */
 163        struct page **bio_pages;
 164
 165        /*
 166         * bitmap to record which horizontal stripe has data
 167         */
 168        unsigned long *dbitmap;
 169
 170        /* allocated with real_stripes-many pointers for finish_*() calls */
 171        void **finish_pointers;
 172
 173        /* allocated with stripe_npages-many bits for finish_*() calls */
 174        unsigned long *finish_pbitmap;
 175};
 176
 177static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
 178static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
 179static void rmw_work(struct btrfs_work *work);
 180static void read_rebuild_work(struct btrfs_work *work);
 181static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
 182static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
 183static void __free_raid_bio(struct btrfs_raid_bio *rbio);
 184static void index_rbio_pages(struct btrfs_raid_bio *rbio);
 185static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
 186
 187static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 188                                         int need_check);
 189static void scrub_parity_work(struct btrfs_work *work);
 190
 191static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
 192{
 193        btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
 194        btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
 195}
 196
 197/*
 198 * the stripe hash table is used for locking, and to collect
 199 * bios in hopes of making a full stripe
 200 */
 201int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
 202{
 203        struct btrfs_stripe_hash_table *table;
 204        struct btrfs_stripe_hash_table *x;
 205        struct btrfs_stripe_hash *cur;
 206        struct btrfs_stripe_hash *h;
 207        int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
 208        int i;
 209        int table_size;
 210
 211        if (info->stripe_hash_table)
 212                return 0;
 213
 214        /*
  215         * The table is large; it starts at order 4 and can go as high as
  216         * order 7 if lock debugging is turned on.
 217         *
 218         * Try harder to allocate and fallback to vmalloc to lower the chance
 219         * of a failing mount.
 220         */
 221        table_size = sizeof(*table) + sizeof(*h) * num_entries;
 222        table = kvzalloc(table_size, GFP_KERNEL);
 223        if (!table)
 224                return -ENOMEM;
 225
 226        spin_lock_init(&table->cache_lock);
 227        INIT_LIST_HEAD(&table->stripe_cache);
 228
 229        h = table->table;
 230
 231        for (i = 0; i < num_entries; i++) {
 232                cur = h + i;
 233                INIT_LIST_HEAD(&cur->hash_list);
 234                spin_lock_init(&cur->lock);
 235        }
 236
 237        x = cmpxchg(&info->stripe_hash_table, NULL, table);
 238        if (x)
 239                kvfree(x);
 240        return 0;
 241}
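     /*
      * Rough arithmetic behind the order 4..7 comment above (illustrative,
      * 64 bit sizes): each bucket is a list_head plus a spinlock_t, around
      * 24 bytes without lock debugging, so 2048 buckets come to roughly
      * 48K and the allocation fits an order 4 (64K) block.  Lock debugging
      * inflates spinlock_t several-fold, which is how the same table can
      * grow to order 7.
      */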
 242
 243/*
 244 * caching an rbio means to copy anything from the
 245 * bio_pages array into the stripe_pages array.  We
 246 * use the page uptodate bit in the stripe cache array
 247 * to indicate if it has valid data
 248 *
 249 * once the caching is done, we set the cache ready
 250 * bit.
 251 */
 252static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
 253{
 254        int i;
 255        char *s;
 256        char *d;
 257        int ret;
 258
 259        ret = alloc_rbio_pages(rbio);
 260        if (ret)
 261                return;
 262
 263        for (i = 0; i < rbio->nr_pages; i++) {
 264                if (!rbio->bio_pages[i])
 265                        continue;
 266
 267                s = kmap(rbio->bio_pages[i]);
 268                d = kmap(rbio->stripe_pages[i]);
 269
 270                copy_page(d, s);
 271
 272                kunmap(rbio->bio_pages[i]);
 273                kunmap(rbio->stripe_pages[i]);
 274                SetPageUptodate(rbio->stripe_pages[i]);
 275        }
 276        set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
 277}
 278
 279/*
 280 * we hash on the first logical address of the stripe
 281 */
 282static int rbio_bucket(struct btrfs_raid_bio *rbio)
 283{
 284        u64 num = rbio->bbio->raid_map[0];
 285
 286        /*
 287         * we shift down quite a bit.  We're using byte
 288         * addressing, and most of the lower bits are zeros.
 289         * This tends to upset hash_64, and it consistently
 290         * returns just one or two different values.
 291         *
 292         * shifting off the lower bits fixes things.
 293         */
 294        return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
 295}
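     /*
      * Illustrative sketch, not part of the driver: full stripes are
      * stripe aligned (typically 64K), so the low 16 bits of raid_map[0]
      * are normally zero and hashing the raw address lands almost
      * everything in the same few buckets.  Shifting first restores the
      * spread:
      *
      *	u64 logical = 0x12340000ULL;	hypothetical stripe start
      *	int bucket = hash_64(logical >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
      *	bucket now falls anywhere in [0, 2047] for the 11 bit table
      */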
 296
 297/*
 298 * stealing an rbio means taking all the uptodate pages from the stripe
 299 * array in the source rbio and putting them into the destination rbio
 300 */
 301static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
 302{
 303        int i;
 304        struct page *s;
 305        struct page *d;
 306
 307        if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
 308                return;
 309
 310        for (i = 0; i < dest->nr_pages; i++) {
 311                s = src->stripe_pages[i];
 312                if (!s || !PageUptodate(s)) {
 313                        continue;
 314                }
 315
 316                d = dest->stripe_pages[i];
 317                if (d)
 318                        __free_page(d);
 319
 320                dest->stripe_pages[i] = s;
 321                src->stripe_pages[i] = NULL;
 322        }
 323}
 324
 325/*
 326 * merging means we take the bio_list from the victim and
 327 * splice it into the destination.  The victim should
 328 * be discarded afterwards.
 329 *
  330 * must be called with dest->bio_list_lock held
 331 */
 332static void merge_rbio(struct btrfs_raid_bio *dest,
 333                       struct btrfs_raid_bio *victim)
 334{
 335        bio_list_merge(&dest->bio_list, &victim->bio_list);
 336        dest->bio_list_bytes += victim->bio_list_bytes;
 337        dest->generic_bio_cnt += victim->generic_bio_cnt;
 338        bio_list_init(&victim->bio_list);
 339}
 340
 341/*
 342 * used to prune items that are in the cache.  The caller
 343 * must hold the hash table lock.
 344 */
 345static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 346{
 347        int bucket = rbio_bucket(rbio);
 348        struct btrfs_stripe_hash_table *table;
 349        struct btrfs_stripe_hash *h;
 350        int freeit = 0;
 351
 352        /*
 353         * check the bit again under the hash table lock.
 354         */
 355        if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 356                return;
 357
 358        table = rbio->fs_info->stripe_hash_table;
 359        h = table->table + bucket;
 360
 361        /* hold the lock for the bucket because we may be
 362         * removing it from the hash table
 363         */
 364        spin_lock(&h->lock);
 365
 366        /*
 367         * hold the lock for the bio list because we need
 368         * to make sure the bio list is empty
 369         */
 370        spin_lock(&rbio->bio_list_lock);
 371
 372        if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 373                list_del_init(&rbio->stripe_cache);
 374                table->cache_size -= 1;
 375                freeit = 1;
 376
 377                /* if the bio list isn't empty, this rbio is
 378                 * still involved in an IO.  We take it out
 379                 * of the cache list, and drop the ref that
 380                 * was held for the list.
 381                 *
 382                 * If the bio_list was empty, we also remove
 383                 * the rbio from the hash_table, and drop
 384                 * the corresponding ref
 385                 */
 386                if (bio_list_empty(&rbio->bio_list)) {
 387                        if (!list_empty(&rbio->hash_list)) {
 388                                list_del_init(&rbio->hash_list);
 389                                refcount_dec(&rbio->refs);
 390                                BUG_ON(!list_empty(&rbio->plug_list));
 391                        }
 392                }
 393        }
 394
 395        spin_unlock(&rbio->bio_list_lock);
 396        spin_unlock(&h->lock);
 397
 398        if (freeit)
 399                __free_raid_bio(rbio);
 400}
 401
 402/*
 403 * prune a given rbio from the cache
 404 */
 405static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 406{
 407        struct btrfs_stripe_hash_table *table;
 408        unsigned long flags;
 409
 410        if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 411                return;
 412
 413        table = rbio->fs_info->stripe_hash_table;
 414
 415        spin_lock_irqsave(&table->cache_lock, flags);
 416        __remove_rbio_from_cache(rbio);
 417        spin_unlock_irqrestore(&table->cache_lock, flags);
 418}
 419
 420/*
 421 * remove everything in the cache
 422 */
 423static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
 424{
 425        struct btrfs_stripe_hash_table *table;
 426        unsigned long flags;
 427        struct btrfs_raid_bio *rbio;
 428
 429        table = info->stripe_hash_table;
 430
 431        spin_lock_irqsave(&table->cache_lock, flags);
 432        while (!list_empty(&table->stripe_cache)) {
 433                rbio = list_entry(table->stripe_cache.next,
 434                                  struct btrfs_raid_bio,
 435                                  stripe_cache);
 436                __remove_rbio_from_cache(rbio);
 437        }
 438        spin_unlock_irqrestore(&table->cache_lock, flags);
 439}
 440
 441/*
 442 * remove all cached entries and free the hash table
 443 * used by unmount
 444 */
 445void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
 446{
 447        if (!info->stripe_hash_table)
 448                return;
 449        btrfs_clear_rbio_cache(info);
 450        kvfree(info->stripe_hash_table);
 451        info->stripe_hash_table = NULL;
 452}
 453
 454/*
 455 * insert an rbio into the stripe cache.  It
 456 * must have already been prepared by calling
 457 * cache_rbio_pages
 458 *
 459 * If this rbio was already cached, it gets
 460 * moved to the front of the lru.
 461 *
 462 * If the size of the rbio cache is too big, we
 463 * prune an item.
 464 */
 465static void cache_rbio(struct btrfs_raid_bio *rbio)
 466{
 467        struct btrfs_stripe_hash_table *table;
 468        unsigned long flags;
 469
 470        if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
 471                return;
 472
 473        table = rbio->fs_info->stripe_hash_table;
 474
 475        spin_lock_irqsave(&table->cache_lock, flags);
 476        spin_lock(&rbio->bio_list_lock);
 477
 478        /* bump our ref if we were not in the list before */
 479        if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
 480                refcount_inc(&rbio->refs);
 481
  482        if (!list_empty(&rbio->stripe_cache)) {
 483                list_move(&rbio->stripe_cache, &table->stripe_cache);
 484        } else {
 485                list_add(&rbio->stripe_cache, &table->stripe_cache);
 486                table->cache_size += 1;
 487        }
 488
 489        spin_unlock(&rbio->bio_list_lock);
 490
 491        if (table->cache_size > RBIO_CACHE_SIZE) {
 492                struct btrfs_raid_bio *found;
 493
 494                found = list_entry(table->stripe_cache.prev,
 495                                  struct btrfs_raid_bio,
 496                                  stripe_cache);
 497
 498                if (found != rbio)
 499                        __remove_rbio_from_cache(found);
 500        }
 501
 502        spin_unlock_irqrestore(&table->cache_lock, flags);
 503}
 504
 505/*
 506 * helper function to run the xor_blocks api.  It is only
 507 * able to do MAX_XOR_BLOCKS at a time, so we need to
 508 * loop through.
 509 */
 510static void run_xor(void **pages, int src_cnt, ssize_t len)
 511{
 512        int src_off = 0;
 513        int xor_src_cnt = 0;
 514        void *dest = pages[src_cnt];
 515
  516        while (src_cnt > 0) {
 517                xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
 518                xor_blocks(xor_src_cnt, len, dest, pages + src_off);
 519
 520                src_cnt -= xor_src_cnt;
 521                src_off += xor_src_cnt;
 522        }
 523}
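     /*
      * Usage sketch, mirroring the RAID5 path in finish_rmw() below: the
      * destination lives at pages[src_cnt] and must already be seeded with
      * the first data block, because only the remaining blocks are XORed in:
      *
      *	copy_page(pointers[nr_data], pointers[0]);	P starts as D0
      *	run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);	P ^= D1 ^ ... ^ Dn-1
      */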
 524
 525/*
 526 * Returns true if the bio list inside this rbio covers an entire stripe (no
 527 * rmw required).
 528 */
 529static int rbio_is_full(struct btrfs_raid_bio *rbio)
 530{
 531        unsigned long flags;
 532        unsigned long size = rbio->bio_list_bytes;
 533        int ret = 1;
 534
 535        spin_lock_irqsave(&rbio->bio_list_lock, flags);
 536        if (size != rbio->nr_data * rbio->stripe_len)
 537                ret = 0;
 538        BUG_ON(size > rbio->nr_data * rbio->stripe_len);
 539        spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
 540
 541        return ret;
 542}
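     /*
      * Worked example, assuming the common 64K stripe_len: a RAID5 rbio
      * spanning three devices has nr_data == 2, so the bio_list has to
      * cover 2 * 64K == 128K before rbio_is_full() returns 1 and the read
      * half of the read/modify/write cycle can be skipped entirely.
      */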
 543
 544/*
 545 * returns 1 if it is safe to merge two rbios together.
 546 * The merging is safe if the two rbios correspond to
 547 * the same stripe and if they are both going in the same
 548 * direction (read vs write), and if neither one is
 549 * locked for final IO
 550 *
 551 * The caller is responsible for locking such that
 552 * rmw_locked is safe to test
 553 */
 554static int rbio_can_merge(struct btrfs_raid_bio *last,
 555                          struct btrfs_raid_bio *cur)
 556{
 557        if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
 558            test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
 559                return 0;
 560
 561        /*
 562         * we can't merge with cached rbios, since the
 563         * idea is that when we merge the destination
 564         * rbio is going to run our IO for us.  We can
 565         * steal from cached rbios though, other functions
 566         * handle that.
 567         */
 568        if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
 569            test_bit(RBIO_CACHE_BIT, &cur->flags))
 570                return 0;
 571
 572        if (last->bbio->raid_map[0] !=
 573            cur->bbio->raid_map[0])
 574                return 0;
 575
 576        /* we can't merge with different operations */
 577        if (last->operation != cur->operation)
 578                return 0;
 579        /*
  580         * We need to read the full stripe from the drive,
  581         * check and repair the parity, and write the new results.
 582         *
 583         * We're not allowed to add any new bios to the
 584         * bio list here, anyone else that wants to
 585         * change this stripe needs to do their own rmw.
 586         */
 587        if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
 588                return 0;
 589
 590        if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
 591                return 0;
 592
 593        if (last->operation == BTRFS_RBIO_READ_REBUILD) {
 594                int fa = last->faila;
 595                int fb = last->failb;
 596                int cur_fa = cur->faila;
 597                int cur_fb = cur->failb;
 598
 599                if (last->faila >= last->failb) {
 600                        fa = last->failb;
 601                        fb = last->faila;
 602                }
 603
 604                if (cur->faila >= cur->failb) {
 605                        cur_fa = cur->failb;
 606                        cur_fb = cur->faila;
 607                }
 608
 609                if (fa != cur_fa || fb != cur_fb)
 610                        return 0;
 611        }
 612        return 1;
 613}
 614
 615static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
 616                                  int index)
 617{
 618        return stripe * rbio->stripe_npages + index;
 619}
 620
 621/*
 622 * these are just the pages from the rbio array, not from anything
 623 * the FS sent down to us
 624 */
 625static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
 626                                     int index)
 627{
 628        return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
 629}
 630
 631/*
 632 * helper to index into the pstripe
 633 */
 634static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
 635{
 636        return rbio_stripe_page(rbio, rbio->nr_data, index);
 637}
 638
 639/*
 640 * helper to index into the qstripe, returns null
 641 * if there is no qstripe
 642 */
 643static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
 644{
 645        if (rbio->nr_data + 1 == rbio->real_stripes)
 646                return NULL;
 647        return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
 648}
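     /*
      * Layout sketch for the three helpers above (illustrative numbers):
      * with 4K pages, a 64K stripe_len (stripe_npages == 16) and a four
      * device RAID6 (nr_data == 2), stripe_pages is indexed row-major:
      *
      *	pages  0..15   data stripe 0
      *	pages 16..31   data stripe 1
      *	pages 32..47   P stripe, rbio_pstripe_page(rbio, i) == entry 32 + i
      *	pages 48..63   Q stripe, rbio_qstripe_page(rbio, i) == entry 48 + i
      */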
 649
 650/*
 651 * The first stripe in the table for a logical address
 652 * has the lock.  rbios are added in one of three ways:
 653 *
 654 * 1) Nobody has the stripe locked yet.  The rbio is given
 655 * the lock and 0 is returned.  The caller must start the IO
 656 * themselves.
 657 *
 658 * 2) Someone has the stripe locked, but we're able to merge
 659 * with the lock owner.  The rbio is freed and the IO will
 660 * start automatically along with the existing rbio.  1 is returned.
 661 *
 662 * 3) Someone has the stripe locked, but we're not able to merge.
 663 * The rbio is added to the lock owner's plug list, or merged into
 664 * an rbio already on the plug list.  When the lock owner unlocks,
 665 * the next rbio on the list is run and the IO is started automatically.
 666 * 1 is returned
 667 *
 668 * If we return 0, the caller still owns the rbio and must continue with
 669 * IO submission.  If we return 1, the caller must assume the rbio has
 670 * already been freed.
 671 */
 672static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 673{
 674        int bucket = rbio_bucket(rbio);
 675        struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
 676        struct btrfs_raid_bio *cur;
 677        struct btrfs_raid_bio *pending;
 678        unsigned long flags;
 679        struct btrfs_raid_bio *freeit = NULL;
 680        struct btrfs_raid_bio *cache_drop = NULL;
 681        int ret = 0;
 682
 683        spin_lock_irqsave(&h->lock, flags);
 684        list_for_each_entry(cur, &h->hash_list, hash_list) {
 685                if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
 686                        spin_lock(&cur->bio_list_lock);
 687
 688                        /* can we steal this cached rbio's pages? */
 689                        if (bio_list_empty(&cur->bio_list) &&
 690                            list_empty(&cur->plug_list) &&
 691                            test_bit(RBIO_CACHE_BIT, &cur->flags) &&
 692                            !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
 693                                list_del_init(&cur->hash_list);
 694                                refcount_dec(&cur->refs);
 695
 696                                steal_rbio(cur, rbio);
 697                                cache_drop = cur;
 698                                spin_unlock(&cur->bio_list_lock);
 699
 700                                goto lockit;
 701                        }
 702
 703                        /* can we merge into the lock owner? */
 704                        if (rbio_can_merge(cur, rbio)) {
 705                                merge_rbio(cur, rbio);
 706                                spin_unlock(&cur->bio_list_lock);
 707                                freeit = rbio;
 708                                ret = 1;
 709                                goto out;
 710                        }
 711
 712
 713                        /*
 714                         * we couldn't merge with the running
 715                         * rbio, see if we can merge with the
 716                         * pending ones.  We don't have to
 717                         * check for rmw_locked because there
 718                         * is no way they are inside finish_rmw
 719                         * right now
 720                         */
 721                        list_for_each_entry(pending, &cur->plug_list,
 722                                            plug_list) {
 723                                if (rbio_can_merge(pending, rbio)) {
 724                                        merge_rbio(pending, rbio);
 725                                        spin_unlock(&cur->bio_list_lock);
 726                                        freeit = rbio;
 727                                        ret = 1;
 728                                        goto out;
 729                                }
 730                        }
 731
 732                        /* no merging, put us on the tail of the plug list,
  733                         * our rbio will be started when the currently
  734                         * running rbio unlocks
 735                         */
 736                        list_add_tail(&rbio->plug_list, &cur->plug_list);
 737                        spin_unlock(&cur->bio_list_lock);
 738                        ret = 1;
 739                        goto out;
 740                }
 741        }
 742lockit:
 743        refcount_inc(&rbio->refs);
 744        list_add(&rbio->hash_list, &h->hash_list);
 745out:
 746        spin_unlock_irqrestore(&h->lock, flags);
 747        if (cache_drop)
 748                remove_rbio_from_cache(cache_drop);
 749        if (freeit)
 750                __free_raid_bio(freeit);
 751        return ret;
 752}
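     /*
      * Typical caller pattern for the locking contract above, sketched from
      * full_stripe_write()/partial_stripe_write() further down:
      *
      *	if (lock_stripe_add(rbio) == 0)
      *		finish_rmw(rbio);	we own the stripe lock, start the IO
      *	a return of 1 means the rbio was merged or queued; don't touch it
      */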
 753
 754/*
 755 * called as rmw or parity rebuild is completed.  If the plug list has more
 756 * rbios waiting for this stripe, the next one on the list will be started
 757 */
 758static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
 759{
 760        int bucket;
 761        struct btrfs_stripe_hash *h;
 762        unsigned long flags;
 763        int keep_cache = 0;
 764
 765        bucket = rbio_bucket(rbio);
 766        h = rbio->fs_info->stripe_hash_table->table + bucket;
 767
 768        if (list_empty(&rbio->plug_list))
 769                cache_rbio(rbio);
 770
 771        spin_lock_irqsave(&h->lock, flags);
 772        spin_lock(&rbio->bio_list_lock);
 773
 774        if (!list_empty(&rbio->hash_list)) {
 775                /*
 776                 * if we're still cached and there is no other IO
 777                 * to perform, just leave this rbio here for others
 778                 * to steal from later
 779                 */
 780                if (list_empty(&rbio->plug_list) &&
 781                    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 782                        keep_cache = 1;
 783                        clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
 784                        BUG_ON(!bio_list_empty(&rbio->bio_list));
 785                        goto done;
 786                }
 787
 788                list_del_init(&rbio->hash_list);
 789                refcount_dec(&rbio->refs);
 790
 791                /*
 792                 * we use the plug list to hold all the rbios
 793                 * waiting for the chance to lock this stripe.
 794                 * hand the lock over to one of them.
 795                 */
 796                if (!list_empty(&rbio->plug_list)) {
 797                        struct btrfs_raid_bio *next;
 798                        struct list_head *head = rbio->plug_list.next;
 799
 800                        next = list_entry(head, struct btrfs_raid_bio,
 801                                          plug_list);
 802
 803                        list_del_init(&rbio->plug_list);
 804
 805                        list_add(&next->hash_list, &h->hash_list);
 806                        refcount_inc(&next->refs);
 807                        spin_unlock(&rbio->bio_list_lock);
 808                        spin_unlock_irqrestore(&h->lock, flags);
 809
 810                        if (next->operation == BTRFS_RBIO_READ_REBUILD)
 811                                start_async_work(next, read_rebuild_work);
 812                        else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
 813                                steal_rbio(rbio, next);
 814                                start_async_work(next, read_rebuild_work);
 815                        } else if (next->operation == BTRFS_RBIO_WRITE) {
 816                                steal_rbio(rbio, next);
 817                                start_async_work(next, rmw_work);
 818                        } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
 819                                steal_rbio(rbio, next);
 820                                start_async_work(next, scrub_parity_work);
 821                        }
 822
 823                        goto done_nolock;
 824                }
 825        }
 826done:
 827        spin_unlock(&rbio->bio_list_lock);
 828        spin_unlock_irqrestore(&h->lock, flags);
 829
 830done_nolock:
 831        if (!keep_cache)
 832                remove_rbio_from_cache(rbio);
 833}
 834
 835static void __free_raid_bio(struct btrfs_raid_bio *rbio)
 836{
 837        int i;
 838
 839        if (!refcount_dec_and_test(&rbio->refs))
 840                return;
 841
 842        WARN_ON(!list_empty(&rbio->stripe_cache));
 843        WARN_ON(!list_empty(&rbio->hash_list));
 844        WARN_ON(!bio_list_empty(&rbio->bio_list));
 845
 846        for (i = 0; i < rbio->nr_pages; i++) {
 847                if (rbio->stripe_pages[i]) {
 848                        __free_page(rbio->stripe_pages[i]);
 849                        rbio->stripe_pages[i] = NULL;
 850                }
 851        }
 852
 853        btrfs_put_bbio(rbio->bbio);
 854        kfree(rbio);
 855}
 856
 857static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
 858{
 859        struct bio *next;
 860
 861        while (cur) {
 862                next = cur->bi_next;
 863                cur->bi_next = NULL;
 864                cur->bi_status = err;
 865                bio_endio(cur);
 866                cur = next;
 867        }
 868}
 869
 870/*
 871 * this frees the rbio and runs through all the bios in the
 872 * bio_list and calls end_io on them
 873 */
 874static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
 875{
 876        struct bio *cur = bio_list_get(&rbio->bio_list);
 877        struct bio *extra;
 878
 879        if (rbio->generic_bio_cnt)
 880                btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
 881
 882        /*
 883         * At this moment, rbio->bio_list is empty, however since rbio does not
 884         * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
 885         * hash list, rbio may be merged with others so that rbio->bio_list
 886         * becomes non-empty.
 887         * Once unlock_stripe() is done, rbio->bio_list will not be updated any
 888         * more and we can call bio_endio() on all queued bios.
 889         */
 890        unlock_stripe(rbio);
 891        extra = bio_list_get(&rbio->bio_list);
 892        __free_raid_bio(rbio);
 893
 894        rbio_endio_bio_list(cur, err);
 895        if (extra)
 896                rbio_endio_bio_list(extra, err);
 897}
 898
 899/*
 900 * end io function used by finish_rmw.  When we finally
 901 * get here, we've written a full stripe
 902 */
 903static void raid_write_end_io(struct bio *bio)
 904{
 905        struct btrfs_raid_bio *rbio = bio->bi_private;
 906        blk_status_t err = bio->bi_status;
 907        int max_errors;
 908
 909        if (err)
 910                fail_bio_stripe(rbio, bio);
 911
 912        bio_put(bio);
 913
 914        if (!atomic_dec_and_test(&rbio->stripes_pending))
 915                return;
 916
 917        err = BLK_STS_OK;
 918
  919        /* OK, we have written all the stripes we need to. */
 920        max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
 921                     0 : rbio->bbio->max_errors;
 922        if (atomic_read(&rbio->error) > max_errors)
 923                err = BLK_STS_IOERR;
 924
 925        rbio_orig_end_io(rbio, err);
 926}
 927
 928/*
 929 * the read/modify/write code wants to use the original bio for
 930 * any pages it included, and then use the rbio for everything
 931 * else.  This function decides if a given index (stripe number)
 932 * and page number in that stripe fall inside the original bio
 933 * or the rbio.
 934 *
 935 * if you set bio_list_only, you'll get a NULL back for any ranges
 936 * that are outside the bio_list
 937 *
 938 * This doesn't take any refs on anything, you get a bare page pointer
 939 * and the caller must bump refs as required.
 940 *
 941 * You must call index_rbio_pages once before you can trust
 942 * the answers from this function.
 943 */
 944static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
 945                                 int index, int pagenr, int bio_list_only)
 946{
 947        int chunk_page;
 948        struct page *p = NULL;
 949
 950        chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
 951
 952        spin_lock_irq(&rbio->bio_list_lock);
 953        p = rbio->bio_pages[chunk_page];
 954        spin_unlock_irq(&rbio->bio_list_lock);
 955
 956        if (p || bio_list_only)
 957                return p;
 958
 959        return rbio->stripe_pages[chunk_page];
 960}
 961
 962/*
 963 * number of pages we need for the entire stripe across all the
 964 * drives
 965 */
 966static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
 967{
 968        return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
 969}
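     /*
      * Example, with 4K pages and the usual 64K stripe_len: a four device
      * full stripe needs DIV_ROUND_UP(64K, 4K) * 4 == 64 page pointers.
      */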
 970
 971/*
  972 * allocation and initial setup for the btrfs_raid_bio.  Note that
  973 * this does not allocate any pages for rbio->stripe_pages.
 974 */
 975static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
 976                                         struct btrfs_bio *bbio,
 977                                         u64 stripe_len)
 978{
 979        struct btrfs_raid_bio *rbio;
 980        int nr_data = 0;
 981        int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
 982        int num_pages = rbio_nr_pages(stripe_len, real_stripes);
 983        int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
 984        void *p;
 985
 986        rbio = kzalloc(sizeof(*rbio) +
 987                       sizeof(*rbio->stripe_pages) * num_pages +
 988                       sizeof(*rbio->bio_pages) * num_pages +
 989                       sizeof(*rbio->finish_pointers) * real_stripes +
 990                       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
 991                       sizeof(*rbio->finish_pbitmap) *
 992                                BITS_TO_LONGS(stripe_npages),
 993                       GFP_NOFS);
 994        if (!rbio)
 995                return ERR_PTR(-ENOMEM);
 996
 997        bio_list_init(&rbio->bio_list);
 998        INIT_LIST_HEAD(&rbio->plug_list);
 999        spin_lock_init(&rbio->bio_list_lock);
1000        INIT_LIST_HEAD(&rbio->stripe_cache);
1001        INIT_LIST_HEAD(&rbio->hash_list);
1002        rbio->bbio = bbio;
1003        rbio->fs_info = fs_info;
1004        rbio->stripe_len = stripe_len;
1005        rbio->nr_pages = num_pages;
1006        rbio->real_stripes = real_stripes;
1007        rbio->stripe_npages = stripe_npages;
1008        rbio->faila = -1;
1009        rbio->failb = -1;
1010        refcount_set(&rbio->refs, 1);
1011        atomic_set(&rbio->error, 0);
1012        atomic_set(&rbio->stripes_pending, 0);
1013
1014        /*
1015         * the stripe_pages, bio_pages, etc arrays point to the extra
1016         * memory we allocated past the end of the rbio
1017         */
1018        p = rbio + 1;
1019#define CONSUME_ALLOC(ptr, count)       do {                            \
1020                ptr = p;                                                \
1021                p = (unsigned char *)p + sizeof(*(ptr)) * (count);      \
1022        } while (0)
1023        CONSUME_ALLOC(rbio->stripe_pages, num_pages);
1024        CONSUME_ALLOC(rbio->bio_pages, num_pages);
1025        CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
1026        CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
1027        CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
1028#undef  CONSUME_ALLOC
1029
1030        if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1031                nr_data = real_stripes - 1;
1032        else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1033                nr_data = real_stripes - 2;
1034        else
1035                BUG();
1036
1037        rbio->nr_data = nr_data;
1038        return rbio;
1039}
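     /*
      * Memory layout of the single kzalloc() above, in the order the
      * CONSUME_ALLOC() calls walk it (for reference):
      *
      *	[struct btrfs_raid_bio]
      *	[stripe_pages:    num_pages    x struct page *]
      *	[bio_pages:       num_pages    x struct page *]
      *	[finish_pointers: real_stripes x void *]
      *	[dbitmap:         BITS_TO_LONGS(stripe_npages) x unsigned long]
      *	[finish_pbitmap:  BITS_TO_LONGS(stripe_npages) x unsigned long]
      */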
1040
1041/* allocate pages for all the stripes in the bio, including parity */
1042static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1043{
1044        int i;
1045        struct page *page;
1046
1047        for (i = 0; i < rbio->nr_pages; i++) {
1048                if (rbio->stripe_pages[i])
1049                        continue;
1050                page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1051                if (!page)
1052                        return -ENOMEM;
1053                rbio->stripe_pages[i] = page;
1054        }
1055        return 0;
1056}
1057
1058/* only allocate pages for p/q stripes */
1059static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1060{
1061        int i;
1062        struct page *page;
1063
1064        i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1065
1066        for (; i < rbio->nr_pages; i++) {
1067                if (rbio->stripe_pages[i])
1068                        continue;
1069                page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1070                if (!page)
1071                        return -ENOMEM;
1072                rbio->stripe_pages[i] = page;
1073        }
1074        return 0;
1075}
1076
1077/*
1078 * add a single page from a specific stripe into our list of bios for IO
1079 * this will try to merge into existing bios if possible, and returns
1080 * zero if all went well.
1081 */
1082static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1083                            struct bio_list *bio_list,
1084                            struct page *page,
1085                            int stripe_nr,
1086                            unsigned long page_index,
1087                            unsigned long bio_max_len)
1088{
1089        struct bio *last = bio_list->tail;
1090        u64 last_end = 0;
1091        int ret;
1092        struct bio *bio;
1093        struct btrfs_bio_stripe *stripe;
1094        u64 disk_start;
1095
1096        stripe = &rbio->bbio->stripes[stripe_nr];
1097        disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1098
1099        /* if the device is missing, just fail this stripe */
1100        if (!stripe->dev->bdev)
1101                return fail_rbio_index(rbio, stripe_nr);
1102
1103        /* see if we can add this page onto our existing bio */
1104        if (last) {
1105                last_end = (u64)last->bi_iter.bi_sector << 9;
1106                last_end += last->bi_iter.bi_size;
1107
1108                /*
1109                 * we can't merge these if they are from different
1110                 * devices or if they are not contiguous
1111                 */
1112                if (last_end == disk_start && stripe->dev->bdev &&
1113                    !last->bi_status &&
1114                    last->bi_disk == stripe->dev->bdev->bd_disk &&
1115                    last->bi_partno == stripe->dev->bdev->bd_partno) {
1116                        ret = bio_add_page(last, page, PAGE_SIZE, 0);
1117                        if (ret == PAGE_SIZE)
1118                                return 0;
1119                }
1120        }
1121
1122        /* put a new bio on the list */
1123        bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1124        bio->bi_iter.bi_size = 0;
1125        bio_set_dev(bio, stripe->dev->bdev);
1126        bio->bi_iter.bi_sector = disk_start >> 9;
1127
1128        bio_add_page(bio, page, PAGE_SIZE, 0);
1129        bio_list_add(bio_list, bio);
1130        return 0;
1131}
1132
1133/*
1134 * while we're doing the read/modify/write cycle, we could
1135 * have errors in reading pages off the disk.  This checks
1136 * for errors and if we're not able to read the page it'll
1137 * trigger parity reconstruction.  The rmw will be finished
1138 * after we've reconstructed the failed stripes
1139 */
1140static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1141{
1142        if (rbio->faila >= 0 || rbio->failb >= 0) {
1143                BUG_ON(rbio->faila == rbio->real_stripes - 1);
1144                __raid56_parity_recover(rbio);
1145        } else {
1146                finish_rmw(rbio);
1147        }
1148}
1149
1150/*
1151 * helper function to walk our bio list and populate the bio_pages array with
1152 * the result.  This seems expensive, but it is faster than constantly
 1153 * searching through the bio list as we set up the IO in finish_rmw or stripe
1154 * reconstruction.
1155 *
1156 * This must be called before you trust the answers from page_in_rbio
1157 */
1158static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1159{
1160        struct bio *bio;
1161        u64 start;
1162        unsigned long stripe_offset;
1163        unsigned long page_index;
1164
1165        spin_lock_irq(&rbio->bio_list_lock);
1166        bio_list_for_each(bio, &rbio->bio_list) {
1167                struct bio_vec bvec;
1168                struct bvec_iter iter;
1169                int i = 0;
1170
1171                start = (u64)bio->bi_iter.bi_sector << 9;
1172                stripe_offset = start - rbio->bbio->raid_map[0];
1173                page_index = stripe_offset >> PAGE_SHIFT;
1174
1175                if (bio_flagged(bio, BIO_CLONED))
1176                        bio->bi_iter = btrfs_io_bio(bio)->iter;
1177
1178                bio_for_each_segment(bvec, bio, iter) {
1179                        rbio->bio_pages[page_index + i] = bvec.bv_page;
1180                        i++;
1181                }
1182        }
1183        spin_unlock_irq(&rbio->bio_list_lock);
1184}
1185
1186/*
1187 * this is called from one of two situations.  We either
1188 * have a full stripe from the higher layers, or we've read all
1189 * the missing bits off disk.
1190 *
1191 * This will calculate the parity and then send down any
1192 * changed blocks.
1193 */
1194static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1195{
1196        struct btrfs_bio *bbio = rbio->bbio;
1197        void **pointers = rbio->finish_pointers;
1198        int nr_data = rbio->nr_data;
1199        int stripe;
1200        int pagenr;
1201        int p_stripe = -1;
1202        int q_stripe = -1;
1203        struct bio_list bio_list;
1204        struct bio *bio;
1205        int ret;
1206
1207        bio_list_init(&bio_list);
1208
1209        if (rbio->real_stripes - rbio->nr_data == 1) {
1210                p_stripe = rbio->real_stripes - 1;
1211        } else if (rbio->real_stripes - rbio->nr_data == 2) {
1212                p_stripe = rbio->real_stripes - 2;
1213                q_stripe = rbio->real_stripes - 1;
1214        } else {
1215                BUG();
1216        }
1217
1218        /* at this point we either have a full stripe,
1219         * or we've read the full stripe from the drive.
1220         * recalculate the parity and write the new results.
1221         *
1222         * We're not allowed to add any new bios to the
1223         * bio list here, anyone else that wants to
1224         * change this stripe needs to do their own rmw.
1225         */
1226        spin_lock_irq(&rbio->bio_list_lock);
1227        set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1228        spin_unlock_irq(&rbio->bio_list_lock);
1229
1230        atomic_set(&rbio->error, 0);
1231
1232        /*
1233         * now that we've set rmw_locked, run through the
1234         * bio list one last time and map the page pointers
1235         *
1236         * We don't cache full rbios because we're assuming
1237         * the higher layers are unlikely to use this area of
1238         * the disk again soon.  If they do use it again,
1239         * hopefully they will send another full bio.
1240         */
1241        index_rbio_pages(rbio);
1242        if (!rbio_is_full(rbio))
1243                cache_rbio_pages(rbio);
1244        else
1245                clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1246
1247        for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1248                struct page *p;
1249                /* first collect one page from each data stripe */
1250                for (stripe = 0; stripe < nr_data; stripe++) {
1251                        p = page_in_rbio(rbio, stripe, pagenr, 0);
1252                        pointers[stripe] = kmap(p);
1253                }
1254
1255                /* then add the parity stripe */
1256                p = rbio_pstripe_page(rbio, pagenr);
1257                SetPageUptodate(p);
1258                pointers[stripe++] = kmap(p);
1259
1260                if (q_stripe != -1) {
1261
1262                        /*
1263                         * raid6, add the qstripe and call the
1264                         * library function to fill in our p/q
1265                         */
1266                        p = rbio_qstripe_page(rbio, pagenr);
1267                        SetPageUptodate(p);
1268                        pointers[stripe++] = kmap(p);
1269
1270                        raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1271                                                pointers);
1272                } else {
1273                        /* raid5 */
1274                        copy_page(pointers[nr_data], pointers[0]);
1275                        run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1276                }
1277
1278
1279                for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1280                        kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1281        }
1282
1283        /*
1284         * time to start writing.  Make bios for everything from the
1285         * higher layers (the bio_list in our rbio) and our p/q.  Ignore
1286         * everything else.
1287         */
1288        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1289                for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1290                        struct page *page;
1291                        if (stripe < rbio->nr_data) {
1292                                page = page_in_rbio(rbio, stripe, pagenr, 1);
1293                                if (!page)
1294                                        continue;
1295                        } else {
 1296                                page = rbio_stripe_page(rbio, stripe, pagenr);
1297                        }
1298
1299                        ret = rbio_add_io_page(rbio, &bio_list,
1300                                       page, stripe, pagenr, rbio->stripe_len);
1301                        if (ret)
1302                                goto cleanup;
1303                }
1304        }
1305
1306        if (likely(!bbio->num_tgtdevs))
1307                goto write_data;
1308
1309        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1310                if (!bbio->tgtdev_map[stripe])
1311                        continue;
1312
1313                for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1314                        struct page *page;
1315                        if (stripe < rbio->nr_data) {
1316                                page = page_in_rbio(rbio, stripe, pagenr, 1);
1317                                if (!page)
1318                                        continue;
1319                        } else {
 1320                                page = rbio_stripe_page(rbio, stripe, pagenr);
1321                        }
1322
1323                        ret = rbio_add_io_page(rbio, &bio_list, page,
1324                                               rbio->bbio->tgtdev_map[stripe],
1325                                               pagenr, rbio->stripe_len);
1326                        if (ret)
1327                                goto cleanup;
1328                }
1329        }
1330
1331write_data:
1332        atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1333        BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1334
1335        while (1) {
1336                bio = bio_list_pop(&bio_list);
1337                if (!bio)
1338                        break;
1339
1340                bio->bi_private = rbio;
1341                bio->bi_end_io = raid_write_end_io;
1342                bio->bi_opf = REQ_OP_WRITE;
1343
1344                submit_bio(bio);
1345        }
1346        return;
1347
1348cleanup:
1349        rbio_orig_end_io(rbio, BLK_STS_IOERR);
1350
1351        while ((bio = bio_list_pop(&bio_list)))
1352                bio_put(bio);
1353}
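     /*
      * Parity relations computed above, for reference (standard RAID5/6
      * math, nothing btrfs specific): with data blocks D0..Dn-1,
      *
      *	P = D0 ^ D1 ^ ... ^ Dn-1
      *	Q = g^0.D0 ^ g^1.D1 ^ ... ^ g^(n-1).Dn-1   over GF(2^8), g = {02}
      *
      * raid6_call.gen_syndrome() expects the pointers array ordered
      * D0..Dn-1, P, Q, which is exactly how the loop above fills it in.
      */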
1354
1355/*
1356 * helper to find the stripe number for a given bio.  Used to figure out which
1357 * stripe has failed.  This expects the bio to correspond to a physical disk,
1358 * so it looks up based on physical sector numbers.
1359 */
1360static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1361                           struct bio *bio)
1362{
1363        u64 physical = bio->bi_iter.bi_sector;
1364        u64 stripe_start;
1365        int i;
1366        struct btrfs_bio_stripe *stripe;
1367
1368        physical <<= 9;
1369
1370        for (i = 0; i < rbio->bbio->num_stripes; i++) {
1371                stripe = &rbio->bbio->stripes[i];
1372                stripe_start = stripe->physical;
1373                if (physical >= stripe_start &&
1374                    physical < stripe_start + rbio->stripe_len &&
1375                    stripe->dev->bdev &&
1376                    bio->bi_disk == stripe->dev->bdev->bd_disk &&
1377                    bio->bi_partno == stripe->dev->bdev->bd_partno) {
1378                        return i;
1379                }
1380        }
1381        return -1;
1382}
1383
1384/*
1385 * helper to find the stripe number for a given
1386 * bio (before mapping).  Used to figure out which stripe has
1387 * failed.  This looks up based on logical block numbers.
1388 */
1389static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1390                                   struct bio *bio)
1391{
1392        u64 logical = bio->bi_iter.bi_sector;
1393        u64 stripe_start;
1394        int i;
1395
1396        logical <<= 9;
1397
1398        for (i = 0; i < rbio->nr_data; i++) {
1399                stripe_start = rbio->bbio->raid_map[i];
1400                if (logical >= stripe_start &&
1401                    logical < stripe_start + rbio->stripe_len) {
1402                        return i;
1403                }
1404        }
1405        return -1;
1406}
1407
1408/*
1409 * returns -EIO if we had too many failures
1410 */
1411static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1412{
1413        unsigned long flags;
1414        int ret = 0;
1415
1416        spin_lock_irqsave(&rbio->bio_list_lock, flags);
1417
1418        /* we already know this stripe is bad, move on */
1419        if (rbio->faila == failed || rbio->failb == failed)
1420                goto out;
1421
1422        if (rbio->faila == -1) {
1423                /* first failure on this rbio */
1424                rbio->faila = failed;
1425                atomic_inc(&rbio->error);
1426        } else if (rbio->failb == -1) {
1427                /* second failure on this rbio */
1428                rbio->failb = failed;
1429                atomic_inc(&rbio->error);
1430        } else {
1431                ret = -EIO;
1432        }
1433out:
1434        spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1435
1436        return ret;
1437}
1438
1439/*
1440 * helper to fail a stripe based on a physical disk
1441 * bio.
1442 */
1443static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1444                           struct bio *bio)
1445{
1446        int failed = find_bio_stripe(rbio, bio);
1447
1448        if (failed < 0)
1449                return -EIO;
1450
1451        return fail_rbio_index(rbio, failed);
1452}
1453
1454/*
1455 * this sets each page in the bio uptodate.  It should only be used on private
1456 * rbio pages, nothing that comes in from the higher layers
1457 */
1458static void set_bio_pages_uptodate(struct bio *bio)
1459{
1460        struct bio_vec *bvec;
1461        struct bvec_iter_all iter_all;
1462
1463        ASSERT(!bio_flagged(bio, BIO_CLONED));
1464
1465        bio_for_each_segment_all(bvec, bio, iter_all)
1466                SetPageUptodate(bvec->bv_page);
1467}
1468
1469/*
1470 * end io for the read phase of the rmw cycle.  All the bios here are physical
1471 * stripe bios we've read from the disk so we can recalculate the parity of the
1472 * stripe.
1473 *
1474 * This will usually kick off finish_rmw once all the bios are read in, but it
1475 * may trigger parity reconstruction if we had any errors along the way
1476 */
1477static void raid_rmw_end_io(struct bio *bio)
1478{
1479        struct btrfs_raid_bio *rbio = bio->bi_private;
1480
1481        if (bio->bi_status)
1482                fail_bio_stripe(rbio, bio);
1483        else
1484                set_bio_pages_uptodate(bio);
1485
1486        bio_put(bio);
1487
1488        if (!atomic_dec_and_test(&rbio->stripes_pending))
1489                return;
1490
1491        if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1492                goto cleanup;
1493
1494        /*
1495         * this will normally call finish_rmw to start our write
1496         * but if there are any failed stripes we'll reconstruct
1497         * from parity first
1498         */
1499        validate_rbio_for_rmw(rbio);
1500        return;
1501
1502cleanup:
1503
1504        rbio_orig_end_io(rbio, BLK_STS_IOERR);
1505}
1506
1507/*
1508 * the stripe must be locked by the caller.  It will
1509 * unlock after all the writes are done
1510 */
1511static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1512{
1513        int bios_to_read = 0;
1514        struct bio_list bio_list;
1515        int ret;
1516        int pagenr;
1517        int stripe;
1518        struct bio *bio;
1519
1520        bio_list_init(&bio_list);
1521
1522        ret = alloc_rbio_pages(rbio);
1523        if (ret)
1524                goto cleanup;
1525
1526        index_rbio_pages(rbio);
1527
1528        atomic_set(&rbio->error, 0);
1529        /*
1530         * build a list of bios to read all the missing parts of this
1531         * stripe
1532         */
1533        for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1534                for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1535                        struct page *page;
1536                        /*
1537                         * we want to find all the pages missing from
1538                         * the rbio and read them from the disk.  If
1539                         * page_in_rbio finds a page in the bio list
1540                         * we don't need to read it off the stripe.
1541                         */
1542                        page = page_in_rbio(rbio, stripe, pagenr, 1);
1543                        if (page)
1544                                continue;
1545
1546                        page = rbio_stripe_page(rbio, stripe, pagenr);
1547                        /*
1548                         * the bio cache may have handed us an uptodate
1549                         * page.  If so, be happy and use it
1550                         */
1551                        if (PageUptodate(page))
1552                                continue;
1553
1554                        ret = rbio_add_io_page(rbio, &bio_list, page,
1555                                       stripe, pagenr, rbio->stripe_len);
1556                        if (ret)
1557                                goto cleanup;
1558                }
1559        }
1560
1561        bios_to_read = bio_list_size(&bio_list);
1562        if (!bios_to_read) {
1563                /*
1564                 * this can happen if others have merged with
1565                 * us; it means there is nothing left to read.
1566                 * But if there are missing devices it may not be
1567                 * safe to do the full stripe write yet.
1568                 */
1569                goto finish;
1570        }
1571
1572        /*
1573         * the bbio may be freed once we submit the last bio.  Make sure
1574         * not to touch it after that
1575         */
1576        atomic_set(&rbio->stripes_pending, bios_to_read);
1577        while (1) {
1578                bio = bio_list_pop(&bio_list);
1579                if (!bio)
1580                        break;
1581
1582                bio->bi_private = rbio;
1583                bio->bi_end_io = raid_rmw_end_io;
1584                bio->bi_opf = REQ_OP_READ;
1585
1586                btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1587
1588                submit_bio(bio);
1589        }
1590        /* the actual write will happen once the reads are done */
1591        return 0;
1592
1593cleanup:
1594        rbio_orig_end_io(rbio, BLK_STS_IOERR);
1595
1596        while ((bio = bio_list_pop(&bio_list)))
1597                bio_put(bio);
1598
1599        return -EIO;
1600
1601finish:
1602        validate_rbio_for_rmw(rbio);
1603        return 0;
1604}
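
/*
 * Illustrative sketch (not part of the original file): once the reads
 * queued above complete, finish_rmw() recomputes parity for every page
 * of the full stripe.  For RAID5 that boils down to a per-page XOR
 * across the data stripes; a minimal sketch, assuming nr_data buffers
 * of PAGE_SIZE bytes each:
 *
 *	static void sketch_raid5_parity(void **data, u8 *parity, int nr_data)
 *	{
 *		size_t i;
 *		int d;
 *
 *		memcpy(parity, data[0], PAGE_SIZE);
 *		for (d = 1; d < nr_data; d++)
 *			for (i = 0; i < PAGE_SIZE; i++)
 *				parity[i] ^= ((u8 *)data[d])[i];
 *	}
 *
 * The real code works on kmap'd stripe pages and uses run_xor() for
 * RAID5 or raid6_call.gen_syndrome() for RAID6 rather than open coding
 * the loop.
 */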
1605
1606/*
1607 * if the upper layers pass in a full stripe, we thank them by only allocating
1608 * enough pages to hold the parity, and sending it all down quickly.
1609 */
1610static int full_stripe_write(struct btrfs_raid_bio *rbio)
1611{
1612        int ret;
1613
1614        ret = alloc_rbio_parity_pages(rbio);
1615        if (ret) {
1616                __free_raid_bio(rbio);
1617                return ret;
1618        }
1619
1620        ret = lock_stripe_add(rbio);
1621        if (ret == 0)
1622                finish_rmw(rbio);
1623        return 0;
1624}
1625
1626/*
1627 * partial stripe writes get handed over to async helpers.
1628 * We're really hoping to merge a few more writes into this
1629 * rbio before calculating new parity
1630 */
1631static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1632{
1633        int ret;
1634
1635        ret = lock_stripe_add(rbio);
1636        if (ret == 0)
1637                start_async_work(rbio, rmw_work);
1638        return 0;
1639}
1640
1641/*
1642 * sometimes while we were reading from the drive to
1643 * recalculate parity, enough new bios come in to create
1644 * a full stripe.  So we do a check here to see if we can
1645 * go directly to finish_rmw
1646 */
1647static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1648{
1649        /* head off into rmw land if we don't have a full stripe */
1650        if (!rbio_is_full(rbio))
1651                return partial_stripe_write(rbio);
1652        return full_stripe_write(rbio);
1653}
1654
1655/*
1656 * We use plugging call backs to collect full stripes.
1657 * Any time we get a partial stripe write while plugged
1658 * we collect it into a list.  When the unplug comes down,
1659 * we sort the list by logical block number and merge
1660 * everything we can into the same rbios
1661 */
1662struct btrfs_plug_cb {
1663        struct blk_plug_cb cb;
1664        struct btrfs_fs_info *info;
1665        struct list_head rbio_list;
1666        struct btrfs_work work;
1667};
1668
1669/*
1670 * rbios on the plug list are sorted for easier merging.
1671 */
1672static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1673{
1674        struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1675                                                 plug_list);
1676        struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1677                                                 plug_list);
1678        u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1679        u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1680
1681        if (a_sector < b_sector)
1682                return -1;
1683        if (a_sector > b_sector)
1684                return 1;
1685        return 0;
1686}
1687
1688static void run_plug(struct btrfs_plug_cb *plug)
1689{
1690        struct btrfs_raid_bio *cur;
1691        struct btrfs_raid_bio *last = NULL;
1692
1693        /*
1694         * sort our plug list then try to merge
1695         * everything we can in hopes of creating full
1696         * stripes.
1697         */
1698        list_sort(NULL, &plug->rbio_list, plug_cmp);
1699        while (!list_empty(&plug->rbio_list)) {
1700                cur = list_entry(plug->rbio_list.next,
1701                                 struct btrfs_raid_bio, plug_list);
1702                list_del_init(&cur->plug_list);
1703
1704                if (rbio_is_full(cur)) {
1705                        int ret;
1706
1707                        /* we have a full stripe, send it down */
1708                        ret = full_stripe_write(cur);
1709                        BUG_ON(ret);
1710                        continue;
1711                }
1712                if (last) {
1713                        if (rbio_can_merge(last, cur)) {
1714                                merge_rbio(last, cur);
1715                                __free_raid_bio(cur);
1716                                continue;
1717
1718                        }
1719                        __raid56_parity_write(last);
1720                }
1721                last = cur;
1722        }
1723        if (last) {
1724                __raid56_parity_write(last);
1725        }
1726        kfree(plug);
1727}
1728
1729/*
1730 * if the unplug comes from schedule, we have to push the
1731 * work off to a helper thread
1732 */
1733static void unplug_work(struct btrfs_work *work)
1734{
1735        struct btrfs_plug_cb *plug;
1736        plug = container_of(work, struct btrfs_plug_cb, work);
1737        run_plug(plug);
1738}
1739
1740static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1741{
1742        struct btrfs_plug_cb *plug;
1743        plug = container_of(cb, struct btrfs_plug_cb, cb);
1744
1745        if (from_schedule) {
1746                btrfs_init_work(&plug->work, btrfs_rmw_helper,
1747                                unplug_work, NULL, NULL);
1748                btrfs_queue_work(plug->info->rmw_workers,
1749                                 &plug->work);
1750                return;
1751        }
1752        run_plug(plug);
1753}
1754
1755/*
1756 * our main entry point for writes from the rest of the FS.
1757 */
1758int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1759                        struct btrfs_bio *bbio, u64 stripe_len)
1760{
1761        struct btrfs_raid_bio *rbio;
1762        struct btrfs_plug_cb *plug = NULL;
1763        struct blk_plug_cb *cb;
1764        int ret;
1765
1766        rbio = alloc_rbio(fs_info, bbio, stripe_len);
1767        if (IS_ERR(rbio)) {
1768                btrfs_put_bbio(bbio);
1769                return PTR_ERR(rbio);
1770        }
1771        bio_list_add(&rbio->bio_list, bio);
1772        rbio->bio_list_bytes = bio->bi_iter.bi_size;
1773        rbio->operation = BTRFS_RBIO_WRITE;
1774
1775        btrfs_bio_counter_inc_noblocked(fs_info);
1776        rbio->generic_bio_cnt = 1;
1777
1778        /*
1779         * don't plug on full rbios, just get them out the door
1780         * as quickly as we can
1781         */
1782        if (rbio_is_full(rbio)) {
1783                ret = full_stripe_write(rbio);
1784                if (ret)
1785                        btrfs_bio_counter_dec(fs_info);
1786                return ret;
1787        }
1788
1789        cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1790        if (cb) {
1791                plug = container_of(cb, struct btrfs_plug_cb, cb);
1792                if (!plug->info) {
1793                        plug->info = fs_info;
1794                        INIT_LIST_HEAD(&plug->rbio_list);
1795                }
1796                list_add_tail(&rbio->plug_list, &plug->rbio_list);
1797                ret = 0;
1798        } else {
1799                ret = __raid56_parity_write(rbio);
1800                if (ret)
1801                        btrfs_bio_counter_dec(fs_info);
1802        }
1803        return ret;
1804}
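
/*
 * Illustrative sketch: the plug handling above rides on the generic
 * block-layer plugging API.  A submitter that wraps several
 * partial-stripe writes in a plug window lets blk_check_plugged() park
 * the rbios on plug->rbio_list; when the plug is released, the block
 * layer invokes btrfs_raid_unplug(), which sorts and merges them in
 * run_plug().  Roughly:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	// ... several writes that end up in raid56_parity_write() ...
 *	blk_finish_plug(&plug);	// unplug -> btrfs_raid_unplug() -> run_plug()
 */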
1805
1806/*
1807 * all parity reconstruction happens here.  We've read in everything
1808 * we can find from the drives and this does the heavy lifting of
1809 * sorting the good from the bad.
1810 */
1811static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1812{
1813        int pagenr, stripe;
1814        void **pointers;
1815        int faila = -1, failb = -1;
1816        struct page *page;
1817        blk_status_t err;
1818        int i;
1819
1820        pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1821        if (!pointers) {
1822                err = BLK_STS_RESOURCE;
1823                goto cleanup_io;
1824        }
1825
1826        faila = rbio->faila;
1827        failb = rbio->failb;
1828
1829        if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1830            rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1831                spin_lock_irq(&rbio->bio_list_lock);
1832                set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1833                spin_unlock_irq(&rbio->bio_list_lock);
1834        }
1835
1836        index_rbio_pages(rbio);
1837
1838        for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1839                /*
1840                 * Now we just use the bitmap to mark the horizontal stripes in
1841                 * which we have data when doing parity scrub.
1842                 */
1843                if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1844                    !test_bit(pagenr, rbio->dbitmap))
1845                        continue;
1846
1847                /* setup our array of pointers with pages
1848                 * from each stripe
1849                 */
1850                for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1851                        /*
1852                         * if we're rebuilding a read, we have to use
1853                         * pages from the bio list
1854                         */
1855                        if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1856                             rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1857                            (stripe == faila || stripe == failb)) {
1858                                page = page_in_rbio(rbio, stripe, pagenr, 0);
1859                        } else {
1860                                page = rbio_stripe_page(rbio, stripe, pagenr);
1861                        }
1862                        pointers[stripe] = kmap(page);
1863                }
1864
1865                /* all raid6 handling here */
1866                if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1867                        /*
1868                         * single failure, rebuild from parity raid5
1869                         * style
1870                         */
1871                        if (failb < 0) {
1872                                if (faila == rbio->nr_data) {
1873                                        /*
1874                                         * Just the P stripe has failed, without
1875                                         * a bad data or Q stripe.
1876                                         * TODO, we should redo the xor here.
1877                                         */
1878                                        err = BLK_STS_IOERR;
1879                                        goto cleanup;
1880                                }
1881                                /*
1882                                 * a single failure in raid6 is rebuilt
1883                                 * in the pstripe code below
1884                                 */
1885                                goto pstripe;
1886                        }
1887
1888                        /* make sure our ps and qs are in order */
1889                        if (faila > failb) {
1890                                int tmp = failb;
1891                                failb = faila;
1892                                faila = tmp;
1893                        }
1894
1895                        /* if the q stripe has failed, do a pstripe reconstruction
1896                         * from the xors.
1897                         * If both the q stripe and the P stripe have failed, we're
1898                         * here due to a crc mismatch and we can't give them the
1899                         * data they want
1900                         */
1901                        if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1902                                if (rbio->bbio->raid_map[faila] ==
1903                                    RAID5_P_STRIPE) {
1904                                        err = BLK_STS_IOERR;
1905                                        goto cleanup;
1906                                }
1907                                /*
1908                                 * otherwise we have one bad data stripe and
1909                                 * a good P stripe.  raid5!
1910                                 */
1911                                goto pstripe;
1912                        }
1913
1914                        if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1915                                raid6_datap_recov(rbio->real_stripes,
1916                                                  PAGE_SIZE, faila, pointers);
1917                        } else {
1918                                raid6_2data_recov(rbio->real_stripes,
1919                                                  PAGE_SIZE, faila, failb,
1920                                                  pointers);
1921                        }
1922                } else {
1923                        void *p;
1924
1925                        /* rebuild from P stripe here (raid5 or raid6) */
1926                        BUG_ON(failb != -1);
1927pstripe:
1928                        /* Copy parity block into failed block to start with */
1929                        copy_page(pointers[faila], pointers[rbio->nr_data]);
1930
1931                        /* rearrange the pointer array */
1932                        p = pointers[faila];
1933                        for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1934                                pointers[stripe] = pointers[stripe + 1];
1935                        pointers[rbio->nr_data - 1] = p;
1936
1937                        /* xor in the rest */
1938                        run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1939                }
1940                /* if we're doing this rebuild as part of an rmw, go through
1941                 * and set all of our private rbio pages in the
1942                 * failed stripes as uptodate.  This way finish_rmw will
1943                 * know they can be trusted.  If this was a read reconstruction,
1944                 * other endio functions will fiddle the uptodate bits
1945                 */
1946                if (rbio->operation == BTRFS_RBIO_WRITE) {
1947                        for (i = 0;  i < rbio->stripe_npages; i++) {
1948                                if (faila != -1) {
1949                                        page = rbio_stripe_page(rbio, faila, i);
1950                                        SetPageUptodate(page);
1951                                }
1952                                if (failb != -1) {
1953                                        page = rbio_stripe_page(rbio, failb, i);
1954                                        SetPageUptodate(page);
1955                                }
1956                        }
1957                }
1958                for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1959                        /*
1960                         * if we're rebuilding a read, we have to use
1961                         * pages from the bio list
1962                         */
1963                        if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1964                             rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1965                            (stripe == faila || stripe == failb)) {
1966                                page = page_in_rbio(rbio, stripe, pagenr, 0);
1967                        } else {
1968                                page = rbio_stripe_page(rbio, stripe, pagenr);
1969                        }
1970                        kunmap(page);
1971                }
1972        }
1973
1974        err = BLK_STS_OK;
1975cleanup:
1976        kfree(pointers);
1977
1978cleanup_io:
1979        /*
1980         * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
1981         * valid rbio which is consistent with ondisk content, thus such a
1982         * valid rbio can be cached to avoid further disk reads.
1983         */
1984        if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1985            rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1986                /*
1987                 * - In case of two failures, where rbio->failb != -1:
1988                 *
1989                 *   Do not cache this rbio since the above read reconstruction
1990                 *   (raid6_datap_recov() or raid6_2data_recov()) may have
1991                 *   changed some content of stripes which are not identical to
1992                 *   on-disk content any more, otherwise, a later write/recover
1993                 *   may steal stripe_pages from this rbio and end up with
1994                 *   corruptions or rebuild failures.
1995                 *
1996                 * - In case of single failure, where rbio->failb == -1:
1997                 *
1998                 *   Cache this rbio iff the above read reconstruction is
1999                 *   executed without problems.
2000                 */
2001                if (err == BLK_STS_OK && rbio->failb < 0)
2002                        cache_rbio_pages(rbio);
2003                else
2004                        clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2005
2006                rbio_orig_end_io(rbio, err);
2007        } else if (err == BLK_STS_OK) {
2008                rbio->faila = -1;
2009                rbio->failb = -1;
2010
2011                if (rbio->operation == BTRFS_RBIO_WRITE)
2012                        finish_rmw(rbio);
2013                else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2014                        finish_parity_scrub(rbio, 0);
2015                else
2016                        BUG();
2017        } else {
2018                rbio_orig_end_io(rbio, err);
2019        }
2020}
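
/*
 * Background sketch: the RAID6 branches above rely on the kernel's
 * generic raid6 library, which models the two parities as
 *
 *	P = D_0 ^ D_1 ^ ... ^ D_{n-1}
 *	Q = g^0*D_0 ^ g^1*D_1 ^ ... ^ g^{n-1}*D_{n-1}   over GF(2^8), g = {02}
 *
 * so a single data failure (or data plus Q) is rebuilt by XOR against P
 * in the pstripe path, a data-plus-P failure is solved from Q alone by
 * raid6_datap_recov(), and two data failures are solved from P and Q
 * together by raid6_2data_recov().
 */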
2021
2022/*
2023 * This is called only for stripes we've read from disk to
2024 * reconstruct the parity.
2025 */
2026static void raid_recover_end_io(struct bio *bio)
2027{
2028        struct btrfs_raid_bio *rbio = bio->bi_private;
2029
2030        /*
2031         * we only read stripe pages off the disk, set them
2032         * up to date if there were no errors
2033         */
2034        if (bio->bi_status)
2035                fail_bio_stripe(rbio, bio);
2036        else
2037                set_bio_pages_uptodate(bio);
2038        bio_put(bio);
2039
2040        if (!atomic_dec_and_test(&rbio->stripes_pending))
2041                return;
2042
2043        if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2044                rbio_orig_end_io(rbio, BLK_STS_IOERR);
2045        else
2046                __raid_recover_end_io(rbio);
2047}
2048
2049/*
2050 * reads everything we need off the disk to reconstruct
2051 * the parity. endio handlers trigger final reconstruction
2052 * when the IO is done.
2053 *
2054 * This is used both for reads from the higher layers and for
2055 * parity construction required to finish a rmw cycle.
2056 */
2057static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2058{
2059        int bios_to_read = 0;
2060        struct bio_list bio_list;
2061        int ret;
2062        int pagenr;
2063        int stripe;
2064        struct bio *bio;
2065
2066        bio_list_init(&bio_list);
2067
2068        ret = alloc_rbio_pages(rbio);
2069        if (ret)
2070                goto cleanup;
2071
2072        atomic_set(&rbio->error, 0);
2073
2074        /*
2075         * read everything that hasn't failed.  Thanks to the
2076         * stripe cache, it is possible that some or all of these
2077         * pages are going to be uptodate.
2078         */
2079        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2080                if (rbio->faila == stripe || rbio->failb == stripe) {
2081                        atomic_inc(&rbio->error);
2082                        continue;
2083                }
2084
2085                for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2086                        struct page *p;
2087
2088                        /*
2089                         * the rmw code may have already read this
2090                         * page in
2091                         */
2092                        p = rbio_stripe_page(rbio, stripe, pagenr);
2093                        if (PageUptodate(p))
2094                                continue;
2095
2096                        ret = rbio_add_io_page(rbio, &bio_list,
2097                                       rbio_stripe_page(rbio, stripe, pagenr),
2098                                       stripe, pagenr, rbio->stripe_len);
2099                        if (ret < 0)
2100                                goto cleanup;
2101                }
2102        }
2103
2104        bios_to_read = bio_list_size(&bio_list);
2105        if (!bios_to_read) {
2106                /*
2107                 * we might have no bios to read just because the pages
2108                 * were already up to date, or because the devices
2109                 * were gone.
2110                 */
2111                if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2112                        __raid_recover_end_io(rbio);
2113                        goto out;
2114                } else {
2115                        goto cleanup;
2116                }
2117        }
2118
2119        /*
2120         * the bbio may be freed once we submit the last bio.  Make sure
2121         * not to touch it after that
2122         */
2123        atomic_set(&rbio->stripes_pending, bios_to_read);
2124        while (1) {
2125                bio = bio_list_pop(&bio_list);
2126                if (!bio)
2127                        break;
2128
2129                bio->bi_private = rbio;
2130                bio->bi_end_io = raid_recover_end_io;
2131                bio->bi_opf = REQ_OP_READ;
2132
2133                btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2134
2135                submit_bio(bio);
2136        }
2137out:
2138        return 0;
2139
2140cleanup:
2141        if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2142            rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2143                rbio_orig_end_io(rbio, BLK_STS_IOERR);
2144
2145        while ((bio = bio_list_pop(&bio_list)))
2146                bio_put(bio);
2147
2148        return -EIO;
2149}
2150
2151/*
2152 * the main entry point for reads from the higher layers.  This
2153 * is really only called when the normal read path had a failure,
2154 * so we assume the bio they send down corresponds to a failed part
2155 * of the drive.
2156 */
2157int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2158                          struct btrfs_bio *bbio, u64 stripe_len,
2159                          int mirror_num, int generic_io)
2160{
2161        struct btrfs_raid_bio *rbio;
2162        int ret;
2163
2164        if (generic_io) {
2165                ASSERT(bbio->mirror_num == mirror_num);
2166                btrfs_io_bio(bio)->mirror_num = mirror_num;
2167        }
2168
2169        rbio = alloc_rbio(fs_info, bbio, stripe_len);
2170        if (IS_ERR(rbio)) {
2171                if (generic_io)
2172                        btrfs_put_bbio(bbio);
2173                return PTR_ERR(rbio);
2174        }
2175
2176        rbio->operation = BTRFS_RBIO_READ_REBUILD;
2177        bio_list_add(&rbio->bio_list, bio);
2178        rbio->bio_list_bytes = bio->bi_iter.bi_size;
2179
2180        rbio->faila = find_logical_bio_stripe(rbio, bio);
2181        if (rbio->faila == -1) {
2182                btrfs_warn(fs_info,
2183        "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2184                           __func__, (u64)bio->bi_iter.bi_sector << 9,
2185                           (u64)bio->bi_iter.bi_size, bbio->map_type);
2186                if (generic_io)
2187                        btrfs_put_bbio(bbio);
2188                kfree(rbio);
2189                return -EIO;
2190        }
2191
2192        if (generic_io) {
2193                btrfs_bio_counter_inc_noblocked(fs_info);
2194                rbio->generic_bio_cnt = 1;
2195        } else {
2196                btrfs_get_bbio(bbio);
2197        }
2198
2199        /*
2200         * Loop retry:
2201         * for 'mirror_num == 2', reconstruct from all other stripes.
2202         * for 'mirror_num > 2', select a stripe to fail on every retry.
2203         */
2204        if (mirror_num > 2) {
2205                /*
2206                 * 'mirror == 3' is to fail the p stripe and
2207                 * reconstruct from the q stripe.  'mirror > 3' is to
2208                 * fail a data stripe and reconstruct from p+q stripe.
2209                 */
2210                rbio->failb = rbio->real_stripes - (mirror_num - 1);
2211                ASSERT(rbio->failb > 0);
2212                if (rbio->failb <= rbio->faila)
2213                        rbio->failb--;
2214        }
2215
2216        ret = lock_stripe_add(rbio);
2217
2218        /*
2219         * __raid56_parity_recover will end the bio with
2220         * any errors it hits.  We don't want to return
2221         * its error value up the stack because our caller
2222         * will end up calling bio_endio with any nonzero
2223         * return
2224         */
2225        if (ret == 0)
2226                __raid56_parity_recover(rbio);
2227        /*
2228         * our rbio has been added to the list of
2229         * rbios that will be handled after the
2230         * currently lock owner is done
2231         */
2232        return 0;
2233
2234}
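
/*
 * Worked example (hypothetical numbers): on a 4-device RAID6 stripe,
 * real_stripes == 4 and nr_data == 2, so the mapping above gives
 * mirror_num == 3 -> failb = 4 - 2 = 2, i.e. the P stripe, forcing a
 * rebuild from Q, and mirror_num == 4 -> failb = 4 - 3 = 1, failing an
 * extra data stripe so the rebuild must use both P and Q.  If failb
 * lands on or below faila it is shifted down by one so the two injected
 * failures stay distinct.
 */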
2235
2236static void rmw_work(struct btrfs_work *work)
2237{
2238        struct btrfs_raid_bio *rbio;
2239
2240        rbio = container_of(work, struct btrfs_raid_bio, work);
2241        raid56_rmw_stripe(rbio);
2242}
2243
2244static void read_rebuild_work(struct btrfs_work *work)
2245{
2246        struct btrfs_raid_bio *rbio;
2247
2248        rbio = container_of(work, struct btrfs_raid_bio, work);
2249        __raid56_parity_recover(rbio);
2250}
2251
2252/*
2253 * The following code is used to scrub/replace the parity stripe
2254 *
2255 * Caller must have already increased bio_counter for getting @bbio.
2256 *
2257 * Note: We need to make sure all the pages that are added to the scrub/replace
2258 * raid bio are correct and will not be changed during the scrub/replace. That
2259 * is, those pages hold only metadata or file data protected by checksums.
2260 */
2261
2262struct btrfs_raid_bio *
2263raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2264                               struct btrfs_bio *bbio, u64 stripe_len,
2265                               struct btrfs_device *scrub_dev,
2266                               unsigned long *dbitmap, int stripe_nsectors)
2267{
2268        struct btrfs_raid_bio *rbio;
2269        int i;
2270
2271        rbio = alloc_rbio(fs_info, bbio, stripe_len);
2272        if (IS_ERR(rbio))
2273                return NULL;
2274        bio_list_add(&rbio->bio_list, bio);
2275        /*
2276         * This is a special bio which is used to hold the completion handler
2277         * and make the scrub rbio similar to the other types
2278         */
2279        ASSERT(!bio->bi_iter.bi_size);
2280        rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2281
2282        /*
2283         * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2284         * to the end position, so this search can start from the first parity
2285         * stripe.
2286         */
2287        for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2288                if (bbio->stripes[i].dev == scrub_dev) {
2289                        rbio->scrubp = i;
2290                        break;
2291                }
2292        }
2293        ASSERT(i < rbio->real_stripes);
2294
2295        /* For now we only support a sectorsize equal to PAGE_SIZE */
2296        ASSERT(fs_info->sectorsize == PAGE_SIZE);
2297        ASSERT(rbio->stripe_npages == stripe_nsectors);
2298        bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2299
2300        /*
2301         * We have already increased bio_counter when getting bbio, record it
2302         * so we can free it at rbio_orig_end_io().
2303         */
2304        rbio->generic_bio_cnt = 1;
2305
2306        return rbio;
2307}
2308
2309/* Used for both parity scrub and rebuilding a missing device. */
2310void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2311                            u64 logical)
2312{
2313        int stripe_offset;
2314        int index;
2315
2316        ASSERT(logical >= rbio->bbio->raid_map[0]);
2317        ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2318                                rbio->stripe_len * rbio->nr_data);
2319        stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2320        index = stripe_offset >> PAGE_SHIFT;
2321        rbio->bio_pages[index] = page;
2322}
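
/*
 * Worked example (hypothetical numbers): bio_pages[] is indexed by the
 * page offset into the full stripe, counted across the data stripes in
 * logical order.  With 4K pages, stripe_len == 64K and nr_data == 2, a
 * page at logical raid_map[0] + 80K gets stripe_offset == 80K and
 * index == 80K >> PAGE_SHIFT == 20, i.e. page 4 (zero based) of the
 * second data stripe.
 */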
2323
2324/*
2325 * We only scrub the parity for which we have correct data on the same horizontal
2326 * stripe, so we don't need to allocate pages for all the stripes.
2327 */
2328static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2329{
2330        int i;
2331        int bit;
2332        int index;
2333        struct page *page;
2334
2335        for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2336                for (i = 0; i < rbio->real_stripes; i++) {
2337                        index = i * rbio->stripe_npages + bit;
2338                        if (rbio->stripe_pages[index])
2339                                continue;
2340
2341                        page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2342                        if (!page)
2343                                return -ENOMEM;
2344                        rbio->stripe_pages[index] = page;
2345                }
2346        }
2347        return 0;
2348}
2349
2350static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2351                                         int need_check)
2352{
2353        struct btrfs_bio *bbio = rbio->bbio;
2354        void **pointers = rbio->finish_pointers;
2355        unsigned long *pbitmap = rbio->finish_pbitmap;
2356        int nr_data = rbio->nr_data;
2357        int stripe;
2358        int pagenr;
2359        int p_stripe = -1;
2360        int q_stripe = -1;
2361        struct page *p_page = NULL;
2362        struct page *q_page = NULL;
2363        struct bio_list bio_list;
2364        struct bio *bio;
2365        int is_replace = 0;
2366        int ret;
2367
2368        bio_list_init(&bio_list);
2369
2370        if (rbio->real_stripes - rbio->nr_data == 1) {
2371                p_stripe = rbio->real_stripes - 1;
2372        } else if (rbio->real_stripes - rbio->nr_data == 2) {
2373                p_stripe = rbio->real_stripes - 2;
2374                q_stripe = rbio->real_stripes - 1;
2375        } else {
2376                BUG();
2377        }
2378
2379        if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2380                is_replace = 1;
2381                bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2382        }
2383
2384        /*
2385         * The higher layers (the scrubber) are unlikely to
2386         * use this area of the disk again soon, so don't
2387         * cache it.
2388         */
2389        clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2390
2391        if (!need_check)
2392                goto writeback;
2393
2394        p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2395        if (!p_page)
2396                goto cleanup;
2397        SetPageUptodate(p_page);
2398
2399        if (q_stripe != -1) {
2400                q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2401                if (!q_page) {
2402                        __free_page(p_page);
2403                        goto cleanup;
2404                }
2405                SetPageUptodate(q_page);
2406        }
2407
2408        atomic_set(&rbio->error, 0);
2409
2410        for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2411                struct page *p;
2412                void *parity;
2413                /* first collect one page from each data stripe */
2414                for (stripe = 0; stripe < nr_data; stripe++) {
2415                        p = page_in_rbio(rbio, stripe, pagenr, 0);
2416                        pointers[stripe] = kmap(p);
2417                }
2418
2419                /* then add the parity stripe */
2420                pointers[stripe++] = kmap(p_page);
2421
2422                if (q_stripe != -1) {
2423
2424                        /*
2425                         * raid6, add the qstripe and call the
2426                         * library function to fill in our p/q
2427                         */
2428                        pointers[stripe++] = kmap(q_page);
2429
2430                        raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2431                                                pointers);
2432                } else {
2433                        /* raid5 */
2434                        copy_page(pointers[nr_data], pointers[0]);
2435                        run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2436                }
2437
2438                /* Check scrubbing parity and repair it */
2439                p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2440                parity = kmap(p);
2441                if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2442                        copy_page(parity, pointers[rbio->scrubp]);
2443                else
2444                        /* Parity is right, no need to write it back */
2445                        bitmap_clear(rbio->dbitmap, pagenr, 1);
2446                kunmap(p);
2447
2448                for (stripe = 0; stripe < nr_data; stripe++)
2449                        kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2450                kunmap(p_page);
2451        }
2452
2453        __free_page(p_page);
2454        if (q_page)
2455                __free_page(q_page);
2456
2457writeback:
2458        /*
2459         * time to start writing.  Make bios for everything from the
2460         * higher layers (the bio_list in our rbio) and our p/q.  Ignore
2461         * everything else.
2462         */
2463        for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2464                struct page *page;
2465
2466                page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2467                ret = rbio_add_io_page(rbio, &bio_list,
2468                               page, rbio->scrubp, pagenr, rbio->stripe_len);
2469                if (ret)
2470                        goto cleanup;
2471        }
2472
2473        if (!is_replace)
2474                goto submit_write;
2475
2476        for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2477                struct page *page;
2478
2479                page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2480                ret = rbio_add_io_page(rbio, &bio_list, page,
2481                                       bbio->tgtdev_map[rbio->scrubp],
2482                                       pagenr, rbio->stripe_len);
2483                if (ret)
2484                        goto cleanup;
2485        }
2486
2487submit_write:
2488        nr_data = bio_list_size(&bio_list);
2489        if (!nr_data) {
2490                /* Every parity is right */
2491                rbio_orig_end_io(rbio, BLK_STS_OK);
2492                return;
2493        }
2494
2495        atomic_set(&rbio->stripes_pending, nr_data);
2496
2497        while (1) {
2498                bio = bio_list_pop(&bio_list);
2499                if (!bio)
2500                        break;
2501
2502                bio->bi_private = rbio;
2503                bio->bi_end_io = raid_write_end_io;
2504                bio->bi_opf = REQ_OP_WRITE;
2505
2506                submit_bio(bio);
2507        }
2508        return;
2509
2510cleanup:
2511        rbio_orig_end_io(rbio, BLK_STS_IOERR);
2512
2513        while ((bio = bio_list_pop(&bio_list)))
2514                bio_put(bio);
2515}
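
/*
 * Illustrative sketch: the check phase above amounts to "recompute the
 * parity into a scratch page, compare it with what is on disk, and only
 * queue a write when they differ"; pages whose parity already matches
 * are cleared from dbitmap so the writeback loop skips them.
 * Conceptually, per page:
 *
 *	// hypothetical helper, not in the original file
 *	static bool parity_page_dirty(const u8 *ondisk, const u8 *scratch)
 *	{
 *		// scratch was filled by run_xor()/gen_syndrome() above
 *		return memcmp(ondisk, scratch, PAGE_SIZE) != 0;
 *	}
 */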
2516
2517static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2518{
2519        if (stripe >= 0 && stripe < rbio->nr_data)
2520                return 1;
2521        return 0;
2522}
2523
2524/*
2525 * While we're doing the parity check and repair, we could have errors
2526 * in reading pages off the disk.  This checks for errors and if we're
2527 * not able to read the page it'll trigger parity reconstruction.  The
2528 * parity scrub will be finished after we've reconstructed the failed
2529 * stripes
2530 */
2531static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2532{
2533        if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2534                goto cleanup;
2535
2536        if (rbio->faila >= 0 || rbio->failb >= 0) {
2537                int dfail = 0, failp = -1;
2538
2539                if (is_data_stripe(rbio, rbio->faila))
2540                        dfail++;
2541                else if (is_parity_stripe(rbio->faila))
2542                        failp = rbio->faila;
2543
2544                if (is_data_stripe(rbio, rbio->failb))
2545                        dfail++;
2546                else if (is_parity_stripe(rbio->failb))
2547                        failp = rbio->failb;
2548
2549                /*
2550                 * We cannot use the parity being scrubbed to repair the
2551                 * data, so our repair capability is reduced.
2552                 * (In the case of RAID5, we cannot repair anything.)
2553                 */
2554                if (dfail > rbio->bbio->max_errors - 1)
2555                        goto cleanup;
2556
2557                /*
2558                 * If all the data is good and only the parity is bad,
2559                 * just repair the parity.
2560                 */
2561                if (dfail == 0) {
2562                        finish_parity_scrub(rbio, 0);
2563                        return;
2564                }
2565
2566                /*
2567                 * At this point we have one corrupted data stripe and one
2568                 * corrupted parity on RAID6.  If the corrupted parity is the
2569                 * one being scrubbed we can luckily use the other parity to
2570                 * repair the data; otherwise the data stripe cannot be repaired.
2571                 */
2572                if (failp != rbio->scrubp)
2573                        goto cleanup;
2574
2575                __raid_recover_end_io(rbio);
2576        } else {
2577                finish_parity_scrub(rbio, 1);
2578        }
2579        return;
2580
2581cleanup:
2582        rbio_orig_end_io(rbio, BLK_STS_IOERR);
2583}
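
/*
 * Worked example: bbio->max_errors is 1 for RAID5 and 2 for RAID6, so
 * the "dfail > max_errors - 1" test above rejects any failed data
 * stripe on RAID5 and more than one on RAID6; the remaining checks only
 * fall through to reconstruction when the other failure, if any, is
 * exactly the parity stripe being scrubbed (the non-scrubbed parity can
 * then be trusted for the rebuild).
 */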
2584
2585/*
2586 * end io for the read phase of the parity scrub.  All the bios here are
2587 * physical stripe bios we've read from the disk so we can check the parity
2588 * of the stripe.
2589 *
2590 * This will usually kick off finish_parity_scrub once all the bios are read
2591 * in, but it may trigger parity reconstruction if we had any errors along the way
2592 */
2593static void raid56_parity_scrub_end_io(struct bio *bio)
2594{
2595        struct btrfs_raid_bio *rbio = bio->bi_private;
2596
2597        if (bio->bi_status)
2598                fail_bio_stripe(rbio, bio);
2599        else
2600                set_bio_pages_uptodate(bio);
2601
2602        bio_put(bio);
2603
2604        if (!atomic_dec_and_test(&rbio->stripes_pending))
2605                return;
2606
2607        /*
2608         * this will normally call finish_rmw to start our write
2609         * but if there are any failed stripes we'll reconstruct
2610         * from parity first
2611         */
2612        validate_rbio_for_parity_scrub(rbio);
2613}
2614
2615static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2616{
2617        int bios_to_read = 0;
2618        struct bio_list bio_list;
2619        int ret;
2620        int pagenr;
2621        int stripe;
2622        struct bio *bio;
2623
2624        bio_list_init(&bio_list);
2625
2626        ret = alloc_rbio_essential_pages(rbio);
2627        if (ret)
2628                goto cleanup;
2629
2630        atomic_set(&rbio->error, 0);
2631        /*
2632         * build a list of bios to read all the missing parts of this
2633         * stripe
2634         */
2635        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2636                for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2637                        struct page *page;
2638                        /*
2639                         * we want to find all the pages missing from
2640                         * the rbio and read them from the disk.  If
2641                         * page_in_rbio finds a page in the bio list
2642                         * we don't need to read it off the stripe.
2643                         */
2644                        page = page_in_rbio(rbio, stripe, pagenr, 1);
2645                        if (page)
2646                                continue;
2647
2648                        page = rbio_stripe_page(rbio, stripe, pagenr);
2649                        /*
2650                         * the bio cache may have handed us an uptodate
2651                         * page.  If so, be happy and use it
2652                         */
2653                        if (PageUptodate(page))
2654                                continue;
2655
2656                        ret = rbio_add_io_page(rbio, &bio_list, page,
2657                                       stripe, pagenr, rbio->stripe_len);
2658                        if (ret)
2659                                goto cleanup;
2660                }
2661        }
2662
2663        bios_to_read = bio_list_size(&bio_list);
2664        if (!bios_to_read) {
2665                /*
2666                 * this can happen if others have merged with
2667                 * us; it means there is nothing left to read.
2668                 * But if there are missing devices it may not be
2669                 * safe to do the full stripe write yet.
2670                 */
2671                goto finish;
2672        }
2673
2674        /*
2675         * the bbio may be freed once we submit the last bio.  Make sure
2676         * not to touch it after that
2677         */
2678        atomic_set(&rbio->stripes_pending, bios_to_read);
2679        while (1) {
2680                bio = bio_list_pop(&bio_list);
2681                if (!bio)
2682                        break;
2683
2684                bio->bi_private = rbio;
2685                bio->bi_end_io = raid56_parity_scrub_end_io;
2686                bio->bi_opf = REQ_OP_READ;
2687
2688                btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2689
2690                submit_bio(bio);
2691        }
2692        /* the actual write will happen once the reads are done */
2693        return;
2694
2695cleanup:
2696        rbio_orig_end_io(rbio, BLK_STS_IOERR);
2697
2698        while ((bio = bio_list_pop(&bio_list)))
2699                bio_put(bio);
2700
2701        return;
2702
2703finish:
2704        validate_rbio_for_parity_scrub(rbio);
2705}
2706
2707static void scrub_parity_work(struct btrfs_work *work)
2708{
2709        struct btrfs_raid_bio *rbio;
2710
2711        rbio = container_of(work, struct btrfs_raid_bio, work);
2712        raid56_parity_scrub_stripe(rbio);
2713}
2714
2715void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2716{
2717        if (!lock_stripe_add(rbio))
2718                start_async_work(rbio, scrub_parity_work);
2719}
2720
2721/* The following code is used for dev replace of a missing RAID 5/6 device. */
2722
2723struct btrfs_raid_bio *
2724raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2725                          struct btrfs_bio *bbio, u64 length)
2726{
2727        struct btrfs_raid_bio *rbio;
2728
2729        rbio = alloc_rbio(fs_info, bbio, length);
2730        if (IS_ERR(rbio))
2731                return NULL;
2732
2733        rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2734        bio_list_add(&rbio->bio_list, bio);
2735        /*
2736         * This is a special bio which is used to hold the completion handler
2737         * and make the scrub rbio similar to the other types
2738         */
2739        ASSERT(!bio->bi_iter.bi_size);
2740
2741        rbio->faila = find_logical_bio_stripe(rbio, bio);
2742        if (rbio->faila == -1) {
2743                BUG();
2744                kfree(rbio);
2745                return NULL;
2746        }
2747
2748        /*
2749         * When we get bbio, we have already increased bio_counter, record it
2750         * so we can free it at rbio_orig_end_io()
2751         */
2752        rbio->generic_bio_cnt = 1;
2753
2754        return rbio;
2755}
2756
2757void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2758{
2759        if (!lock_stripe_add(rbio))
2760                start_async_work(rbio, read_rebuild_work);
2761}
2762