linux/fs/btrfs/raid56.c
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/vmalloc.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT     1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT          2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT    3

#define RBIO_CACHE_SIZE 1024

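/*
 * the four kinds of IO an rbio can be carrying out: a plain write
 * (full stripe or rmw), a rebuild to service a read from higher up,
 * a parity check/repair pass for scrub, and a rebuild of a missing
 * device's stripe on behalf of scrub
 */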
enum btrfs_rbio_ops {
        BTRFS_RBIO_WRITE,
        BTRFS_RBIO_READ_REBUILD,
        BTRFS_RBIO_PARITY_SCRUB,
        BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
        struct btrfs_fs_info *fs_info;
        struct btrfs_bio *bbio;

        /* while we're doing rmw on a stripe
         * we put it into a hash table so we can
         * lock the stripe and merge more rbios
         * into it.
         */
        struct list_head hash_list;

        /*
         * LRU list for the stripe cache
         */
        struct list_head stripe_cache;

        /*
         * for scheduling work in the helper threads
         */
        struct btrfs_work work;

        /*
         * bio list and bio_list_lock are used
         * to add more bios into the stripe
         * in hopes of avoiding the full rmw
         */
        struct bio_list bio_list;
        spinlock_t bio_list_lock;

        /* also protected by the bio_list_lock, the
         * plug list is used by the plugging code
         * to collect partial bios while plugged.  The
         * stripe locking code also uses it to hand off
         * the stripe lock to the next pending IO
         */
        struct list_head plug_list;

        /*
         * flags that tell us if it is safe to
         * merge with this bio
         */
        unsigned long flags;

        /* size of each individual stripe on disk */
        int stripe_len;

        /* number of data stripes (no p/q) */
        int nr_data;

        int real_stripes;

        int stripe_npages;
        /*
         * set if we're doing a parity rebuild
         * for a read from higher up, which is handled
         * differently from a parity rebuild as part of
         * rmw
         */
        enum btrfs_rbio_ops operation;

        /* first bad stripe */
        int faila;

        /* second bad stripe (for raid6 use) */
        int failb;

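        /* for parity scrub: the stripe number whose parity we're checking */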
        int scrubp;
        /*
         * number of pages needed to represent the full
         * stripe
         */
        int nr_pages;

        /*
         * size of all the bios in the bio_list.  This
         * helps us decide if the rbio maps to a full
         * stripe or not
         */
        int bio_list_bytes;

        int generic_bio_cnt;

        atomic_t refs;

        atomic_t stripes_pending;

        atomic_t error;
        /*
         * these are two arrays of pointers.  We allocate the
         * rbio big enough to hold them both and setup their
         * locations when the rbio is allocated
         */

        /* pointers to pages that we allocated for
         * reading/writing stripes directly from the disk (including P/Q)
         */
        struct page **stripe_pages;

        /*
         * pointers to the pages in the bio_list.  Stored
         * here for faster lookup
         */
        struct page **bio_pages;

        /*
         * bitmap to record which horizontal stripe has data
         */
        unsigned long *dbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
                                         int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
        struct btrfs_stripe_hash_table *table;
        struct btrfs_stripe_hash *cur;
        struct btrfs_stripe_hash *h;
        int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
        int i;
        int table_size;

        if (info->stripe_hash_table)
                return 0;

        /*
         * The table is large, starting with order 4 and can go as high as
         * order 7 in case lock debugging is turned on.
         *
         * Try harder to allocate and fall back to vmalloc to lower the
         * chance of a failing mount.
         */
        table_size = sizeof(*table) + sizeof(*h) * num_entries;
        table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!table) {
                table = vzalloc(table_size);
                if (!table)
                        return -ENOMEM;
        }

        spin_lock_init(&table->cache_lock);
        INIT_LIST_HEAD(&table->stripe_cache);

        h = table->table;

        for (i = 0; i < num_entries; i++) {
                cur = h + i;
                INIT_LIST_HEAD(&cur->hash_list);
                spin_lock_init(&cur->lock);
                init_waitqueue_head(&cur->wait);
        }

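        /*
         * if we lost a race and someone else installed a table first,
         * theirs is live in info->stripe_hash_table; free the one we
         * just built, not the winner's copy
         */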
        if (cmpxchg(&info->stripe_hash_table, NULL, table))
                kvfree(table);
        return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
        int i;
        char *s;
        char *d;
        int ret;

        ret = alloc_rbio_pages(rbio);
        if (ret)
                return;

        for (i = 0; i < rbio->nr_pages; i++) {
                if (!rbio->bio_pages[i])
                        continue;

                s = kmap(rbio->bio_pages[i]);
                d = kmap(rbio->stripe_pages[i]);

                memcpy(d, s, PAGE_SIZE);

                kunmap(rbio->bio_pages[i]);
                kunmap(rbio->stripe_pages[i]);
                SetPageUptodate(rbio->stripe_pages[i]);
        }
        set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
        u64 num = rbio->bbio->raid_map[0];

        /*
         * we shift down quite a bit.  We're using byte
         * addressing, and most of the lower bits are zeros.
         * This tends to upset hash_64, and it consistently
         * returns just one or two different values.
         *
         * shifting off the lower bits fixes things.
         */
        return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
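
/*
 * for example, two stripes starting at logical addresses 0x10000000
 * and 0x10010000 differ only above bit 16, so hashing the shifted
 * values (0x1000 and 0x1001) spreads them over different buckets
 * even though their low bits are all zero
 */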

/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
        int i;
        struct page *s;
        struct page *d;

        if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
                return;

        for (i = 0; i < dest->nr_pages; i++) {
                s = src->stripe_pages[i];
                if (!s || !PageUptodate(s)) {
                        continue;
                }

                d = dest->stripe_pages[i];
                if (d)
                        __free_page(d);

                dest->stripe_pages[i] = s;
                src->stripe_pages[i] = NULL;
        }
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
                       struct btrfs_raid_bio *victim)
{
        bio_list_merge(&dest->bio_list, &victim->bio_list);
        dest->bio_list_bytes += victim->bio_list_bytes;
        dest->generic_bio_cnt += victim->generic_bio_cnt;
        bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
        int bucket = rbio_bucket(rbio);
        struct btrfs_stripe_hash_table *table;
        struct btrfs_stripe_hash *h;
        int freeit = 0;

        /*
         * check the bit again under the hash table lock.
         */
        if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
                return;

        table = rbio->fs_info->stripe_hash_table;
        h = table->table + bucket;

        /* hold the lock for the bucket because we may be
         * removing it from the hash table
         */
        spin_lock(&h->lock);

        /*
         * hold the lock for the bio list because we need
         * to make sure the bio list is empty
         */
        spin_lock(&rbio->bio_list_lock);

        if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
                list_del_init(&rbio->stripe_cache);
                table->cache_size -= 1;
                freeit = 1;

                /* if the bio list isn't empty, this rbio is
                 * still involved in an IO.  We take it out
                 * of the cache list, and drop the ref that
                 * was held for the list.
                 *
                 * If the bio_list was empty, we also remove
                 * the rbio from the hash_table, and drop
                 * the corresponding ref
                 */
                if (bio_list_empty(&rbio->bio_list)) {
                        if (!list_empty(&rbio->hash_list)) {
                                list_del_init(&rbio->hash_list);
                                atomic_dec(&rbio->refs);
                                BUG_ON(!list_empty(&rbio->plug_list));
                        }
                }
        }

        spin_unlock(&rbio->bio_list_lock);
        spin_unlock(&h->lock);

        if (freeit)
                __free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
        struct btrfs_stripe_hash_table *table;
        unsigned long flags;

        if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
                return;

        table = rbio->fs_info->stripe_hash_table;

        spin_lock_irqsave(&table->cache_lock, flags);
        __remove_rbio_from_cache(rbio);
        spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
        struct btrfs_stripe_hash_table *table;
        unsigned long flags;
        struct btrfs_raid_bio *rbio;

        table = info->stripe_hash_table;

        spin_lock_irqsave(&table->cache_lock, flags);
        while (!list_empty(&table->stripe_cache)) {
                rbio = list_entry(table->stripe_cache.next,
                                  struct btrfs_raid_bio,
                                  stripe_cache);
                __remove_rbio_from_cache(rbio);
        }
        spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
        if (!info->stripe_hash_table)
                return;
        btrfs_clear_rbio_cache(info);
        kvfree(info->stripe_hash_table);
        info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
        struct btrfs_stripe_hash_table *table;
        unsigned long flags;

        if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
                return;

        table = rbio->fs_info->stripe_hash_table;

        spin_lock_irqsave(&table->cache_lock, flags);
        spin_lock(&rbio->bio_list_lock);

        /* bump our ref if we were not in the list before */
        if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
                atomic_inc(&rbio->refs);

        if (!list_empty(&rbio->stripe_cache)) {
                list_move(&rbio->stripe_cache, &table->stripe_cache);
        } else {
                list_add(&rbio->stripe_cache, &table->stripe_cache);
                table->cache_size += 1;
        }

        spin_unlock(&rbio->bio_list_lock);

        if (table->cache_size > RBIO_CACHE_SIZE) {
                struct btrfs_raid_bio *found;

                found = list_entry(table->stripe_cache.prev,
                                  struct btrfs_raid_bio,
                                  stripe_cache);

                if (found != rbio)
                        __remove_rbio_from_cache(found);
        }

        spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
        int src_off = 0;
        int xor_src_cnt = 0;
        void *dest = pages[src_cnt];

        while (src_cnt > 0) {
                xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
                xor_blocks(xor_src_cnt, len, dest, pages + src_off);

                src_cnt -= xor_src_cnt;
                src_off += xor_src_cnt;
        }
}
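
/*
 * typical use, as in finish_rmw below: seed the parity page with the
 * first data page, then xor the remaining nr_data - 1 data pages in:
 *
 *      memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
 *      run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 */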

/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
        unsigned long size = rbio->bio_list_bytes;
        int ret = 1;

        if (size != rbio->nr_data * rbio->stripe_len)
                ret = 0;

        BUG_ON(size > rbio->nr_data * rbio->stripe_len);
        return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&rbio->bio_list_lock, flags);
        ret = __rbio_is_full(rbio);
        spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
        return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
                          struct btrfs_raid_bio *cur)
{
        if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
            test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
                return 0;

        /*
         * we can't merge with cached rbios, since the
         * idea is that when we merge the destination
         * rbio is going to run our IO for us.  We can
         * steal from cached rbios though, other functions
         * handle that.
         */
        if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
            test_bit(RBIO_CACHE_BIT, &cur->flags))
                return 0;

        if (last->bbio->raid_map[0] !=
            cur->bbio->raid_map[0])
                return 0;

        /* we can't merge with different operations */
        if (last->operation != cur->operation)
                return 0;
        /*
         * a scrub has to read the full stripe from the drive, then
         * check and repair the parity and write back the new results.
         *
         * We're not allowed to add any new bios to the
         * bio list here, anyone else that wants to
         * change this stripe needs to do their own rmw.
         */
        if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
            cur->operation == BTRFS_RBIO_PARITY_SCRUB)
                return 0;

        if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
            cur->operation == BTRFS_RBIO_REBUILD_MISSING)
                return 0;

        return 1;
}

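/*
 * stripe_pages is one flat array covering every stripe in order, so
 * page 'index' of stripe N lives at N * stripe_npages + index
 */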
static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
                                  int index)
{
        return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
                                     int index)
{
        return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
        return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
        if (rbio->nr_data + 1 == rbio->real_stripes)
                return NULL;
        return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}

/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
        int bucket = rbio_bucket(rbio);
        struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
        struct btrfs_raid_bio *cur;
        struct btrfs_raid_bio *pending;
        unsigned long flags;
        DEFINE_WAIT(wait);
        struct btrfs_raid_bio *freeit = NULL;
        struct btrfs_raid_bio *cache_drop = NULL;
        int ret = 0;
        int walk = 0;

        spin_lock_irqsave(&h->lock, flags);
        list_for_each_entry(cur, &h->hash_list, hash_list) {
                walk++;
                if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
                        spin_lock(&cur->bio_list_lock);

                        /* can we steal this cached rbio's pages? */
                        if (bio_list_empty(&cur->bio_list) &&
                            list_empty(&cur->plug_list) &&
                            test_bit(RBIO_CACHE_BIT, &cur->flags) &&
                            !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
                                list_del_init(&cur->hash_list);
                                atomic_dec(&cur->refs);

                                steal_rbio(cur, rbio);
                                cache_drop = cur;
                                spin_unlock(&cur->bio_list_lock);

                                goto lockit;
                        }

                        /* can we merge into the lock owner? */
                        if (rbio_can_merge(cur, rbio)) {
                                merge_rbio(cur, rbio);
                                spin_unlock(&cur->bio_list_lock);
                                freeit = rbio;
                                ret = 1;
                                goto out;
                        }


                        /*
                         * we couldn't merge with the running
                         * rbio, see if we can merge with the
                         * pending ones.  We don't have to
                         * check for rmw_locked because there
                         * is no way they are inside finish_rmw
                         * right now
                         */
                        list_for_each_entry(pending, &cur->plug_list,
                                            plug_list) {
                                if (rbio_can_merge(pending, rbio)) {
                                        merge_rbio(pending, rbio);
                                        spin_unlock(&cur->bio_list_lock);
                                        freeit = rbio;
                                        ret = 1;
                                        goto out;
                                }
                        }

                        /* no merging, put us on the tail of the plug list,
                         * our rbio will be started when the currently
                         * running rbio unlocks
                         */
                        list_add_tail(&rbio->plug_list, &cur->plug_list);
                        spin_unlock(&cur->bio_list_lock);
                        ret = 1;
                        goto out;
                }
        }
lockit:
        atomic_inc(&rbio->refs);
        list_add(&rbio->hash_list, &h->hash_list);
out:
        spin_unlock_irqrestore(&h->lock, flags);
        if (cache_drop)
                remove_rbio_from_cache(cache_drop);
        if (freeit)
                __free_raid_bio(freeit);
        return ret;
}

/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
        int bucket;
        struct btrfs_stripe_hash *h;
        unsigned long flags;
        int keep_cache = 0;

        bucket = rbio_bucket(rbio);
        h = rbio->fs_info->stripe_hash_table->table + bucket;

        if (list_empty(&rbio->plug_list))
                cache_rbio(rbio);

        spin_lock_irqsave(&h->lock, flags);
        spin_lock(&rbio->bio_list_lock);

        if (!list_empty(&rbio->hash_list)) {
                /*
                 * if we're still cached and there is no other IO
                 * to perform, just leave this rbio here for others
                 * to steal from later
                 */
                if (list_empty(&rbio->plug_list) &&
                    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
                        keep_cache = 1;
                        clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
                        BUG_ON(!bio_list_empty(&rbio->bio_list));
                        goto done;
                }

                list_del_init(&rbio->hash_list);
                atomic_dec(&rbio->refs);

                /*
                 * we use the plug list to hold all the rbios
                 * waiting for the chance to lock this stripe.
                 * hand the lock over to one of them.
                 */
                if (!list_empty(&rbio->plug_list)) {
                        struct btrfs_raid_bio *next;
                        struct list_head *head = rbio->plug_list.next;

                        next = list_entry(head, struct btrfs_raid_bio,
                                          plug_list);

                        list_del_init(&rbio->plug_list);

                        list_add(&next->hash_list, &h->hash_list);
                        atomic_inc(&next->refs);
                        spin_unlock(&rbio->bio_list_lock);
                        spin_unlock_irqrestore(&h->lock, flags);

                        if (next->operation == BTRFS_RBIO_READ_REBUILD)
                                async_read_rebuild(next);
                        else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
                                steal_rbio(rbio, next);
                                async_read_rebuild(next);
                        } else if (next->operation == BTRFS_RBIO_WRITE) {
                                steal_rbio(rbio, next);
                                async_rmw_stripe(next);
                        } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
                                steal_rbio(rbio, next);
                                async_scrub_parity(next);
                        }

                        goto done_nolock;
                        /*
                         * The barrier for this waitqueue_active is not needed,
                         * we're protected by h->lock and can't miss a wakeup.
                         */
                } else if (waitqueue_active(&h->wait)) {
                        spin_unlock(&rbio->bio_list_lock);
                        spin_unlock_irqrestore(&h->lock, flags);
                        wake_up(&h->wait);
                        goto done_nolock;
                }
        }
done:
        spin_unlock(&rbio->bio_list_lock);
        spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
        if (!keep_cache)
                remove_rbio_from_cache(rbio);
}

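/*
 * drop a reference on the rbio.  The final reference frees the stripe
 * pages and the bbio; by then the rbio must already be off the hash
 * list, the stripe cache, and the bio list
 */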
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
        int i;

        WARN_ON(atomic_read(&rbio->refs) < 0);
        if (!atomic_dec_and_test(&rbio->refs))
                return;

        WARN_ON(!list_empty(&rbio->stripe_cache));
        WARN_ON(!list_empty(&rbio->hash_list));
        WARN_ON(!bio_list_empty(&rbio->bio_list));

        for (i = 0; i < rbio->nr_pages; i++) {
                if (rbio->stripe_pages[i]) {
                        __free_page(rbio->stripe_pages[i]);
                        rbio->stripe_pages[i] = NULL;
                }
        }

        btrfs_put_bbio(rbio->bbio);
        kfree(rbio);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
        unlock_stripe(rbio);
        __free_raid_bio(rbio);
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
{
        struct bio *cur = bio_list_get(&rbio->bio_list);
        struct bio *next;

        if (rbio->generic_bio_cnt)
                btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

        free_raid_bio(rbio);

        while (cur) {
                next = cur->bi_next;
                cur->bi_next = NULL;
                cur->bi_error = err;
                bio_endio(cur);
                cur = next;
        }
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
        struct btrfs_raid_bio *rbio = bio->bi_private;
        int err = bio->bi_error;
        int max_errors;

        if (err)
                fail_bio_stripe(rbio, bio);

        bio_put(bio);

        if (!atomic_dec_and_test(&rbio->stripes_pending))
                return;

        err = 0;

        /* OK, we have written all the stripes we need to. */
        max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
                     0 : rbio->bbio->max_errors;
        if (atomic_read(&rbio->error) > max_errors)
                err = -EIO;

        rbio_orig_end_io(rbio, err);
}

/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
                                 int index, int pagenr, int bio_list_only)
{
        int chunk_page;
        struct page *p = NULL;

        chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

        spin_lock_irq(&rbio->bio_list_lock);
        p = rbio->bio_pages[chunk_page];
        spin_unlock_irq(&rbio->bio_list_lock);

        if (p || bio_list_only)
                return p;

        return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
        return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
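
/*
 * e.g. a 64KiB stripe_len with 4KiB pages on a 6 device array needs
 * 16 * 6 = 96 pages to cover the full stripe, parity included
 */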

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
                          struct btrfs_bio *bbio, u64 stripe_len)
{
        struct btrfs_raid_bio *rbio;
        int nr_data = 0;
        int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
        int num_pages = rbio_nr_pages(stripe_len, real_stripes);
        int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
        void *p;

        rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
                       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
                       sizeof(long), GFP_NOFS);
        if (!rbio)
                return ERR_PTR(-ENOMEM);

        bio_list_init(&rbio->bio_list);
        INIT_LIST_HEAD(&rbio->plug_list);
        spin_lock_init(&rbio->bio_list_lock);
        INIT_LIST_HEAD(&rbio->stripe_cache);
        INIT_LIST_HEAD(&rbio->hash_list);
        rbio->bbio = bbio;
        rbio->fs_info = root->fs_info;
        rbio->stripe_len = stripe_len;
        rbio->nr_pages = num_pages;
        rbio->real_stripes = real_stripes;
        rbio->stripe_npages = stripe_npages;
        rbio->faila = -1;
        rbio->failb = -1;
        atomic_set(&rbio->refs, 1);
        atomic_set(&rbio->error, 0);
        atomic_set(&rbio->stripes_pending, 0);

        /*
         * the stripe_pages and bio_pages array point to the extra
         * memory we allocated past the end of the rbio
         */
        p = rbio + 1;
        rbio->stripe_pages = p;
        rbio->bio_pages = p + sizeof(struct page *) * num_pages;
        rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

        if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
                nr_data = real_stripes - 1;
        else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
                nr_data = real_stripes - 2;
        else
                BUG();

        rbio->nr_data = nr_data;
        return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
        int i;
        struct page *page;

        for (i = 0; i < rbio->nr_pages; i++) {
                if (rbio->stripe_pages[i])
                        continue;
                page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
                if (!page)
                        return -ENOMEM;
                rbio->stripe_pages[i] = page;
        }
        return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
        int i;
        struct page *page;

        i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

        for (; i < rbio->nr_pages; i++) {
                if (rbio->stripe_pages[i])
                        continue;
                page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
                if (!page)
                        return -ENOMEM;
                rbio->stripe_pages[i] = page;
        }
        return 0;
}

/*
 * add a single page from a specific stripe into our list of bios for IO.
 * This will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
                            struct bio_list *bio_list,
                            struct page *page,
                            int stripe_nr,
                            unsigned long page_index,
                            unsigned long bio_max_len)
{
        struct bio *last = bio_list->tail;
        u64 last_end = 0;
        int ret;
        struct bio *bio;
        struct btrfs_bio_stripe *stripe;
        u64 disk_start;

        stripe = &rbio->bbio->stripes[stripe_nr];
        disk_start = stripe->physical + (page_index << PAGE_SHIFT);

        /* if the device is missing, just fail this stripe */
        if (!stripe->dev->bdev)
                return fail_rbio_index(rbio, stripe_nr);

        /* see if we can add this page onto our existing bio */
        if (last) {
                last_end = (u64)last->bi_iter.bi_sector << 9;
                last_end += last->bi_iter.bi_size;

                /*
                 * we can't merge these if they are from different
                 * devices or if they are not contiguous
                 */
                if (last_end == disk_start && stripe->dev->bdev &&
                    !last->bi_error &&
                    last->bi_bdev == stripe->dev->bdev) {
                        ret = bio_add_page(last, page, PAGE_SIZE, 0);
                        if (ret == PAGE_SIZE)
                                return 0;
                }
        }

        /* put a new bio on the list */
        bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
        if (!bio)
                return -ENOMEM;

        bio->bi_iter.bi_size = 0;
        bio->bi_bdev = stripe->dev->bdev;
        bio->bi_iter.bi_sector = disk_start >> 9;

        bio_add_page(bio, page, PAGE_SIZE, 0);
        bio_list_add(bio_list, bio);
        return 0;
}

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
        if (rbio->faila >= 0 || rbio->failb >= 0) {
                BUG_ON(rbio->faila == rbio->real_stripes - 1);
                __raid56_parity_recover(rbio);
        } else {
                finish_rmw(rbio);
        }
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
        struct bio *bio;
        u64 start;
        unsigned long stripe_offset;
        unsigned long page_index;
        struct page *p;
        int i;

        spin_lock_irq(&rbio->bio_list_lock);
        bio_list_for_each(bio, &rbio->bio_list) {
                start = (u64)bio->bi_iter.bi_sector << 9;
                stripe_offset = start - rbio->bbio->raid_map[0];
                page_index = stripe_offset >> PAGE_SHIFT;

                for (i = 0; i < bio->bi_vcnt; i++) {
                        p = bio->bi_io_vec[i].bv_page;
                        rbio->bio_pages[page_index + i] = p;
                }
        }
        spin_unlock_irq(&rbio->bio_list_lock);
}

/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
        struct btrfs_bio *bbio = rbio->bbio;
        void *pointers[rbio->real_stripes];
        int nr_data = rbio->nr_data;
        int stripe;
        int pagenr;
        int p_stripe = -1;
        int q_stripe = -1;
        struct bio_list bio_list;
        struct bio *bio;
        int ret;

        bio_list_init(&bio_list);

        if (rbio->real_stripes - rbio->nr_data == 1) {
                p_stripe = rbio->real_stripes - 1;
        } else if (rbio->real_stripes - rbio->nr_data == 2) {
                p_stripe = rbio->real_stripes - 2;
                q_stripe = rbio->real_stripes - 1;
        } else {
                BUG();
        }
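
        /*
         * e.g. with four data stripes, raid5 lays out D0 D1 D2 D3 P
         * (p_stripe = 4) and raid6 lays out D0 D1 D2 D3 P Q
         * (p_stripe = 4, q_stripe = 5)
         */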

        /* at this point we either have a full stripe,
         * or we've read the full stripe from the drive.
         * recalculate the parity and write the new results.
         *
         * We're not allowed to add any new bios to the
         * bio list here, anyone else that wants to
         * change this stripe needs to do their own rmw.
         */
        spin_lock_irq(&rbio->bio_list_lock);
        set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
        spin_unlock_irq(&rbio->bio_list_lock);

        atomic_set(&rbio->error, 0);

        /*
         * now that we've set rmw_locked, run through the
         * bio list one last time and map the page pointers
         *
         * We don't cache full rbios because we're assuming
         * the higher layers are unlikely to use this area of
         * the disk again soon.  If they do use it again,
         * hopefully they will send another full bio.
         */
        index_rbio_pages(rbio);
        if (!rbio_is_full(rbio))
                cache_rbio_pages(rbio);
        else
                clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

        for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
                struct page *p;
                /* first collect one page from each data stripe */
                for (stripe = 0; stripe < nr_data; stripe++) {
                        p = page_in_rbio(rbio, stripe, pagenr, 0);
                        pointers[stripe] = kmap(p);
                }

                /* then add the parity stripe */
                p = rbio_pstripe_page(rbio, pagenr);
                SetPageUptodate(p);
                pointers[stripe++] = kmap(p);

                if (q_stripe != -1) {

                        /*
                         * raid6, add the qstripe and call the
                         * library function to fill in our p/q
                         */
                        p = rbio_qstripe_page(rbio, pagenr);
                        SetPageUptodate(p);
                        pointers[stripe++] = kmap(p);

                        raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
                                                pointers);
                } else {
                        /* raid5 */
                        memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
                        run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
                }


                for (stripe = 0; stripe < rbio->real_stripes; stripe++)
                        kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
        }

        /*
         * time to start writing.  Make bios for everything from the
         * higher layers (the bio_list in our rbio) and our p/q.  Ignore
         * everything else.
         */
        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
                for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
                        struct page *page;
                        if (stripe < rbio->nr_data) {
                                page = page_in_rbio(rbio, stripe, pagenr, 1);
                                if (!page)
                                        continue;
                        } else {
                                page = rbio_stripe_page(rbio, stripe, pagenr);
                        }

                        ret = rbio_add_io_page(rbio, &bio_list,
                                       page, stripe, pagenr, rbio->stripe_len);
                        if (ret)
                                goto cleanup;
                }
        }

        if (likely(!bbio->num_tgtdevs))
                goto write_data;

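        /*
         * a device replace is running; duplicate the writes onto the
         * replacement target using the bbio's tgtdev mapping
         */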
        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
                if (!bbio->tgtdev_map[stripe])
                        continue;

                for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
                        struct page *page;
                        if (stripe < rbio->nr_data) {
                                page = page_in_rbio(rbio, stripe, pagenr, 1);
                                if (!page)
                                        continue;
                        } else {
                                page = rbio_stripe_page(rbio, stripe, pagenr);
                        }

                        ret = rbio_add_io_page(rbio, &bio_list, page,
                                               rbio->bbio->tgtdev_map[stripe],
                                               pagenr, rbio->stripe_len);
                        if (ret)
                                goto cleanup;
                }
        }

write_data:
        atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
        BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

        while (1) {
                bio = bio_list_pop(&bio_list);
                if (!bio)
                        break;

                bio->bi_private = rbio;
                bio->bi_end_io = raid_write_end_io;
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

                submit_bio(bio);
        }
        return;

cleanup:
        rbio_orig_end_io(rbio, -EIO);
}

/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
                           struct bio *bio)
{
        u64 physical = bio->bi_iter.bi_sector;
        u64 stripe_start;
        int i;
        struct btrfs_bio_stripe *stripe;

        physical <<= 9;

        for (i = 0; i < rbio->bbio->num_stripes; i++) {
                stripe = &rbio->bbio->stripes[i];
                stripe_start = stripe->physical;
                if (physical >= stripe_start &&
                    physical < stripe_start + rbio->stripe_len &&
                    bio->bi_bdev == stripe->dev->bdev) {
                        return i;
                }
        }
        return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
                                   struct bio *bio)
{
        u64 logical = bio->bi_iter.bi_sector;
        u64 stripe_start;
        int i;

        logical <<= 9;

        for (i = 0; i < rbio->nr_data; i++) {
                stripe_start = rbio->bbio->raid_map[i];
                if (logical >= stripe_start &&
                    logical < stripe_start + rbio->stripe_len) {
                        return i;
                }
        }
        return -1;
}

/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&rbio->bio_list_lock, flags);

        /* we already know this stripe is bad, move on */
        if (rbio->faila == failed || rbio->failb == failed)
                goto out;

        if (rbio->faila == -1) {
                /* first failure on this rbio */
                rbio->faila = failed;
                atomic_inc(&rbio->error);
        } else if (rbio->failb == -1) {
                /* second failure on this rbio */
                rbio->failb = failed;
                atomic_inc(&rbio->error);
        } else {
                ret = -EIO;
        }
out:
        spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

        return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
                           struct bio *bio)
{
        int failed = find_bio_stripe(rbio, bio);

        if (failed < 0)
                return -EIO;

        return fail_rbio_index(rbio, failed);
}

/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
        int i;
        struct page *p;

        for (i = 0; i < bio->bi_vcnt; i++) {
                p = bio->bi_io_vec[i].bv_page;
                SetPageUptodate(p);
        }
}

/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
        struct btrfs_raid_bio *rbio = bio->bi_private;

        if (bio->bi_error)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);

        bio_put(bio);

        if (!atomic_dec_and_test(&rbio->stripes_pending))
                return;

        if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
                goto cleanup;

        /*
         * this will normally call finish_rmw to start our write
         * but if there are any failed stripes we'll reconstruct
         * from parity first
         */
        validate_rbio_for_rmw(rbio);
        return;

cleanup:

        rbio_orig_end_io(rbio, -EIO);
}

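/* hand the rmw for this rbio off to a helper thread */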
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
        btrfs_init_work(&rbio->work, btrfs_rmw_helper,
                        rmw_work, NULL, NULL);

        btrfs_queue_work(rbio->fs_info->rmw_workers,
                         &rbio->work);
}

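/* hand the read/rebuild for this rbio off to a helper thread */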
1492static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1493{
1494        btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1495                        read_rebuild_work, NULL, NULL);
1496
1497        btrfs_queue_work(rbio->fs_info->rmw_workers,
1498                         &rbio->work);
1499}
1500
1501/*
1502 * the stripe must be locked by the caller.  It will
1503 * unlock after all the writes are done
1504 */
1505static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1506{
1507        int bios_to_read = 0;
1508        struct bio_list bio_list;
1509        int ret;
1510        int pagenr;
1511        int stripe;
1512        struct bio *bio;
1513
1514        bio_list_init(&bio_list);
1515
1516        ret = alloc_rbio_pages(rbio);
1517        if (ret)
1518                goto cleanup;
1519
1520        index_rbio_pages(rbio);
1521
1522        atomic_set(&rbio->error, 0);
1523        /*
1524         * build a list of bios to read all the missing parts of this
1525         * stripe
1526         */
1527        for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1528                for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1529                        struct page *page;
1530                        /*
1531                         * we want to find all the pages missing from
1532                         * the rbio and read them from the disk.  If
1533                         * page_in_rbio finds a page in the bio list
1534                         * we don't need to read it off the stripe.
1535                         */
1536                        page = page_in_rbio(rbio, stripe, pagenr, 1);
1537                        if (page)
1538                                continue;
1539
1540                        page = rbio_stripe_page(rbio, stripe, pagenr);
1541                        /*
1542                         * the bio cache may have handed us an uptodate
1543                         * page.  If so, be happy and use it
1544                         */
1545                        if (PageUptodate(page))
1546                                continue;
1547
1548                        ret = rbio_add_io_page(rbio, &bio_list, page,
1549                                       stripe, pagenr, rbio->stripe_len);
1550                        if (ret)
1551                                goto cleanup;
1552                }
1553        }
1554
1555        bios_to_read = bio_list_size(&bio_list);
1556        if (!bios_to_read) {
1557                /*
1558                 * this can happen if others have merged with
1559                 * us, it means there is nothing left to read.
1560                 * But if there are missing devices it may not be
1561                 * safe to do the full stripe write yet.
1562                 */
1563                goto finish;
1564        }
1565
1566        /*
1567         * the bbio may be freed once we submit the last bio.  Make sure
1568         * not to touch it after that
1569         */
1570        atomic_set(&rbio->stripes_pending, bios_to_read);
1571        while (1) {
1572                bio = bio_list_pop(&bio_list);
1573                if (!bio)
1574                        break;
1575
1576                bio->bi_private = rbio;
1577                bio->bi_end_io = raid_rmw_end_io;
1578                bio_set_op_attrs(bio, REQ_OP_READ, 0);
1579
1580                btrfs_bio_wq_end_io(rbio->fs_info, bio,
1581                                    BTRFS_WQ_ENDIO_RAID56);
1582
1583                submit_bio(bio);
1584        }
1585        /* the actual write will happen once the reads are done */
1586        return 0;
1587
1588cleanup:
1589        rbio_orig_end_io(rbio, -EIO);
1590        return -EIO;
1591
1592finish:
1593        validate_rbio_for_rmw(rbio);
1594        return 0;
1595}
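
/*
 * A quick map of the rmw read path above (summary only, no new behavior):
 * the reads we submit complete in raid_rmw_end_io(), which calls
 * validate_rbio_for_rmw() once the last bio finishes.  That either
 * rebuilds any failed stripes from parity via __raid_recover_end_io()
 * or goes straight to finish_rmw(), which computes the new parity and
 * writes the full stripe back out.
 */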
1596
1597/*
1598 * if the upper layers pass in a full stripe, we thank them by only allocating
1599 * enough pages to hold the parity, and sending it all down quickly.
1600 */
1601static int full_stripe_write(struct btrfs_raid_bio *rbio)
1602{
1603        int ret;
1604
1605        ret = alloc_rbio_parity_pages(rbio);
1606        if (ret) {
1607                __free_raid_bio(rbio);
1608                return ret;
1609        }
1610
1611        ret = lock_stripe_add(rbio);
1612        if (ret == 0)
1613                finish_rmw(rbio);
1614        return 0;
1615}
1616
1617/*
1618 * partial stripe writes get handed over to async helpers.
1619 * We're really hoping to merge a few more writes into this
1620 * rbio before calculating new parity
1621 */
1622static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1623{
1624        int ret;
1625
1626        ret = lock_stripe_add(rbio);
1627        if (ret == 0)
1628                async_rmw_stripe(rbio);
1629        return 0;
1630}
1631
1632/*
1633 * sometimes while we were reading from the drive to
1634 * recalculate parity, enough new bios come in to create
1635 * a full stripe.  So we do a check here to see if we can
1636 * go directly to finish_rmw
1637 */
1638static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1639{
1640        /* head off into rmw land if we don't have a full stripe */
1641        if (!rbio_is_full(rbio))
1642                return partial_stripe_write(rbio);
1643        return full_stripe_write(rbio);
1644}
1645
1646/*
1647 * We use plugging callbacks to collect full stripes.
1648 * Any time we get a partial stripe write while plugged
1649 * we collect it into a list.  When the unplug comes down,
1650 * we sort the list by logical block number and merge
1651 * everything we can into the same rbios
1652 */
1653struct btrfs_plug_cb {
1654        struct blk_plug_cb cb;
1655        struct btrfs_fs_info *info;
1656        struct list_head rbio_list;
1657        struct btrfs_work work;
1658};
1659
1660/*
1661 * rbios on the plug list are sorted for easier merging.
1662 */
1663static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1664{
1665        struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1666                                                 plug_list);
1667        struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1668                                                 plug_list);
1669        u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1670        u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1671
1672        if (a_sector < b_sector)
1673                return -1;
1674        if (a_sector > b_sector)
1675                return 1;
1676        return 0;
1677}
1678
1679static void run_plug(struct btrfs_plug_cb *plug)
1680{
1681        struct btrfs_raid_bio *cur;
1682        struct btrfs_raid_bio *last = NULL;
1683
1684        /*
1685         * sort our plug list then try to merge
1686         * everything we can in hopes of creating full
1687         * stripes.
1688         */
1689        list_sort(NULL, &plug->rbio_list, plug_cmp);
1690        while (!list_empty(&plug->rbio_list)) {
1691                cur = list_entry(plug->rbio_list.next,
1692                                 struct btrfs_raid_bio, plug_list);
1693                list_del_init(&cur->plug_list);
1694
1695                if (rbio_is_full(cur)) {
1696                        /* we have a full stripe, send it down */
1697                        full_stripe_write(cur);
1698                        continue;
1699                }
1700                if (last) {
1701                        if (rbio_can_merge(last, cur)) {
1702                                merge_rbio(last, cur);
1703                                __free_raid_bio(cur);
1704                                continue;
1706                        }
1707                        __raid56_parity_write(last);
1708                }
1709                last = cur;
1710        }
1711        if (last)
1712                __raid56_parity_write(last);
1714        kfree(plug);
1715}
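
/*
 * Worked example of the merging above, with hypothetical numbers: say
 * stripe_len is 64K with two data stripes, so a full stripe covers 128K
 * of logical space, and the sorted plug list holds rbio A [0K,64K),
 * rbio B [64K,128K) and rbio C [128K,160K).  A and B merge into one full
 * stripe and go down via full_stripe_write(); C stays partial and takes
 * the rmw path via __raid56_parity_write().  Whether two rbios really
 * merge is decided by rbio_can_merge(), this only illustrates the intent.
 */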
1716
1717/*
1718 * if the unplug comes from schedule, we have to push the
1719 * work off to a helper thread
1720 */
1721static void unplug_work(struct btrfs_work *work)
1722{
1723        struct btrfs_plug_cb *plug;
1724        plug = container_of(work, struct btrfs_plug_cb, work);
1725        run_plug(plug);
1726}
1727
1728static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1729{
1730        struct btrfs_plug_cb *plug;
1731        plug = container_of(cb, struct btrfs_plug_cb, cb);
1732
1733        if (from_schedule) {
1734                btrfs_init_work(&plug->work, btrfs_rmw_helper,
1735                                unplug_work, NULL, NULL);
1736                btrfs_queue_work(plug->info->rmw_workers,
1737                                 &plug->work);
1738                return;
1739        }
1740        run_plug(plug);
1741}
1742
1743/*
1744 * our main entry point for writes from the rest of the FS.
1745 */
1746int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
1747                        struct btrfs_bio *bbio, u64 stripe_len)
1748{
1749        struct btrfs_raid_bio *rbio;
1750        struct btrfs_plug_cb *plug = NULL;
1751        struct blk_plug_cb *cb;
1752        int ret;
1753
1754        rbio = alloc_rbio(root, bbio, stripe_len);
1755        if (IS_ERR(rbio)) {
1756                btrfs_put_bbio(bbio);
1757                return PTR_ERR(rbio);
1758        }
1759        bio_list_add(&rbio->bio_list, bio);
1760        rbio->bio_list_bytes = bio->bi_iter.bi_size;
1761        rbio->operation = BTRFS_RBIO_WRITE;
1762
1763        btrfs_bio_counter_inc_noblocked(root->fs_info);
1764        rbio->generic_bio_cnt = 1;
1765
1766        /*
1767         * don't plug on full rbios, just get them out the door
1768         * as quickly as we can
1769         */
1770        if (rbio_is_full(rbio)) {
1771                ret = full_stripe_write(rbio);
1772                if (ret)
1773                        btrfs_bio_counter_dec(root->fs_info);
1774                return ret;
1775        }
1776
1777        cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
1778                               sizeof(*plug));
1779        if (cb) {
1780                plug = container_of(cb, struct btrfs_plug_cb, cb);
1781                if (!plug->info) {
1782                        plug->info = root->fs_info;
1783                        INIT_LIST_HEAD(&plug->rbio_list);
1784                }
1785                list_add_tail(&rbio->plug_list, &plug->rbio_list);
1786                ret = 0;
1787        } else {
1788                ret = __raid56_parity_write(rbio);
1789                if (ret)
1790                        btrfs_bio_counter_dec(root->fs_info);
1791        }
1792        return ret;
1793}
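
/*
 * Sketch of how a submitter drives the plugging above (hypothetical
 * caller, not part of this file; blk_start_plug()/blk_finish_plug() are
 * the generic block layer APIs):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	...several raid56_parity_write() calls with partial stripes,
 *	   each collected onto plug->rbio_list by blk_check_plugged()...
 *	blk_finish_plug(&plug);
 *
 * The unplug fires btrfs_raid_unplug() -> run_plug(), which sorts and
 * merges the collected rbios before submitting them.
 */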
1794
1795/*
1796 * all parity reconstruction happens here.  We've read in everything
1797 * we can find from the drives and this does the heavy lifting of
1798 * sorting the good from the bad.
1799 */
1800static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1801{
1802        int pagenr, stripe;
1803        void **pointers;
1804        int faila = -1, failb = -1;
1805        struct page *page;
1806        int err;
1807        int i;
1808
1809        pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1810        if (!pointers) {
1811                err = -ENOMEM;
1812                goto cleanup_io;
1813        }
1814
1815        faila = rbio->faila;
1816        failb = rbio->failb;
1817
1818        if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1819            rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1820                spin_lock_irq(&rbio->bio_list_lock);
1821                set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1822                spin_unlock_irq(&rbio->bio_list_lock);
1823        }
1824
1825        index_rbio_pages(rbio);
1826
1827        for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1828                /*
1829                 * Now we just use a bitmap to mark the horizontal stripes
1830                 * in which we have data when doing a parity scrub.
1831                 */
1832                if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1833                    !test_bit(pagenr, rbio->dbitmap))
1834                        continue;
1835
1836                /* setup our array of pointers with pages
1837                 * from each stripe
1838                 */
1839                for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1840                        /*
1841                         * if we're rebuilding a read, we have to use
1842                         * pages from the bio list
1843                         */
1844                        if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1845                             rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1846                            (stripe == faila || stripe == failb)) {
1847                                page = page_in_rbio(rbio, stripe, pagenr, 0);
1848                        } else {
1849                                page = rbio_stripe_page(rbio, stripe, pagenr);
1850                        }
1851                        pointers[stripe] = kmap(page);
1852                }
1853
1854                /* all raid6 handling here */
1855                if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1856                        /*
1857                         * single failure, rebuild from parity raid5
1858                         * style
1859                         */
1860                        if (failb < 0) {
1861                                if (faila == rbio->nr_data) {
1862                                        /*
1863                                         * Just the P stripe has failed, without
1864                                         * a bad data or Q stripe.
1865                                         * TODO, we should redo the xor here.
1866                                         */
1867                                        err = -EIO;
1868                                        goto cleanup;
1869                                }
1870                                /*
1871                                 * a single failure in raid6 is rebuilt
1872                                 * in the pstripe code below
1873                                 */
1874                                goto pstripe;
1875                        }
1876
1877                        /* make sure our ps and qs are in order */
1878                        if (faila > failb)
1879                                swap(faila, failb);
1883
1884                        /* if the q stripe has failed, do a pstripe
1885                         * reconstruction from the xors.
1886                         * If both the q stripe and the p stripe have
1887                         * failed, we're here due to a crc mismatch and
1888                         * we can't give them the data they want
1889                         */
1890                        if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1891                                if (rbio->bbio->raid_map[faila] ==
1892                                    RAID5_P_STRIPE) {
1893                                        err = -EIO;
1894                                        goto cleanup;
1895                                }
1896                                /*
1897                                 * otherwise we have one bad data stripe and
1898                                 * a good P stripe.  raid5!
1899                                 */
1900                                goto pstripe;
1901                        }
1902
1903                        if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1904                                raid6_datap_recov(rbio->real_stripes,
1905                                                  PAGE_SIZE, faila, pointers);
1906                        } else {
1907                                raid6_2data_recov(rbio->real_stripes,
1908                                                  PAGE_SIZE, faila, failb,
1909                                                  pointers);
1910                        }
1911                } else {
1912                        void *p;
1913
1914                        /* rebuild from P stripe here (raid5 or raid6) */
1915                        BUG_ON(failb != -1);
1916pstripe:
1917                        /* Copy parity block into failed block to start with */
1918                        memcpy(pointers[faila],
1919                               pointers[rbio->nr_data],
1920                               PAGE_SIZE);
1921
1922                        /* rearrange the pointer array */
1923                        p = pointers[faila];
1924                        for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1925                                pointers[stripe] = pointers[stripe + 1];
1926                        pointers[rbio->nr_data - 1] = p;
1927
1928                        /* xor in the rest */
1929                        run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1930                }
1931                /* if we're doing this rebuild as part of an rmw, go through
1932                 * and set all of our private rbio pages in the
1933                 * failed stripes as uptodate.  This way finish_rmw will
1934                 * know they can be trusted.  If this was a read reconstruction,
1935                 * other endio functions will fiddle the uptodate bits
1936                 */
1937                if (rbio->operation == BTRFS_RBIO_WRITE) {
1938                        for (i = 0;  i < rbio->stripe_npages; i++) {
1939                                if (faila != -1) {
1940                                        page = rbio_stripe_page(rbio, faila, i);
1941                                        SetPageUptodate(page);
1942                                }
1943                                if (failb != -1) {
1944                                        page = rbio_stripe_page(rbio, failb, i);
1945                                        SetPageUptodate(page);
1946                                }
1947                        }
1948                }
1949                for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1950                        /*
1951                         * if we're rebuilding a read, we have to use
1952                         * pages from the bio list
1953                         */
1954                        if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1955                             rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1956                            (stripe == faila || stripe == failb)) {
1957                                page = page_in_rbio(rbio, stripe, pagenr, 0);
1958                        } else {
1959                                page = rbio_stripe_page(rbio, stripe, pagenr);
1960                        }
1961                        kunmap(page);
1962                }
1963        }
1964
1965        err = 0;
1966cleanup:
1967        kfree(pointers);
1968
1969cleanup_io:
1970        if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1971                if (err == 0)
1972                        cache_rbio_pages(rbio);
1973                else
1974                        clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1975
1976                rbio_orig_end_io(rbio, err);
1977        } else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1978                rbio_orig_end_io(rbio, err);
1979        } else if (err == 0) {
1980                rbio->faila = -1;
1981                rbio->failb = -1;
1982
1983                if (rbio->operation == BTRFS_RBIO_WRITE)
1984                        finish_rmw(rbio);
1985                else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
1986                        finish_parity_scrub(rbio, 0);
1987                else
1988                        BUG();
1989        } else {
1990                rbio_orig_end_io(rbio, err);
1991        }
1992}
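
/*
 * Worked example (illustration only) of the pstripe rebuild above, with
 * nr_data = 3 and faila = 0, so pointers starts as { D0, D1, D2, P } and
 * the D0 page holds stale data:
 *
 *	memcpy:  failed page <- P          { P', D1, D2, P }
 *	rotate:  failed page to the end    { D1, D2, P', P }
 *	run_xor(pointers, 2, PAGE_SIZE):   P' ^= D1 ^ D2
 *
 * Since P = D0 ^ D1 ^ D2, the failed page ends up holding D0 again.
 */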
1993
1994/*
1995 * This is called only for stripes we've read from disk to
1996 * reconstruct the parity.
1997 */
1998static void raid_recover_end_io(struct bio *bio)
1999{
2000        struct btrfs_raid_bio *rbio = bio->bi_private;
2001
2002        /*
2003         * we only read stripe pages off the disk, set them
2004         * up to date if there were no errors
2005         */
2006        if (bio->bi_error)
2007                fail_bio_stripe(rbio, bio);
2008        else
2009                set_bio_pages_uptodate(bio);
2010        bio_put(bio);
2011
2012        if (!atomic_dec_and_test(&rbio->stripes_pending))
2013                return;
2014
2015        if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2016                rbio_orig_end_io(rbio, -EIO);
2017        else
2018                __raid_recover_end_io(rbio);
2019}
2020
2021/*
2022 * reads everything we need off the disk to reconstruct
2023 * the parity. endio handlers trigger final reconstruction
2024 * when the IO is done.
2025 *
2026 * This is used both for reads from the higher layers and for
2027 * parity construction required to finish a rmw cycle.
2028 */
2029static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2030{
2031        int bios_to_read = 0;
2032        struct bio_list bio_list;
2033        int ret;
2034        int pagenr;
2035        int stripe;
2036        struct bio *bio;
2037
2038        bio_list_init(&bio_list);
2039
2040        ret = alloc_rbio_pages(rbio);
2041        if (ret)
2042                goto cleanup;
2043
2044        atomic_set(&rbio->error, 0);
2045
2046        /*
2047         * read everything that hasn't failed.  Thanks to the
2048         * stripe cache, it is possible that some or all of these
2049         * pages are going to be uptodate.
2050         */
2051        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2052                if (rbio->faila == stripe || rbio->failb == stripe) {
2053                        atomic_inc(&rbio->error);
2054                        continue;
2055                }
2056
2057                for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2058                        struct page *p;
2059
2060                        /*
2061                         * the rmw code may have already read this
2062                         * page in
2063                         */
2064                        p = rbio_stripe_page(rbio, stripe, pagenr);
2065                        if (PageUptodate(p))
2066                                continue;
2067
2068                        ret = rbio_add_io_page(rbio, &bio_list,
2069                                       rbio_stripe_page(rbio, stripe, pagenr),
2070                                       stripe, pagenr, rbio->stripe_len);
2071                        if (ret < 0)
2072                                goto cleanup;
2073                }
2074        }
2075
2076        bios_to_read = bio_list_size(&bio_list);
2077        if (!bios_to_read) {
2078                /*
2079                 * we might have no bios to read just because the pages
2080                 * were up to date, or because the devices were gone.
2082                 */
2083                if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2084                        __raid_recover_end_io(rbio);
2085                        goto out;
2086                } else {
2087                        goto cleanup;
2088                }
2089        }
2090
2091        /*
2092         * the bbio may be freed once we submit the last bio.  Make sure
2093         * not to touch it after that
2094         */
2095        atomic_set(&rbio->stripes_pending, bios_to_read);
2096        while (1) {
2097                bio = bio_list_pop(&bio_list);
2098                if (!bio)
2099                        break;
2100
2101                bio->bi_private = rbio;
2102                bio->bi_end_io = raid_recover_end_io;
2103                bio_set_op_attrs(bio, REQ_OP_READ, 0);
2104
2105                btrfs_bio_wq_end_io(rbio->fs_info, bio,
2106                                    BTRFS_WQ_ENDIO_RAID56);
2107
2108                submit_bio(bio);
2109        }
2110out:
2111        return 0;
2112
2113cleanup:
2114        if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2115            rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2116                rbio_orig_end_io(rbio, -EIO);
2117        return -EIO;
2118}
2119
2120/*
2121 * the main entry point for reads from the higher layers.  This
2122 * is really only called when the normal read path had a failure,
2123 * so we assume the bio they send down corresponds to a failed part
2124 * of the drive.
2125 */
2126int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2127                          struct btrfs_bio *bbio, u64 stripe_len,
2128                          int mirror_num, int generic_io)
2129{
2130        struct btrfs_raid_bio *rbio;
2131        int ret;
2132
2133        rbio = alloc_rbio(root, bbio, stripe_len);
2134        if (IS_ERR(rbio)) {
2135                if (generic_io)
2136                        btrfs_put_bbio(bbio);
2137                return PTR_ERR(rbio);
2138        }
2139
2140        rbio->operation = BTRFS_RBIO_READ_REBUILD;
2141        bio_list_add(&rbio->bio_list, bio);
2142        rbio->bio_list_bytes = bio->bi_iter.bi_size;
2143
2144        rbio->faila = find_logical_bio_stripe(rbio, bio);
2145        if (rbio->faila == -1) {
2146                btrfs_warn(root->fs_info,
2147        "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2148                           __func__, (u64)bio->bi_iter.bi_sector << 9,
2149                           (u64)bio->bi_iter.bi_size, bbio->map_type);
2150                if (generic_io)
2151                        btrfs_put_bbio(bbio);
2152                kfree(rbio);
2153                return -EIO;
2154        }
2155
2156        if (generic_io) {
2157                btrfs_bio_counter_inc_noblocked(root->fs_info);
2158                rbio->generic_bio_cnt = 1;
2159        } else {
2160                btrfs_get_bbio(bbio);
2161        }
2162
2163        /*
2164         * reconstruct from the q stripe if they are
2165         * asking for mirror 3
2166         */
2167        if (mirror_num == 3)
2168                rbio->failb = rbio->real_stripes - 2;
2169
2170        ret = lock_stripe_add(rbio);
2171
2172        /*
2173         * __raid56_parity_recover will end the bio with
2174         * any errors it hits.  We don't want to return
2175         * its error value up the stack because our caller
2176         * will end up calling bio_endio with any nonzero
2177         * return
2178         */
2179        if (ret == 0)
2180                __raid56_parity_recover(rbio);
2181        /*
2182         * our rbio has been added to the list of
2183         * rbios that will be handled after the
2184         * current lock owner is done
2185         */
2186        return 0;
2188}
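
/*
 * Note on the mirror_num == 3 case above: real_stripes - 1 is the Q
 * stripe and real_stripes - 2 is the P stripe, so setting failb to
 * real_stripes - 2 marks P as failed and forces the rebuild to use the
 * Q stripe instead.
 */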
2189
2190static void rmw_work(struct btrfs_work *work)
2191{
2192        struct btrfs_raid_bio *rbio;
2193
2194        rbio = container_of(work, struct btrfs_raid_bio, work);
2195        raid56_rmw_stripe(rbio);
2196}
2197
2198static void read_rebuild_work(struct btrfs_work *work)
2199{
2200        struct btrfs_raid_bio *rbio;
2201
2202        rbio = container_of(work, struct btrfs_raid_bio, work);
2203        __raid56_parity_recover(rbio);
2204}
2205
2206/*
2207 * The following code is used to scrub/replace the parity stripe
2208 *
2209 * Note: We need to make sure that all the pages added to the scrub/replace
2210 * raid bio are correct and are not changed during the scrub/replace, i.e.
2211 * those pages only hold metadata or file data covered by a checksum.
2212 */
2213
2214struct btrfs_raid_bio *
2215raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
2216                               struct btrfs_bio *bbio, u64 stripe_len,
2217                               struct btrfs_device *scrub_dev,
2218                               unsigned long *dbitmap, int stripe_nsectors)
2219{
2220        struct btrfs_raid_bio *rbio;
2221        int i;
2222
2223        rbio = alloc_rbio(root, bbio, stripe_len);
2224        if (IS_ERR(rbio))
2225                return NULL;
2226        bio_list_add(&rbio->bio_list, bio);
2227        /*
2228         * This is a special bio which is used to hold the completion handler
2229         * and make the scrub rbio similar to the other types
2230         */
2231        ASSERT(!bio->bi_iter.bi_size);
2232        rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2233
2234        for (i = 0; i < rbio->real_stripes; i++) {
2235                if (bbio->stripes[i].dev == scrub_dev) {
2236                        rbio->scrubp = i;
2237                        break;
2238                }
2239        }
2240
2241        /* For now we only support sectorsize equal to the page size */
2242        ASSERT(root->sectorsize == PAGE_SIZE);
2243        ASSERT(rbio->stripe_npages == stripe_nsectors);
2244        bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2245
2246        return rbio;
2247}
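
/*
 * Sketch of the expected calling sequence (hypothetical caller, shown
 * for illustration; the real one lives in the scrub code):
 *
 *	rbio = raid56_parity_alloc_scrub_rbio(root, bio, bbio, stripe_len,
 *					      scrub_dev, dbitmap, nsectors);
 *	if (rbio) {
 *		...raid56_add_scrub_pages(rbio, page, logical) for each
 *		   data page covered by dbitmap...
 *		raid56_parity_submit_scrub_rbio(rbio);
 *	}
 */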
2248
2249/* Used for both parity scrub and rebuild of a missing device. */
2250void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2251                            u64 logical)
2252{
2253        int stripe_offset;
2254        int index;
2255
2256        ASSERT(logical >= rbio->bbio->raid_map[0]);
2257        ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2258                                rbio->stripe_len * rbio->nr_data);
2259        stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2260        index = stripe_offset >> PAGE_SHIFT;
2261        rbio->bio_pages[index] = page;
2262}
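
/*
 * Example of the index math above, with illustrative numbers: if
 * raid_map[0] is 0, stripe_len is 64K and pages are 4K (stripe_npages
 * == 16), then logical == 68K gives stripe_offset == 68K and index ==
 * 17, i.e. the second page of the second data stripe in the flat
 * bio_pages array.
 */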
2263
2264/*
2265 * We only scrub the parity for the horizontal stripes where we have correct
2266 * data, so we don't need to allocate pages for all the stripes.
2267 */
2268static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2269{
2270        int i;
2271        int bit;
2272        int index;
2273        struct page *page;
2274
2275        for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2276                for (i = 0; i < rbio->real_stripes; i++) {
2277                        index = i * rbio->stripe_npages + bit;
2278                        if (rbio->stripe_pages[index])
2279                                continue;
2280
2281                        page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2282                        if (!page)
2283                                return -ENOMEM;
2284                        rbio->stripe_pages[index] = page;
2285                }
2286        }
2287        return 0;
2288}
2289
2290static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2291                                         int need_check)
2292{
2293        struct btrfs_bio *bbio = rbio->bbio;
2294        void *pointers[rbio->real_stripes];
2295        DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
2296        int nr_data = rbio->nr_data;
2297        int stripe;
2298        int pagenr;
2299        int p_stripe = -1;
2300        int q_stripe = -1;
2301        struct page *p_page = NULL;
2302        struct page *q_page = NULL;
2303        struct bio_list bio_list;
2304        struct bio *bio;
2305        int is_replace = 0;
2306        int ret;
2307
2308        bio_list_init(&bio_list);
2309
2310        if (rbio->real_stripes - rbio->nr_data == 1) {
2311                p_stripe = rbio->real_stripes - 1;
2312        } else if (rbio->real_stripes - rbio->nr_data == 2) {
2313                p_stripe = rbio->real_stripes - 2;
2314                q_stripe = rbio->real_stripes - 1;
2315        } else {
2316                BUG();
2317        }
2318
2319        if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2320                is_replace = 1;
2321                bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2322        }
2323
2324        /*
2325         * The higher layers (the scrubber) are unlikely to use this
2326         * area of the disk again soon, so don't cache it.
2328         */
2329        clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2330
2331        if (!need_check)
2332                goto writeback;
2333
2334        p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2335        if (!p_page)
2336                goto cleanup;
2337        SetPageUptodate(p_page);
2338
2339        if (q_stripe != -1) {
2340                q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2341                if (!q_page) {
2342                        __free_page(p_page);
2343                        goto cleanup;
2344                }
2345                SetPageUptodate(q_page);
2346        }
2347
2348        atomic_set(&rbio->error, 0);
2349
2350        for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2351                struct page *p;
2352                void *parity;
2353                /* first collect one page from each data stripe */
2354                for (stripe = 0; stripe < nr_data; stripe++) {
2355                        p = page_in_rbio(rbio, stripe, pagenr, 0);
2356                        pointers[stripe] = kmap(p);
2357                }
2358
2359                /* then add the parity stripe */
2360                pointers[stripe++] = kmap(p_page);
2361
2362                if (q_stripe != -1) {
2363
2364                        /*
2365                         * raid6, add the qstripe and call the
2366                         * library function to fill in our p/q
2367                         */
2368                        pointers[stripe++] = kmap(q_page);
2369
2370                        raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2371                                                pointers);
2372                } else {
2373                        /* raid5 */
2374                        memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2375                        run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2376                }
2377
2378                /* Check scrubbing parity and repair it */
2379                p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2380                parity = kmap(p);
2381                if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2382                        memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
2383                else
2384                        /* Parity is right, no need to write it back */
2385                        bitmap_clear(rbio->dbitmap, pagenr, 1);
2386                kunmap(p);
2387
2388                for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2389                        kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2390        }
2391
2392        __free_page(p_page);
2393        if (q_page)
2394                __free_page(q_page);
2395
2396writeback:
2397        /*
2398         * time to start writing.  Make bios for everything from the
2399         * higher layers (the bio_list in our rbio) and our p/q.  Ignore
2400         * everything else.
2401         */
2402        for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2403                struct page *page;
2404
2405                page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2406                ret = rbio_add_io_page(rbio, &bio_list,
2407                               page, rbio->scrubp, pagenr, rbio->stripe_len);
2408                if (ret)
2409                        goto cleanup;
2410        }
2411
2412        if (!is_replace)
2413                goto submit_write;
2414
2415        for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2416                struct page *page;
2417
2418                page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2419                ret = rbio_add_io_page(rbio, &bio_list, page,
2420                                       bbio->tgtdev_map[rbio->scrubp],
2421                                       pagenr, rbio->stripe_len);
2422                if (ret)
2423                        goto cleanup;
2424        }
2425
2426submit_write:
2427        nr_data = bio_list_size(&bio_list);
2428        if (!nr_data) {
2429                /* Every parity was correct, nothing to write back */
2430                rbio_orig_end_io(rbio, 0);
2431                return;
2432        }
2433
2434        atomic_set(&rbio->stripes_pending, nr_data);
2435
2436        while (1) {
2437                bio = bio_list_pop(&bio_list);
2438                if (!bio)
2439                        break;
2440
2441                bio->bi_private = rbio;
2442                bio->bi_end_io = raid_write_end_io;
2443                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2444
2445                submit_bio(bio);
2446        }
2447        return;
2448
2449cleanup:
2450        rbio_orig_end_io(rbio, -EIO);
2451}
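
/*
 * Worked example of the check step above (raid5 with nr_data == 2, so
 * pointers is { D0, D1, p_page }):
 *
 *	memcpy(pointers[2], pointers[0], PAGE_SIZE);   p_page = D0
 *	run_xor(pointers + 1, 1, PAGE_SIZE);           p_page ^= D1
 *	memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE);
 *
 * Pages whose freshly computed parity matches what is on disk get
 * cleared from dbitmap, so only genuinely wrong parity is written back.
 */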
2452
2453static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2454{
2455        if (stripe >= 0 && stripe < rbio->nr_data)
2456                return 1;
2457        return 0;
2458}
2459
2460/*
2461 * While we're doing the parity check and repair, we could have errors
2462 * in reading pages off the disk.  This checks for errors and if we're
2463 * not able to read the page it'll trigger parity reconstruction.  The
2464 * parity scrub will be finished after we've reconstructed the failed
2465 * stripes
2466 */
2467static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2468{
2469        if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2470                goto cleanup;
2471
2472        if (rbio->faila >= 0 || rbio->failb >= 0) {
2473                int dfail = 0, failp = -1;
2474
2475                if (is_data_stripe(rbio, rbio->faila))
2476                        dfail++;
2477                else if (is_parity_stripe(rbio->faila))
2478                        failp = rbio->faila;
2479
2480                if (is_data_stripe(rbio, rbio->failb))
2481                        dfail++;
2482                else if (is_parity_stripe(rbio->failb))
2483                        failp = rbio->failb;
2484
2485                /*
2486                 * Because we cannot use the parity being scrubbed to
2487                 * repair data, our repair capability is reduced by one.
2488                 * (In the case of RAID5, we cannot repair anything.)
2489                 */
2490                if (dfail > rbio->bbio->max_errors - 1)
2491                        goto cleanup;
2492
2493                /*
2494                 * If all the data is good, only the parity is wrong,
2495                 * so just repair the parity.
2496                 */
2497                if (dfail == 0) {
2498                        finish_parity_scrub(rbio, 0);
2499                        return;
2500                }
2501
2502                /*
2503                 * Here we have one corrupted data stripe and one corrupted
2504                 * parity on RAID6.  If the corrupted parity is the one being
2505                 * scrubbed, we can luckily use the other one to repair the
2506                 * data; otherwise the data stripe cannot be repaired.
2507                 */
2508                if (failp != rbio->scrubp)
2509                        goto cleanup;
2510
2511                __raid_recover_end_io(rbio);
2512        } else {
2513                finish_parity_scrub(rbio, 1);
2514        }
2515        return;
2516
2517cleanup:
2518        rbio_orig_end_io(rbio, -EIO);
2519}
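
/*
 * Summary of the decisions above for RAID6 (max_errors == 2):
 *
 *	dfail == 0                  -> only parity is bad, just rewrite it
 *	dfail == 1, failp == scrubp -> rebuild data from the other parity
 *	dfail == 1, failp != scrubp -> cannot repair, -EIO
 *	dfail == 2                  -> cannot repair, -EIO
 */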
2520
2521/*
2522 * end io for the read phase of the scrub cycle.  All the bios here are
2523 * physical stripe bios we've read from the disk so we can recalculate
2524 * the parity of the stripe.
2525 *
2526 * This will usually kick off finish_parity_scrub once all the bios are read
2527 * in, but it may trigger parity reconstruction if we had any errors along the way
2528 */
2529static void raid56_parity_scrub_end_io(struct bio *bio)
2530{
2531        struct btrfs_raid_bio *rbio = bio->bi_private;
2532
2533        if (bio->bi_error)
2534                fail_bio_stripe(rbio, bio);
2535        else
2536                set_bio_pages_uptodate(bio);
2537
2538        bio_put(bio);
2539
2540        if (!atomic_dec_and_test(&rbio->stripes_pending))
2541                return;
2542
2543        /*
2544         * this will normally call finish_parity_scrub to start our write
2545         * but if there are any failed stripes we'll reconstruct
2546         * from parity first
2547         */
2548        validate_rbio_for_parity_scrub(rbio);
2549}
2550
2551static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2552{
2553        int bios_to_read = 0;
2554        struct bio_list bio_list;
2555        int ret;
2556        int pagenr;
2557        int stripe;
2558        struct bio *bio;
2559
2560        ret = alloc_rbio_essential_pages(rbio);
2561        if (ret)
2562                goto cleanup;
2563
2564        bio_list_init(&bio_list);
2565
2566        atomic_set(&rbio->error, 0);
2567        /*
2568         * build a list of bios to read all the missing parts of this
2569         * stripe
2570         */
2571        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2572                for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2573                        struct page *page;
2574                        /*
2575                         * we want to find all the pages missing from
2576                         * the rbio and read them from the disk.  If
2577                         * page_in_rbio finds a page in the bio list
2578                         * we don't need to read it off the stripe.
2579                         */
2580                        page = page_in_rbio(rbio, stripe, pagenr, 1);
2581                        if (page)
2582                                continue;
2583
2584                        page = rbio_stripe_page(rbio, stripe, pagenr);
2585                        /*
2586                         * the bio cache may have handed us an uptodate
2587                         * page.  If so, be happy and use it
2588                         */
2589                        if (PageUptodate(page))
2590                                continue;
2591
2592                        ret = rbio_add_io_page(rbio, &bio_list, page,
2593                                       stripe, pagenr, rbio->stripe_len);
2594                        if (ret)
2595                                goto cleanup;
2596                }
2597        }
2598
2599        bios_to_read = bio_list_size(&bio_list);
2600        if (!bios_to_read) {
2601                /*
2602                 * this can happen if others have merged with
2603                 * us; it means there is nothing left to read.
2604                 * But if there are missing devices it may not be
2605                 * safe to do the full stripe write yet.
2606                 */
2607                goto finish;
2608        }
2609
2610        /*
2611         * the bbio may be freed once we submit the last bio.  Make sure
2612         * not to touch it after that
2613         */
2614        atomic_set(&rbio->stripes_pending, bios_to_read);
2615        while (1) {
2616                bio = bio_list_pop(&bio_list);
2617                if (!bio)
2618                        break;
2619
2620                bio->bi_private = rbio;
2621                bio->bi_end_io = raid56_parity_scrub_end_io;
2622                bio_set_op_attrs(bio, REQ_OP_READ, 0);
2623
2624                btrfs_bio_wq_end_io(rbio->fs_info, bio,
2625                                    BTRFS_WQ_ENDIO_RAID56);
2626
2627                submit_bio(bio);
2628        }
2629        /* the actual write will happen once the reads are done */
2630        return;
2631
2632cleanup:
2633        rbio_orig_end_io(rbio, -EIO);
2634        return;
2635
2636finish:
2637        validate_rbio_for_parity_scrub(rbio);
2638}
2639
2640static void scrub_parity_work(struct btrfs_work *work)
2641{
2642        struct btrfs_raid_bio *rbio;
2643
2644        rbio = container_of(work, struct btrfs_raid_bio, work);
2645        raid56_parity_scrub_stripe(rbio);
2646}
2647
2648static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2649{
2650        btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2651                        scrub_parity_work, NULL, NULL);
2652
2653        btrfs_queue_work(rbio->fs_info->rmw_workers,
2654                         &rbio->work);
2655}
2656
2657void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2658{
2659        if (!lock_stripe_add(rbio))
2660                async_scrub_parity(rbio);
2661}
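
/*
 * lock_stripe_add() returns 0 when we took the stripe lock ourselves,
 * so we kick the work off here; a nonzero return means the rbio was
 * queued behind the current lock holder, which will hand the work over
 * when it unlocks the stripe.
 */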
2662
2663/* The following code is used for dev replace of a missing RAID 5/6 device. */
2664
2665struct btrfs_raid_bio *
2666raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
2667                          struct btrfs_bio *bbio, u64 length)
2668{
2669        struct btrfs_raid_bio *rbio;
2670
2671        rbio = alloc_rbio(root, bbio, length);
2672        if (IS_ERR(rbio))
2673                return NULL;
2674
2675        rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2676        bio_list_add(&rbio->bio_list, bio);
2677        /*
2678         * This is a special bio which is used to hold the completion handler
2679         * and make the scrub rbio similar to the other types
2680         */
2681        ASSERT(!bio->bi_iter.bi_size);
2682
2683        rbio->faila = find_logical_bio_stripe(rbio, bio);
2684        if (rbio->faila == -1) {
2685                BUG();
2686                kfree(rbio);
2687                return NULL;
2688        }
2689
2690        return rbio;
2691}
2692
2693static void missing_raid56_work(struct btrfs_work *work)
2694{
2695        struct btrfs_raid_bio *rbio;
2696
2697        rbio = container_of(work, struct btrfs_raid_bio, work);
2698        __raid56_parity_recover(rbio);
2699}
2700
2701static void async_missing_raid56(struct btrfs_raid_bio *rbio)
2702{
2703        btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2704                        missing_raid56_work, NULL, NULL);
2705
2706        btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2707}
2708
2709void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2710{
2711        if (!lock_stripe_add(rbio))
2712                async_missing_raid56(rbio);
2713}
2714