linux/fs/btrfs/raid56.c
   1/*
   2 * Copyright (C) 2012 Fusion-io  All rights reserved.
   3 * Copyright (C) 2012 Intel Corp. All rights reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public
   7 * License v2 as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  12 * General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public
  15 * License along with this program; if not, write to the
  16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  17 * Boston, MA 021110-1307, USA.
  18 */
  19#include <linux/sched.h>
  20#include <linux/wait.h>
  21#include <linux/bio.h>
  22#include <linux/slab.h>
  23#include <linux/buffer_head.h>
  24#include <linux/blkdev.h>
  25#include <linux/random.h>
  26#include <linux/iocontext.h>
  27#include <linux/capability.h>
  28#include <linux/ratelimit.h>
  29#include <linux/kthread.h>
  30#include <linux/raid/pq.h>
  31#include <linux/hash.h>
  32#include <linux/list_sort.h>
  33#include <linux/raid/xor.h>
  34#include <linux/vmalloc.h>
  35#include <asm/div64.h>
  36#include "ctree.h"
  37#include "extent_map.h"
  38#include "disk-io.h"
  39#include "transaction.h"
  40#include "print-tree.h"
  41#include "volumes.h"
  42#include "raid56.h"
  43#include "async-thread.h"
  44#include "check-integrity.h"
  45#include "rcu-string.h"
  46
  47/* set when additional merges to this rbio are not allowed */
  48#define RBIO_RMW_LOCKED_BIT     1
  49
  50/*
  51 * set when this rbio is sitting in the hash, but it is just a cache
  52 * of past RMW
  53 */
  54#define RBIO_CACHE_BIT          2
  55
  56/*
  57 * set when it is safe to trust the stripe_pages for caching
  58 */
  59#define RBIO_CACHE_READY_BIT    3
  60
  61#define RBIO_CACHE_SIZE 1024
  62
  63enum btrfs_rbio_ops {
  64        BTRFS_RBIO_WRITE        = 0,
  65        BTRFS_RBIO_READ_REBUILD = 1,
  66        BTRFS_RBIO_PARITY_SCRUB = 2,
  67};
  68
  69struct btrfs_raid_bio {
  70        struct btrfs_fs_info *fs_info;
  71        struct btrfs_bio *bbio;
  72
  73        /* while we're doing rmw on a stripe
  74         * we put it into a hash table so we can
  75         * lock the stripe and merge more rbios
  76         * into it.
  77         */
  78        struct list_head hash_list;
  79
  80        /*
  81         * LRU list for the stripe cache
  82         */
  83        struct list_head stripe_cache;
  84
  85        /*
  86         * for scheduling work in the helper threads
  87         */
  88        struct btrfs_work work;
  89
  90        /*
  91         * bio list and bio_list_lock are used
  92         * to add more bios into the stripe
  93         * in hopes of avoiding the full rmw
  94         */
  95        struct bio_list bio_list;
  96        spinlock_t bio_list_lock;
  97
  98        /* also protected by the bio_list_lock, the
  99         * plug list is used by the plugging code
 100         * to collect partial bios while plugged.  The
 101         * stripe locking code also uses it to hand off
 102         * the stripe lock to the next pending IO
 103         */
 104        struct list_head plug_list;
 105
 106        /*
 107         * flags that tell us if it is safe to
 108         * merge with this bio
 109         */
 110        unsigned long flags;
 111
 112        /* size of each individual stripe on disk */
 113        int stripe_len;
 114
 115        /* number of data stripes (no p/q) */
 116        int nr_data;
 117
 118        int real_stripes;
 119
 120        int stripe_npages;
 121        /*
 122         * set if we're doing a parity rebuild
 123         * for a read from higher up, which is handled
 124         * differently from a parity rebuild as part of
 125         * rmw
 126         */
 127        enum btrfs_rbio_ops operation;
 128
 129        /* first bad stripe */
 130        int faila;
 131
 132        /* second bad stripe (for raid6 use) */
 133        int failb;
 134
 135        int scrubp;
 136        /*
 137         * number of pages needed to represent the full
 138         * stripe
 139         */
 140        int nr_pages;
 141
 142        /*
 143         * size of all the bios in the bio_list.  This
 144         * helps us decide if the rbio maps to a full
 145         * stripe or not
 146         */
 147        int bio_list_bytes;
 148
 149        int generic_bio_cnt;
 150
 151        atomic_t refs;
 152
 153        atomic_t stripes_pending;
 154
 155        atomic_t error;
 156        /*
 157         * these are two arrays of pointers.  We allocate the
 158         * rbio big enough to hold them both and set up their
 159         * locations when the rbio is allocated
 160         */
 161
 162        /* pointers to pages that we allocated for
 163         * reading/writing stripes directly from the disk (including P/Q)
 164         */
 165        struct page **stripe_pages;
 166
 167        /*
 168         * pointers to the pages in the bio_list.  Stored
 169         * here for faster lookup
 170         */
 171        struct page **bio_pages;
 172
 173        /*
 174         * bitmap to record which horizontal stripe has data
 175         */
 176        unsigned long *dbitmap;
 177};
 178
 179static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
 180static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
 181static void rmw_work(struct btrfs_work *work);
 182static void read_rebuild_work(struct btrfs_work *work);
 183static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
 184static void async_read_rebuild(struct btrfs_raid_bio *rbio);
 185static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
 186static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
 187static void __free_raid_bio(struct btrfs_raid_bio *rbio);
 188static void index_rbio_pages(struct btrfs_raid_bio *rbio);
 189static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
 190
 191static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 192                                         int need_check);
 193static void async_scrub_parity(struct btrfs_raid_bio *rbio);
 194
 195/*
 196 * the stripe hash table is used for locking, and to collect
 197 * bios in hopes of making a full stripe
 198 */
 199int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
 200{
 201        struct btrfs_stripe_hash_table *table;
 202        struct btrfs_stripe_hash_table *x;
 203        struct btrfs_stripe_hash *cur;
 204        struct btrfs_stripe_hash *h;
 205        int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
 206        int i;
 207        int table_size;
 208
 209        if (info->stripe_hash_table)
 210                return 0;
 211
 212        /*
 213         * The table is large, starting with order 4 and can go as high as
 214         * order 7 in case lock debugging is turned on.
 215         *
 216         * Try harder to allocate and fall back to vmalloc to lower the chance
 217         * of a failing mount.
 218         */
 219        table_size = sizeof(*table) + sizeof(*h) * num_entries;
 220        table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
 221        if (!table) {
 222                table = vzalloc(table_size);
 223                if (!table)
 224                        return -ENOMEM;
 225        }
 226
 227        spin_lock_init(&table->cache_lock);
 228        INIT_LIST_HEAD(&table->stripe_cache);
 229
 230        h = table->table;
 231
 232        for (i = 0; i < num_entries; i++) {
 233                cur = h + i;
 234                INIT_LIST_HEAD(&cur->hash_list);
 235                spin_lock_init(&cur->lock);
 236                init_waitqueue_head(&cur->wait);
 237        }
 238
 239        x = cmpxchg(&info->stripe_hash_table, NULL, table);
 240        if (x) {
 241                if (is_vmalloc_addr(x))
 242                        vfree(x);
 243                else
 244                        kfree(x);
 245        }
 246        return 0;
 247}
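/*
 * The allocation above open-codes a "try kmalloc, fall back to vmalloc"
 * pattern, which is why the loser of the cmpxchg race must check
 * is_vmalloc_addr() before freeing.  A minimal sketch of the pairing
 * (illustrative only; later kernels wrap exactly this in
 * kvzalloc()/kvfree()):
 *
 *        table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
 *        if (!table)
 *                table = vzalloc(table_size);
 *        ...
 *        if (is_vmalloc_addr(table))
 *                vfree(table);
 *        else
 *                kfree(table);
 */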
 248
 249/*
 250 * caching an rbio means to copy anything from the
 251 * bio_pages array into the stripe_pages array.  We
 252 * use the page uptodate bit in the stripe cache array
 253 * to indicate if it has valid data
 254 *
 255 * once the caching is done, we set the cache ready
 256 * bit.
 257 */
 258static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
 259{
 260        int i;
 261        char *s;
 262        char *d;
 263        int ret;
 264
 265        ret = alloc_rbio_pages(rbio);
 266        if (ret)
 267                return;
 268
 269        for (i = 0; i < rbio->nr_pages; i++) {
 270                if (!rbio->bio_pages[i])
 271                        continue;
 272
 273                s = kmap(rbio->bio_pages[i]);
 274                d = kmap(rbio->stripe_pages[i]);
 275
 276                memcpy(d, s, PAGE_CACHE_SIZE);
 277
 278                kunmap(rbio->bio_pages[i]);
 279                kunmap(rbio->stripe_pages[i]);
 280                SetPageUptodate(rbio->stripe_pages[i]);
 281        }
 282        set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
 283}
 284
 285/*
 286 * we hash on the first logical address of the stripe
 287 */
 288static int rbio_bucket(struct btrfs_raid_bio *rbio)
 289{
 290        u64 num = rbio->bbio->raid_map[0];
 291
 292        /*
 293         * we shift down quite a bit.  We're using byte
 294         * addressing, and most of the lower bits are zeros.
 295         * This tends to upset hash_64, and it consistently
 296         * returns just one or two different values.
 297         *
 298         * shifting off the lower bits fixes things.
 299         */
 300        return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
 301}
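/*
 * Worked example of the shift above (assuming the common 64K stripe
 * size; the value is illustrative, not taken from this file): full
 * stripes start on 64K boundaries, so the low 16 bits of raid_map[0]
 * are always zero.  Hashing the raw byte address would feed hash_64()
 * nearly constant input, while
 *
 *        hash_64(raid_map[0] >> 16, BTRFS_STRIPE_HASH_TABLE_BITS)
 *
 * hashes the bits that actually differ from one stripe to the next.
 */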
 302
 303/*
 304 * stealing an rbio means taking all the uptodate pages from the stripe
 305 * array in the source rbio and putting them into the destination rbio
 306 */
 307static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
 308{
 309        int i;
 310        struct page *s;
 311        struct page *d;
 312
 313        if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
 314                return;
 315
 316        for (i = 0; i < dest->nr_pages; i++) {
 317                s = src->stripe_pages[i];
 318                if (!s || !PageUptodate(s)) {
 319                        continue;
 320                }
 321
 322                d = dest->stripe_pages[i];
 323                if (d)
 324                        __free_page(d);
 325
 326                dest->stripe_pages[i] = s;
 327                src->stripe_pages[i] = NULL;
 328        }
 329}
 330
 331/*
 332 * merging means we take the bio_list from the victim and
 333 * splice it into the destination.  The victim should
 334 * be discarded afterwards.
 335 *
 336 * must be called with dest->bio_list_lock held
 337 */
 338static void merge_rbio(struct btrfs_raid_bio *dest,
 339                       struct btrfs_raid_bio *victim)
 340{
 341        bio_list_merge(&dest->bio_list, &victim->bio_list);
 342        dest->bio_list_bytes += victim->bio_list_bytes;
 343        dest->generic_bio_cnt += victim->generic_bio_cnt;
 344        bio_list_init(&victim->bio_list);
 345}
 346
 347/*
 348 * used to prune items that are in the cache.  The caller
 349 * must hold the hash table lock.
 350 */
 351static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 352{
 353        int bucket = rbio_bucket(rbio);
 354        struct btrfs_stripe_hash_table *table;
 355        struct btrfs_stripe_hash *h;
 356        int freeit = 0;
 357
 358        /*
 359         * check the bit again under the hash table lock.
 360         */
 361        if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 362                return;
 363
 364        table = rbio->fs_info->stripe_hash_table;
 365        h = table->table + bucket;
 366
 367        /* hold the lock for the bucket because we may be
 368         * removing it from the hash table
 369         */
 370        spin_lock(&h->lock);
 371
 372        /*
 373         * hold the lock for the bio list because we need
 374         * to make sure the bio list is empty
 375         */
 376        spin_lock(&rbio->bio_list_lock);
 377
 378        if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 379                list_del_init(&rbio->stripe_cache);
 380                table->cache_size -= 1;
 381                freeit = 1;
 382
 383                /* if the bio list isn't empty, this rbio is
 384                 * still involved in an IO.  We take it out
 385                 * of the cache list, and drop the ref that
 386                 * was held for the list.
 387                 *
 388                 * If the bio_list was empty, we also remove
 389                 * the rbio from the hash_table, and drop
 390                 * the corresponding ref
 391                 */
 392                if (bio_list_empty(&rbio->bio_list)) {
 393                        if (!list_empty(&rbio->hash_list)) {
 394                                list_del_init(&rbio->hash_list);
 395                                atomic_dec(&rbio->refs);
 396                                BUG_ON(!list_empty(&rbio->plug_list));
 397                        }
 398                }
 399        }
 400
 401        spin_unlock(&rbio->bio_list_lock);
 402        spin_unlock(&h->lock);
 403
 404        if (freeit)
 405                __free_raid_bio(rbio);
 406}
 407
 408/*
 409 * prune a given rbio from the cache
 410 */
 411static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 412{
 413        struct btrfs_stripe_hash_table *table;
 414        unsigned long flags;
 415
 416        if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 417                return;
 418
 419        table = rbio->fs_info->stripe_hash_table;
 420
 421        spin_lock_irqsave(&table->cache_lock, flags);
 422        __remove_rbio_from_cache(rbio);
 423        spin_unlock_irqrestore(&table->cache_lock, flags);
 424}
 425
 426/*
 427 * remove everything in the cache
 428 */
 429static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
 430{
 431        struct btrfs_stripe_hash_table *table;
 432        unsigned long flags;
 433        struct btrfs_raid_bio *rbio;
 434
 435        table = info->stripe_hash_table;
 436
 437        spin_lock_irqsave(&table->cache_lock, flags);
 438        while (!list_empty(&table->stripe_cache)) {
 439                rbio = list_entry(table->stripe_cache.next,
 440                                  struct btrfs_raid_bio,
 441                                  stripe_cache);
 442                __remove_rbio_from_cache(rbio);
 443        }
 444        spin_unlock_irqrestore(&table->cache_lock, flags);
 445}
 446
 447/*
 448 * remove all cached entries and free the hash table
 449 * used by unmount
 450 */
 451void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
 452{
 453        if (!info->stripe_hash_table)
 454                return;
 455        btrfs_clear_rbio_cache(info);
 456        if (is_vmalloc_addr(info->stripe_hash_table))
 457                vfree(info->stripe_hash_table);
 458        else
 459                kfree(info->stripe_hash_table);
 460        info->stripe_hash_table = NULL;
 461}
 462
 463/*
 464 * insert an rbio into the stripe cache.  It
 465 * must have already been prepared by calling
 466 * cache_rbio_pages
 467 *
 468 * If this rbio was already cached, it gets
 469 * moved to the front of the lru.
 470 *
 471 * If the size of the rbio cache is too big, we
 472 * prune an item.
 473 */
 474static void cache_rbio(struct btrfs_raid_bio *rbio)
 475{
 476        struct btrfs_stripe_hash_table *table;
 477        unsigned long flags;
 478
 479        if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
 480                return;
 481
 482        table = rbio->fs_info->stripe_hash_table;
 483
 484        spin_lock_irqsave(&table->cache_lock, flags);
 485        spin_lock(&rbio->bio_list_lock);
 486
 487        /* bump our ref if we were not in the list before */
 488        if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
 489                atomic_inc(&rbio->refs);
 490
 491        if (!list_empty(&rbio->stripe_cache)) {
 492                list_move(&rbio->stripe_cache, &table->stripe_cache);
 493        } else {
 494                list_add(&rbio->stripe_cache, &table->stripe_cache);
 495                table->cache_size += 1;
 496        }
 497
 498        spin_unlock(&rbio->bio_list_lock);
 499
 500        if (table->cache_size > RBIO_CACHE_SIZE) {
 501                struct btrfs_raid_bio *found;
 502
 503                found = list_entry(table->stripe_cache.prev,
 504                                  struct btrfs_raid_bio,
 505                                  stripe_cache);
 506
 507                if (found != rbio)
 508                        __remove_rbio_from_cache(found);
 509        }
 510
 511        spin_unlock_irqrestore(&table->cache_lock, flags);
 512        return;
 513}
 514
 515/*
 516 * helper function to run the xor_blocks api.  It is only
 517 * able to do MAX_XOR_BLOCKS at a time, so we need to
 518 * loop through.
 519 */
 520static void run_xor(void **pages, int src_cnt, ssize_t len)
 521{
 522        int src_off = 0;
 523        int xor_src_cnt = 0;
 524        void *dest = pages[src_cnt];
 525
 526        while (src_cnt > 0) {
 527                xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
 528                xor_blocks(xor_src_cnt, len, dest, pages + src_off);
 529
 530                src_cnt -= xor_src_cnt;
 531                src_off += xor_src_cnt;
 532        }
 533}
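/*
 * Sketch of the calling convention run_xor() expects, mirroring how
 * finish_rmw() uses it below (the buffer names are made up for the
 * example): the first src_cnt entries are sources and pages[src_cnt]
 * is the destination, which must already hold the first data block:
 *
 *        void *ptrs[3] = { data0, data1, parity };
 *
 *        memcpy(parity, data0, PAGE_SIZE);
 *        run_xor(ptrs + 1, 1, PAGE_SIZE);
 *
 * The two calls leave parity == data0 ^ data1.
 */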
 534
 535/*
 536 * returns true if the bio list inside this rbio
 537 * covers an entire stripe (no rmw required).
 538 * Must be called with the bio list lock held, or
 539 * at a time when you know it is impossible to add
 540 * new bios into the list
 541 */
 542static int __rbio_is_full(struct btrfs_raid_bio *rbio)
 543{
 544        unsigned long size = rbio->bio_list_bytes;
 545        int ret = 1;
 546
 547        if (size != rbio->nr_data * rbio->stripe_len)
 548                ret = 0;
 549
 550        BUG_ON(size > rbio->nr_data * rbio->stripe_len);
 551        return ret;
 552}
 553
 554static int rbio_is_full(struct btrfs_raid_bio *rbio)
 555{
 556        unsigned long flags;
 557        int ret;
 558
 559        spin_lock_irqsave(&rbio->bio_list_lock, flags);
 560        ret = __rbio_is_full(rbio);
 561        spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
 562        return ret;
 563}
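/*
 * Worked example of the fullness test (assumed geometry, for
 * illustration only): with nr_data == 3 and stripe_len == 64K, the
 * rbio is full only when bio_list_bytes == 3 * 64K == 192K, i.e. the
 * queued writes cover every data byte of the full stripe; anything
 * smaller has to go through the read/modify/write path instead.
 */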
 564
 565/*
 566 * returns 1 if it is safe to merge two rbios together.
 567 * The merging is safe if the two rbios correspond to
 568 * the same stripe and if they are both going in the same
 569 * direction (read vs write), and if neither one is
 570 * locked for final IO
 571 *
 572 * The caller is responsible for locking such that
 573 * rmw_locked is safe to test
 574 */
 575static int rbio_can_merge(struct btrfs_raid_bio *last,
 576                          struct btrfs_raid_bio *cur)
 577{
 578        if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
 579            test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
 580                return 0;
 581
 582        /*
 583         * we can't merge with cached rbios, since the
 584         * idea is that when we merge the destination
 585         * rbio is going to run our IO for us.  We can
 586         * steal from cached rbio's though, other functions
 587         * handle that.
 588         */
 589        if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
 590            test_bit(RBIO_CACHE_BIT, &cur->flags))
 591                return 0;
 592
 593        if (last->bbio->raid_map[0] !=
 594            cur->bbio->raid_map[0])
 595                return 0;
 596
 597        /* we can't merge with different operations */
 598        if (last->operation != cur->operation)
 599                return 0;
 600        /*
 601         * For a parity scrub we need to read the full stripe from the
 602         * drive, check and repair the parity and write the new results.
 603         *
 604         * We're not allowed to add any new bios to the
 605         * bio list here, anyone else that wants to
 606         * change this stripe needs to do their own rmw.
 607         */
 608        if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
 609            cur->operation == BTRFS_RBIO_PARITY_SCRUB)
 610                return 0;
 611
 612        return 1;
 613}
 614
 615/*
 616 * helper to index into the pstripe
 617 */
 618static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
 619{
 620        index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
 621        return rbio->stripe_pages[index];
 622}
 623
 624/*
 625 * helper to index into the qstripe, returns null
 626 * if there is no qstripe
 627 */
 628static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
 629{
 630        if (rbio->nr_data + 1 == rbio->real_stripes)
 631                return NULL;
 632
 633        index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
 634                PAGE_CACHE_SHIFT;
 635        return rbio->stripe_pages[index];
 636}
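/*
 * Index arithmetic behind the two helpers above, with assumed values
 * (64K stripes, 4K pages, so 16 pages per stripe, nr_data == 4): data
 * occupies stripe_pages[0..63], rbio_pstripe_page(rbio, n) returns
 * stripe_pages[4 * 16 + n] and rbio_qstripe_page(rbio, n) returns
 * stripe_pages[5 * 16 + n], or NULL on raid5 where no Q stripe exists.
 */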
 637
 638/*
 639 * The first stripe in the table for a logical address
 640 * has the lock.  rbios are added in one of three ways:
 641 *
 642 * 1) Nobody has the stripe locked yet.  The rbio is given
 643 * the lock and 0 is returned.  The caller must start the IO
 644 * themselves.
 645 *
 646 * 2) Someone has the stripe locked, but we're able to merge
 647 * with the lock owner.  The rbio is freed and the IO will
 648 * start automatically along with the existing rbio.  1 is returned.
 649 *
 650 * 3) Someone has the stripe locked, but we're not able to merge.
 651 * The rbio is added to the lock owner's plug list, or merged into
 652 * an rbio already on the plug list.  When the lock owner unlocks,
 653 * the next rbio on the list is run and the IO is started automatically.
 654 * 1 is returned
 655 *
 656 * If we return 0, the caller still owns the rbio and must continue with
 657 * IO submission.  If we return 1, the caller must assume the rbio has
 658 * already been freed.
 659 */
 660static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 661{
 662        int bucket = rbio_bucket(rbio);
 663        struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
 664        struct btrfs_raid_bio *cur;
 665        struct btrfs_raid_bio *pending;
 666        unsigned long flags;
 667        DEFINE_WAIT(wait);
 668        struct btrfs_raid_bio *freeit = NULL;
 669        struct btrfs_raid_bio *cache_drop = NULL;
 670        int ret = 0;
 671        int walk = 0;
 672
 673        spin_lock_irqsave(&h->lock, flags);
 674        list_for_each_entry(cur, &h->hash_list, hash_list) {
 675                walk++;
 676                if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
 677                        spin_lock(&cur->bio_list_lock);
 678
 679                        /* can we steal this cached rbio's pages? */
 680                        if (bio_list_empty(&cur->bio_list) &&
 681                            list_empty(&cur->plug_list) &&
 682                            test_bit(RBIO_CACHE_BIT, &cur->flags) &&
 683                            !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
 684                                list_del_init(&cur->hash_list);
 685                                atomic_dec(&cur->refs);
 686
 687                                steal_rbio(cur, rbio);
 688                                cache_drop = cur;
 689                                spin_unlock(&cur->bio_list_lock);
 690
 691                                goto lockit;
 692                        }
 693
 694                        /* can we merge into the lock owner? */
 695                        if (rbio_can_merge(cur, rbio)) {
 696                                merge_rbio(cur, rbio);
 697                                spin_unlock(&cur->bio_list_lock);
 698                                freeit = rbio;
 699                                ret = 1;
 700                                goto out;
 701                        }
 702
 703
 704                        /*
 705                         * we couldn't merge with the running
 706                         * rbio, see if we can merge with the
 707                         * pending ones.  We don't have to
 708                         * check for rmw_locked because there
 709                         * is no way they are inside finish_rmw
 710                         * right now
 711                         */
 712                        list_for_each_entry(pending, &cur->plug_list,
 713                                            plug_list) {
 714                                if (rbio_can_merge(pending, rbio)) {
 715                                        merge_rbio(pending, rbio);
 716                                        spin_unlock(&cur->bio_list_lock);
 717                                        freeit = rbio;
 718                                        ret = 1;
 719                                        goto out;
 720                                }
 721                        }
 722
 723                        /* no merging, put us on the tail of the plug list,
 724                         * our rbio will be started when the currently
 725                         * running rbio unlocks
 726                         */
 727                        list_add_tail(&rbio->plug_list, &cur->plug_list);
 728                        spin_unlock(&cur->bio_list_lock);
 729                        ret = 1;
 730                        goto out;
 731                }
 732        }
 733lockit:
 734        atomic_inc(&rbio->refs);
 735        list_add(&rbio->hash_list, &h->hash_list);
 736out:
 737        spin_unlock_irqrestore(&h->lock, flags);
 738        if (cache_drop)
 739                remove_rbio_from_cache(cache_drop);
 740        if (freeit)
 741                __free_raid_bio(freeit);
 742        return ret;
 743}
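/*
 * Caller-side sketch of the locking contract described above; this is
 * what full_stripe_write()/partial_stripe_write() later in this file
 * boil down to:
 *
 *        ret = lock_stripe_add(rbio);
 *        if (ret == 0)
 *                finish_rmw(rbio);
 *
 * A return of 1 means the rbio was merged or queued elsewhere and must
 * not be touched again by the caller.
 */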
 744
 745/*
 746 * called as rmw or parity rebuild is completed.  If the plug list has more
 747 * rbios waiting for this stripe, the next one on the list will be started
 748 */
 749static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
 750{
 751        int bucket;
 752        struct btrfs_stripe_hash *h;
 753        unsigned long flags;
 754        int keep_cache = 0;
 755
 756        bucket = rbio_bucket(rbio);
 757        h = rbio->fs_info->stripe_hash_table->table + bucket;
 758
 759        if (list_empty(&rbio->plug_list))
 760                cache_rbio(rbio);
 761
 762        spin_lock_irqsave(&h->lock, flags);
 763        spin_lock(&rbio->bio_list_lock);
 764
 765        if (!list_empty(&rbio->hash_list)) {
 766                /*
 767                 * if we're still cached and there is no other IO
 768                 * to perform, just leave this rbio here for others
 769                 * to steal from later
 770                 */
 771                if (list_empty(&rbio->plug_list) &&
 772                    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 773                        keep_cache = 1;
 774                        clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
 775                        BUG_ON(!bio_list_empty(&rbio->bio_list));
 776                        goto done;
 777                }
 778
 779                list_del_init(&rbio->hash_list);
 780                atomic_dec(&rbio->refs);
 781
 782                /*
 783                 * we use the plug list to hold all the rbios
 784                 * waiting for the chance to lock this stripe.
 785                 * hand the lock over to one of them.
 786                 */
 787                if (!list_empty(&rbio->plug_list)) {
 788                        struct btrfs_raid_bio *next;
 789                        struct list_head *head = rbio->plug_list.next;
 790
 791                        next = list_entry(head, struct btrfs_raid_bio,
 792                                          plug_list);
 793
 794                        list_del_init(&rbio->plug_list);
 795
 796                        list_add(&next->hash_list, &h->hash_list);
 797                        atomic_inc(&next->refs);
 798                        spin_unlock(&rbio->bio_list_lock);
 799                        spin_unlock_irqrestore(&h->lock, flags);
 800
 801                        if (next->operation == BTRFS_RBIO_READ_REBUILD)
 802                                async_read_rebuild(next);
 803                        else if (next->operation == BTRFS_RBIO_WRITE) {
 804                                steal_rbio(rbio, next);
 805                                async_rmw_stripe(next);
 806                        } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
 807                                steal_rbio(rbio, next);
 808                                async_scrub_parity(next);
 809                        }
 810
 811                        goto done_nolock;
 812                } else if (waitqueue_active(&h->wait)) {
 813                        spin_unlock(&rbio->bio_list_lock);
 814                        spin_unlock_irqrestore(&h->lock, flags);
 815                        wake_up(&h->wait);
 816                        goto done_nolock;
 817                }
 818        }
 819done:
 820        spin_unlock(&rbio->bio_list_lock);
 821        spin_unlock_irqrestore(&h->lock, flags);
 822
 823done_nolock:
 824        if (!keep_cache)
 825                remove_rbio_from_cache(rbio);
 826}
 827
 828static void __free_raid_bio(struct btrfs_raid_bio *rbio)
 829{
 830        int i;
 831
 832        WARN_ON(atomic_read(&rbio->refs) < 0);
 833        if (!atomic_dec_and_test(&rbio->refs))
 834                return;
 835
 836        WARN_ON(!list_empty(&rbio->stripe_cache));
 837        WARN_ON(!list_empty(&rbio->hash_list));
 838        WARN_ON(!bio_list_empty(&rbio->bio_list));
 839
 840        for (i = 0; i < rbio->nr_pages; i++) {
 841                if (rbio->stripe_pages[i]) {
 842                        __free_page(rbio->stripe_pages[i]);
 843                        rbio->stripe_pages[i] = NULL;
 844                }
 845        }
 846
 847        btrfs_put_bbio(rbio->bbio);
 848        kfree(rbio);
 849}
 850
 851static void free_raid_bio(struct btrfs_raid_bio *rbio)
 852{
 853        unlock_stripe(rbio);
 854        __free_raid_bio(rbio);
 855}
 856
 857/*
 858 * this frees the rbio and runs through all the bios in the
 859 * bio_list and calls end_io on them
 860 */
 861static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
 862{
 863        struct bio *cur = bio_list_get(&rbio->bio_list);
 864        struct bio *next;
 865
 866        if (rbio->generic_bio_cnt)
 867                btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
 868
 869        free_raid_bio(rbio);
 870
 871        while (cur) {
 872                next = cur->bi_next;
 873                cur->bi_next = NULL;
 874                if (uptodate)
 875                        set_bit(BIO_UPTODATE, &cur->bi_flags);
 876                bio_endio(cur, err);
 877                cur = next;
 878        }
 879}
 880
 881/*
 882 * end io function used by finish_rmw.  When we finally
 883 * get here, we've written a full stripe
 884 */
 885static void raid_write_end_io(struct bio *bio, int err)
 886{
 887        struct btrfs_raid_bio *rbio = bio->bi_private;
 888
 889        if (err)
 890                fail_bio_stripe(rbio, bio);
 891
 892        bio_put(bio);
 893
 894        if (!atomic_dec_and_test(&rbio->stripes_pending))
 895                return;
 896
 897        err = 0;
 898
 899        /* OK, we have written all the stripes we need to. */
 900        if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
 901                err = -EIO;
 902
 903        rbio_orig_end_io(rbio, err, 0);
 904        return;
 905}
 906
 907/*
 908 * the read/modify/write code wants to use the original bio for
 909 * any pages it included, and then use the rbio for everything
 910 * else.  This function decides if a given index (stripe number)
 911 * and page number in that stripe fall inside the original bio
 912 * or the rbio.
 913 *
 914 * if you set bio_list_only, you'll get a NULL back for any ranges
 915 * that are outside the bio_list
 916 *
 917 * This doesn't take any refs on anything, you get a bare page pointer
 918 * and the caller must bump refs as required.
 919 *
 920 * You must call index_rbio_pages once before you can trust
 921 * the answers from this function.
 922 */
 923static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
 924                                 int index, int pagenr, int bio_list_only)
 925{
 926        int chunk_page;
 927        struct page *p = NULL;
 928
 929        chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
 930
 931        spin_lock_irq(&rbio->bio_list_lock);
 932        p = rbio->bio_pages[chunk_page];
 933        spin_unlock_irq(&rbio->bio_list_lock);
 934
 935        if (p || bio_list_only)
 936                return p;
 937
 938        return rbio->stripe_pages[chunk_page];
 939}
 940
 941/*
 942 * number of pages we need for the entire stripe across all the
 943 * drives
 944 */
 945static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
 946{
 947        unsigned long nr = stripe_len * nr_stripes;
 948        return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE);
 949}
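/*
 * Example of the resulting page count (assumed values, for
 * illustration): a 64K stripe_len across 6 stripes (4 data + P + Q)
 * needs DIV_ROUND_UP(64K * 6, 4K) == 96 pages for the full stripe.
 */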
 950
 951/*
 952 * allocation and initial setup for the btrfs_raid_bio.  Note that
 953 * this does not allocate any pages for rbio->stripe_pages.
 954 */
 955static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
 956                          struct btrfs_bio *bbio, u64 stripe_len)
 957{
 958        struct btrfs_raid_bio *rbio;
 959        int nr_data = 0;
 960        int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
 961        int num_pages = rbio_nr_pages(stripe_len, real_stripes);
 962        int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
 963        void *p;
 964
 965        rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
 966                       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8),
 967                        GFP_NOFS);
 968        if (!rbio)
 969                return ERR_PTR(-ENOMEM);
 970
 971        bio_list_init(&rbio->bio_list);
 972        INIT_LIST_HEAD(&rbio->plug_list);
 973        spin_lock_init(&rbio->bio_list_lock);
 974        INIT_LIST_HEAD(&rbio->stripe_cache);
 975        INIT_LIST_HEAD(&rbio->hash_list);
 976        rbio->bbio = bbio;
 977        rbio->fs_info = root->fs_info;
 978        rbio->stripe_len = stripe_len;
 979        rbio->nr_pages = num_pages;
 980        rbio->real_stripes = real_stripes;
 981        rbio->stripe_npages = stripe_npages;
 982        rbio->faila = -1;
 983        rbio->failb = -1;
 984        atomic_set(&rbio->refs, 1);
 985        atomic_set(&rbio->error, 0);
 986        atomic_set(&rbio->stripes_pending, 0);
 987
 988        /*
 989         * the stripe_pages and bio_pages array point to the extra
 990         * memory we allocated past the end of the rbio
 991         */
 992        p = rbio + 1;
 993        rbio->stripe_pages = p;
 994        rbio->bio_pages = p + sizeof(struct page *) * num_pages;
 995        rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
 996
 997        if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
 998                nr_data = real_stripes - 1;
 999        else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1000                nr_data = real_stripes - 2;
1001        else
1002                BUG();
1003
1004        rbio->nr_data = nr_data;
1005        return rbio;
1006}
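/*
 * Layout of the single allocation made above (the sizes are whatever
 * num_pages and stripe_npages work out to; the shape is the point):
 *
 *        [ struct btrfs_raid_bio          ]
 *        [ stripe_pages: num_pages ptrs   ]  <- p
 *        [ bio_pages:    num_pages ptrs   ]  <- p + num_pages * sizeof(ptr)
 *        [ dbitmap: stripe_npages bits    ]
 *
 * so stripe_pages, bio_pages and dbitmap all live in the same kzalloc
 * and are released by the single kfree() in __free_raid_bio().
 */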
1007
1008/* allocate pages for all the stripes in the bio, including parity */
1009static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1010{
1011        int i;
1012        struct page *page;
1013
1014        for (i = 0; i < rbio->nr_pages; i++) {
1015                if (rbio->stripe_pages[i])
1016                        continue;
1017                page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1018                if (!page)
1019                        return -ENOMEM;
1020                rbio->stripe_pages[i] = page;
1021                ClearPageUptodate(page);
1022        }
1023        return 0;
1024}
1025
1026/* allocate pages for just the p/q stripes */
1027static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1028{
1029        int i;
1030        struct page *page;
1031
1032        i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
1033
1034        for (; i < rbio->nr_pages; i++) {
1035                if (rbio->stripe_pages[i])
1036                        continue;
1037                page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1038                if (!page)
1039                        return -ENOMEM;
1040                rbio->stripe_pages[i] = page;
1041        }
1042        return 0;
1043}
1044
1045/*
1046 * add a single page from a specific stripe into our list of bios for IO.
1047 * This will try to merge into existing bios if possible, and returns
1048 * zero if all went well.
1049 */
1050static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1051                            struct bio_list *bio_list,
1052                            struct page *page,
1053                            int stripe_nr,
1054                            unsigned long page_index,
1055                            unsigned long bio_max_len)
1056{
1057        struct bio *last = bio_list->tail;
1058        u64 last_end = 0;
1059        int ret;
1060        struct bio *bio;
1061        struct btrfs_bio_stripe *stripe;
1062        u64 disk_start;
1063
1064        stripe = &rbio->bbio->stripes[stripe_nr];
1065        disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);
1066
1067        /* if the device is missing, just fail this stripe */
1068        if (!stripe->dev->bdev)
1069                return fail_rbio_index(rbio, stripe_nr);
1070
1071        /* see if we can add this page onto our existing bio */
1072        if (last) {
1073                last_end = (u64)last->bi_iter.bi_sector << 9;
1074                last_end += last->bi_iter.bi_size;
1075
1076                /*
1077                 * we can't merge these if they are from different
1078                 * devices or if they are not contiguous
1079                 */
1080                if (last_end == disk_start && stripe->dev->bdev &&
1081                    test_bit(BIO_UPTODATE, &last->bi_flags) &&
1082                    last->bi_bdev == stripe->dev->bdev) {
1083                        ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
1084                        if (ret == PAGE_CACHE_SIZE)
1085                                return 0;
1086                }
1087        }
1088
1089        /* put a new bio on the list */
1090        bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
1091        if (!bio)
1092                return -ENOMEM;
1093
1094        bio->bi_iter.bi_size = 0;
1095        bio->bi_bdev = stripe->dev->bdev;
1096        bio->bi_iter.bi_sector = disk_start >> 9;
1097        set_bit(BIO_UPTODATE, &bio->bi_flags);
1098
1099        bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
1100        bio_list_add(bio_list, bio);
1101        return 0;
1102}
1103
1104/*
1105 * while we're doing the read/modify/write cycle, we could
1106 * have errors in reading pages off the disk.  This checks
1107 * for errors and if we're not able to read the page it'll
1108 * trigger parity reconstruction.  The rmw will be finished
1109 * after we've reconstructed the failed stripes
1110 */
1111static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1112{
1113        if (rbio->faila >= 0 || rbio->failb >= 0) {
1114                BUG_ON(rbio->faila == rbio->real_stripes - 1);
1115                __raid56_parity_recover(rbio);
1116        } else {
1117                finish_rmw(rbio);
1118        }
1119}
1120
1121/*
1122 * these are just the pages from the rbio array, not from anything
1123 * the FS sent down to us
1124 */
1125static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
1126{
1127        int index;
1128        index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
1129        index += page;
1130        return rbio->stripe_pages[index];
1131}
1132
1133/*
1134 * helper function to walk our bio list and populate the bio_pages array with
1135 * the result.  This seems expensive, but it is faster than constantly
1136 * searching through the bio list as we setup the IO in finish_rmw or stripe
1137 * reconstruction.
1138 *
1139 * This must be called before you trust the answers from page_in_rbio
1140 */
1141static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1142{
1143        struct bio *bio;
1144        u64 start;
1145        unsigned long stripe_offset;
1146        unsigned long page_index;
1147        struct page *p;
1148        int i;
1149
1150        spin_lock_irq(&rbio->bio_list_lock);
1151        bio_list_for_each(bio, &rbio->bio_list) {
1152                start = (u64)bio->bi_iter.bi_sector << 9;
1153                stripe_offset = start - rbio->bbio->raid_map[0];
1154                page_index = stripe_offset >> PAGE_CACHE_SHIFT;
1155
1156                for (i = 0; i < bio->bi_vcnt; i++) {
1157                        p = bio->bi_io_vec[i].bv_page;
1158                        rbio->bio_pages[page_index + i] = p;
1159                }
1160        }
1161        spin_unlock_irq(&rbio->bio_list_lock);
1162}
1163
1164/*
1165 * this is called from one of two situations.  We either
1166 * have a full stripe from the higher layers, or we've read all
1167 * the missing bits off disk.
1168 *
1169 * This will calculate the parity and then send down any
1170 * changed blocks.
1171 */
1172static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1173{
1174        struct btrfs_bio *bbio = rbio->bbio;
1175        void *pointers[rbio->real_stripes];
1176        int stripe_len = rbio->stripe_len;
1177        int nr_data = rbio->nr_data;
1178        int stripe;
1179        int pagenr;
1180        int p_stripe = -1;
1181        int q_stripe = -1;
1182        struct bio_list bio_list;
1183        struct bio *bio;
1184        int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
1185        int ret;
1186
1187        bio_list_init(&bio_list);
1188
1189        if (rbio->real_stripes - rbio->nr_data == 1) {
1190                p_stripe = rbio->real_stripes - 1;
1191        } else if (rbio->real_stripes - rbio->nr_data == 2) {
1192                p_stripe = rbio->real_stripes - 2;
1193                q_stripe = rbio->real_stripes - 1;
1194        } else {
1195                BUG();
1196        }
1197
1198        /* at this point we either have a full stripe,
1199         * or we've read the full stripe from the drive.
1200         * recalculate the parity and write the new results.
1201         *
1202         * We're not allowed to add any new bios to the
1203         * bio list here, anyone else that wants to
1204         * change this stripe needs to do their own rmw.
1205         */
1206        spin_lock_irq(&rbio->bio_list_lock);
1207        set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1208        spin_unlock_irq(&rbio->bio_list_lock);
1209
1210        atomic_set(&rbio->error, 0);
1211
1212        /*
1213         * now that we've set rmw_locked, run through the
1214         * bio list one last time and map the page pointers
1215         *
1216         * We don't cache full rbios because we're assuming
1217         * the higher layers are unlikely to use this area of
1218         * the disk again soon.  If they do use it again,
1219         * hopefully they will send another full bio.
1220         */
1221        index_rbio_pages(rbio);
1222        if (!rbio_is_full(rbio))
1223                cache_rbio_pages(rbio);
1224        else
1225                clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1226
1227        for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1228                struct page *p;
1229                /* first collect one page from each data stripe */
1230                for (stripe = 0; stripe < nr_data; stripe++) {
1231                        p = page_in_rbio(rbio, stripe, pagenr, 0);
1232                        pointers[stripe] = kmap(p);
1233                }
1234
1235                /* then add the parity stripe */
1236                p = rbio_pstripe_page(rbio, pagenr);
1237                SetPageUptodate(p);
1238                pointers[stripe++] = kmap(p);
1239
1240                if (q_stripe != -1) {
1241
1242                        /*
1243                         * raid6, add the qstripe and call the
1244                         * library function to fill in our p/q
1245                         */
1246                        p = rbio_qstripe_page(rbio, pagenr);
1247                        SetPageUptodate(p);
1248                        pointers[stripe++] = kmap(p);
1249
1250                        raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1251                                                pointers);
1252                } else {
1253                        /* raid5 */
1254                        memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1255                        run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
1256                }
1257
1258
1259                for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1260                        kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1261        }
1262
1263        /*
1264         * time to start writing.  Make bios for everything from the
1265         * higher layers (the bio_list in our rbio) and our p/q.  Ignore
1266         * everything else.
1267         */
1268        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1269                for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1270                        struct page *page;
1271                        if (stripe < rbio->nr_data) {
1272                                page = page_in_rbio(rbio, stripe, pagenr, 1);
1273                                if (!page)
1274                                        continue;
1275                        } else {
1276                                page = rbio_stripe_page(rbio, stripe, pagenr);
1277                        }
1278
1279                        ret = rbio_add_io_page(rbio, &bio_list,
1280                                       page, stripe, pagenr, rbio->stripe_len);
1281                        if (ret)
1282                                goto cleanup;
1283                }
1284        }
1285
1286        if (likely(!bbio->num_tgtdevs))
1287                goto write_data;
1288
1289        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1290                if (!bbio->tgtdev_map[stripe])
1291                        continue;
1292
1293                for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1294                        struct page *page;
1295                        if (stripe < rbio->nr_data) {
1296                                page = page_in_rbio(rbio, stripe, pagenr, 1);
1297                                if (!page)
1298                                        continue;
1299                        } else {
1300                                page = rbio_stripe_page(rbio, stripe, pagenr);
1301                        }
1302
1303                        ret = rbio_add_io_page(rbio, &bio_list, page,
1304                                               rbio->bbio->tgtdev_map[stripe],
1305                                               pagenr, rbio->stripe_len);
1306                        if (ret)
1307                                goto cleanup;
1308                }
1309        }
1310
1311write_data:
1312        atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1313        BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1314
1315        while (1) {
1316                bio = bio_list_pop(&bio_list);
1317                if (!bio)
1318                        break;
1319
1320                bio->bi_private = rbio;
1321                bio->bi_end_io = raid_write_end_io;
1322                BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
1323                submit_bio(WRITE, bio);
1324        }
1325        return;
1326
1327cleanup:
1328        rbio_orig_end_io(rbio, -EIO, 0);
1329}
1330
1331/*
1332 * helper to find the stripe number for a given bio.  Used to figure out which
1333 * stripe has failed.  This expects the bio to correspond to a physical disk,
1334 * so it looks up based on physical sector numbers.
1335 */
1336static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1337                           struct bio *bio)
1338{
1339        u64 physical = bio->bi_iter.bi_sector;
1340        u64 stripe_start;
1341        int i;
1342        struct btrfs_bio_stripe *stripe;
1343
1344        physical <<= 9;
1345
1346        for (i = 0; i < rbio->bbio->num_stripes; i++) {
1347                stripe = &rbio->bbio->stripes[i];
1348                stripe_start = stripe->physical;
1349                if (physical >= stripe_start &&
1350                    physical < stripe_start + rbio->stripe_len &&
1351                    bio->bi_bdev == stripe->dev->bdev) {
1352                        return i;
1353                }
1354        }
1355        return -1;
1356}
1357
1358/*
1359 * helper to find the stripe number for a given
1360 * bio (before mapping).  Used to figure out which stripe has
1361 * failed.  This looks up based on logical block numbers.
1362 */
1363static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1364                                   struct bio *bio)
1365{
1366        u64 logical = bio->bi_iter.bi_sector;
1367        u64 stripe_start;
1368        int i;
1369
1370        logical <<= 9;
1371
1372        for (i = 0; i < rbio->nr_data; i++) {
1373                stripe_start = rbio->bbio->raid_map[i];
1374                if (logical >= stripe_start &&
1375                    logical < stripe_start + rbio->stripe_len) {
1376                        return i;
1377                }
1378        }
1379        return -1;
1380}
1381
1382/*
1383 * returns -EIO if we had too many failures
1384 */
1385static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1386{
1387        unsigned long flags;
1388        int ret = 0;
1389
1390        spin_lock_irqsave(&rbio->bio_list_lock, flags);
1391
1392        /* we already know this stripe is bad, move on */
1393        if (rbio->faila == failed || rbio->failb == failed)
1394                goto out;
1395
1396        if (rbio->faila == -1) {
1397                /* first failure on this rbio */
1398                rbio->faila = failed;
1399                atomic_inc(&rbio->error);
1400        } else if (rbio->failb == -1) {
1401                /* second failure on this rbio */
1402                rbio->failb = failed;
1403                atomic_inc(&rbio->error);
1404        } else {
1405                ret = -EIO;
1406        }
1407out:
1408        spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1409
1410        return ret;
1411}
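/*
 * Illustration of how the failure accounting is used (raid6 case,
 * where bbio->max_errors allows two failed stripes): the first
 * distinct failure lands in faila, the second in failb, and each bumps
 * rbio->error; a third distinct failure returns -EIO here, and the
 * end_io handlers compare rbio->error against bbio->max_errors to
 * decide whether the stripe can still be reconstructed.
 */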
1412
1413/*
1414 * helper to fail a stripe based on a physical disk
1415 * bio.
1416 */
1417static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1418                           struct bio *bio)
1419{
1420        int failed = find_bio_stripe(rbio, bio);
1421
1422        if (failed < 0)
1423                return -EIO;
1424
1425        return fail_rbio_index(rbio, failed);
1426}
1427
1428/*
1429 * this sets each page in the bio uptodate.  It should only be used on private
1430 * rbio pages, nothing that comes in from the higher layers
1431 */
1432static void set_bio_pages_uptodate(struct bio *bio)
1433{
1434        int i;
1435        struct page *p;
1436
1437        for (i = 0; i < bio->bi_vcnt; i++) {
1438                p = bio->bi_io_vec[i].bv_page;
1439                SetPageUptodate(p);
1440        }
1441}
1442
1443/*
1444 * end io for the read phase of the rmw cycle.  All the bios here are physical
1445 * stripe bios we've read from the disk so we can recalculate the parity of the
1446 * stripe.
1447 *
1448 * This will usually kick off finish_rmw once all the bios are read in, but it
1449 * may trigger parity reconstruction if we had any errors along the way
1450 */
1451static void raid_rmw_end_io(struct bio *bio, int err)
1452{
1453        struct btrfs_raid_bio *rbio = bio->bi_private;
1454
1455        if (err)
1456                fail_bio_stripe(rbio, bio);
1457        else
1458                set_bio_pages_uptodate(bio);
1459
1460        bio_put(bio);
1461
1462        if (!atomic_dec_and_test(&rbio->stripes_pending))
1463                return;
1464
1465        err = 0;
1466        if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1467                goto cleanup;
1468
1469        /*
1470         * this will normally call finish_rmw to start our write
1471         * but if there are any failed stripes we'll reconstruct
1472         * from parity first
1473         */
1474        validate_rbio_for_rmw(rbio);
1475        return;
1476
1477cleanup:
1478
1479        rbio_orig_end_io(rbio, -EIO, 0);
1480}
1481
1482static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
1483{
1484        btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1485                        rmw_work, NULL, NULL);
1486
1487        btrfs_queue_work(rbio->fs_info->rmw_workers,
1488                         &rbio->work);
1489}
1490
1491static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1492{
1493        btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1494                        read_rebuild_work, NULL, NULL);
1495
1496        btrfs_queue_work(rbio->fs_info->rmw_workers,
1497                         &rbio->work);
1498}
1499
1500/*
1501 * the stripe must be locked by the caller.  It will
1502 * unlock after all the writes are done
1503 */
1504static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1505{
1506        int bios_to_read = 0;
1507        struct bio_list bio_list;
1508        int ret;
1509        int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
1510        int pagenr;
1511        int stripe;
1512        struct bio *bio;
1513
1514        bio_list_init(&bio_list);
1515
1516        ret = alloc_rbio_pages(rbio);
1517        if (ret)
1518                goto cleanup;
1519
1520        index_rbio_pages(rbio);
1521
1522        atomic_set(&rbio->error, 0);
1523        /*
1524         * build a list of bios to read all the missing parts of this
1525         * stripe
1526         */
1527        for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1528                for (pagenr = 0; pagenr < nr_pages; pagenr++) {
1529                        struct page *page;
1530                        /*
1531                         * we want to find all the pages missing from
1532                         * the rbio and read them from the disk.  If
1533                         * page_in_rbio finds a page in the bio list
1534                         * we don't need to read it off the stripe.
1535                         */
1536                        page = page_in_rbio(rbio, stripe, pagenr, 1);
1537                        if (page)
1538                                continue;
1539
1540                        page = rbio_stripe_page(rbio, stripe, pagenr);
1541                        /*
1542                         * the bio cache may have handed us an uptodate
1543                         * page.  If so, be happy and use it
1544                         */
1545                        if (PageUptodate(page))
1546                                continue;
1547
1548                        ret = rbio_add_io_page(rbio, &bio_list, page,
1549                                       stripe, pagenr, rbio->stripe_len);
1550                        if (ret)
1551                                goto cleanup;
1552                }
1553        }
1554
1555        bios_to_read = bio_list_size(&bio_list);
1556        if (!bios_to_read) {
1557                /*
1558                 * this can happen if others have merged with
1559                 * us; it means there is nothing left to read.
1560                 * But if there are missing devices it may not be
1561                 * safe to do the full stripe write yet.
1562                 */
1563                goto finish;
1564        }
1565
1566        /*
1567         * the bbio may be freed once we submit the last bio.  Make sure
1568         * not to touch it after that
1569         */
1570        atomic_set(&rbio->stripes_pending, bios_to_read);
1571        while (1) {
1572                bio = bio_list_pop(&bio_list);
1573                if (!bio)
1574                        break;
1575
1576                bio->bi_private = rbio;
1577                bio->bi_end_io = raid_rmw_end_io;
1578
1579                btrfs_bio_wq_end_io(rbio->fs_info, bio,
1580                                    BTRFS_WQ_ENDIO_RAID56);
1581
1582                BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
1583                submit_bio(READ, bio);
1584        }
1585        /* the actual write will happen once the reads are done */
1586        return 0;
1587
1588cleanup:
1589        rbio_orig_end_io(rbio, -EIO, 0);
1590        return -EIO;
1591
1592finish:
1593        validate_rbio_for_rmw(rbio);
1594        return 0;
1595}
1596
1597/*
1598 * if the upper layers pass in a full stripe, we thank them by only allocating
1599 * enough pages to hold the parity, and sending it all down quickly.
1600 */
1601static int full_stripe_write(struct btrfs_raid_bio *rbio)
1602{
1603        int ret;
1604
1605        ret = alloc_rbio_parity_pages(rbio);
1606        if (ret) {
1607                __free_raid_bio(rbio);
1608                return ret;
1609        }
1610
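            /*
             * lock_stripe_add() returns 0 when we now own the stripe lock;
             * otherwise the rbio was queued behind (or merged into) the
             * current lock holder and will be finished later
             */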
1611        ret = lock_stripe_add(rbio);
1612        if (ret == 0)
1613                finish_rmw(rbio);
1614        return 0;
1615}
1616
1617/*
1618 * partial stripe writes get handed over to async helpers.
1619 * We're really hoping to merge a few more writes into this
1620 * rbio before calculating new parity
1621 */
1622static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1623{
1624        int ret;
1625
1626        ret = lock_stripe_add(rbio);
1627        if (ret == 0)
1628                async_rmw_stripe(rbio);
1629        return 0;
1630}
1631
1632/*
1633 * sometimes while we were reading from the drive to
1634 * recalculate parity, enough new bios come in to create
1635 * a full stripe.  So we do a check here to see if we can
1636 * go directly to finish_rmw
1637 */
1638static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1639{
1640        /* head off into rmw land if we don't have a full stripe */
1641        if (!rbio_is_full(rbio))
1642                return partial_stripe_write(rbio);
1643        return full_stripe_write(rbio);
1644}
1645
1646/*
1647 * We use plugging callbacks to collect full stripes.
1648 * Any time we get a partial stripe write while plugged
1649 * we collect it into a list.  When the unplug comes down,
1650 * we sort the list by logical block number and merge
1651 * everything we can into the same rbios
1652 */
1653struct btrfs_plug_cb {
1654        struct blk_plug_cb cb;
1655        struct btrfs_fs_info *info;
1656        struct list_head rbio_list;
1657        struct btrfs_work work;
1658};
1659
1660/*
1661 * rbios on the plug list are sorted for easier merging.
1662 */
1663static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1664{
1665        struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1666                                                 plug_list);
1667        struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1668                                                 plug_list);
1669        u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1670        u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1671
1672        if (a_sector < b_sector)
1673                return -1;
1674        if (a_sector > b_sector)
1675                return 1;
1676        return 0;
1677}
1678
1679static void run_plug(struct btrfs_plug_cb *plug)
1680{
1681        struct btrfs_raid_bio *cur;
1682        struct btrfs_raid_bio *last = NULL;
1683
1684        /*
1685         * sort our plug list then try to merge
1686         * everything we can in hopes of creating full
1687         * stripes.
1688         */
1689        list_sort(NULL, &plug->rbio_list, plug_cmp);
1690        while (!list_empty(&plug->rbio_list)) {
1691                cur = list_entry(plug->rbio_list.next,
1692                                 struct btrfs_raid_bio, plug_list);
1693                list_del_init(&cur->plug_list);
1694
1695                if (rbio_is_full(cur)) {
1696                        /* we have a full stripe, send it down */
1697                        full_stripe_write(cur);
1698                        continue;
1699                }
1700                if (last) {
1701                        if (rbio_can_merge(last, cur)) {
1702                                merge_rbio(last, cur);
1703                                __free_raid_bio(cur);
1704                                continue;
1705
1706                        }
1707                        __raid56_parity_write(last);
1708                }
1709                last = cur;
1710        }
1711        if (last) {
1712                __raid56_parity_write(last);
1713        }
1714        kfree(plug);
1715}
1716
1717/*
1718 * if the unplug comes from schedule, we have to push the
1719 * work off to a helper thread
1720 */
1721static void unplug_work(struct btrfs_work *work)
1722{
1723        struct btrfs_plug_cb *plug;
1724        plug = container_of(work, struct btrfs_plug_cb, work);
1725        run_plug(plug);
1726}
1727
1728static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1729{
1730        struct btrfs_plug_cb *plug;
1731        plug = container_of(cb, struct btrfs_plug_cb, cb);
1732
1733        if (from_schedule) {
1734                btrfs_init_work(&plug->work, btrfs_rmw_helper,
1735                                unplug_work, NULL, NULL);
1736                btrfs_queue_work(plug->info->rmw_workers,
1737                                 &plug->work);
1738                return;
1739        }
1740        run_plug(plug);
1741}
1742
1743/*
1744 * our main entry point for writes from the rest of the FS.
1745 */
1746int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
1747                        struct btrfs_bio *bbio, u64 stripe_len)
1748{
1749        struct btrfs_raid_bio *rbio;
1750        struct btrfs_plug_cb *plug = NULL;
1751        struct blk_plug_cb *cb;
1752        int ret;
1753
1754        rbio = alloc_rbio(root, bbio, stripe_len);
1755        if (IS_ERR(rbio)) {
1756                btrfs_put_bbio(bbio);
1757                return PTR_ERR(rbio);
1758        }
1759        bio_list_add(&rbio->bio_list, bio);
1760        rbio->bio_list_bytes = bio->bi_iter.bi_size;
1761        rbio->operation = BTRFS_RBIO_WRITE;
1762
1763        btrfs_bio_counter_inc_noblocked(root->fs_info);
1764        rbio->generic_bio_cnt = 1;
1765
1766        /*
1767         * don't plug on full rbios, just get them out the door
1768         * as quickly as we can
1769         */
1770        if (rbio_is_full(rbio)) {
1771                ret = full_stripe_write(rbio);
1772                if (ret)
1773                        btrfs_bio_counter_dec(root->fs_info);
1774                return ret;
1775        }
1776
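            /*
             * if this task is inside a blk_plug, park the rbio on our plug
             * callback so run_plug() can sort it and merge it with
             * neighbouring writes when the plug is released.  Otherwise
             * write it out now.
             */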
1777        cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
1778                               sizeof(*plug));
1779        if (cb) {
1780                plug = container_of(cb, struct btrfs_plug_cb, cb);
1781                if (!plug->info) {
1782                        plug->info = root->fs_info;
1783                        INIT_LIST_HEAD(&plug->rbio_list);
1784                }
1785                list_add_tail(&rbio->plug_list, &plug->rbio_list);
1786                ret = 0;
1787        } else {
1788                ret = __raid56_parity_write(rbio);
1789                if (ret)
1790                        btrfs_bio_counter_dec(root->fs_info);
1791        }
1792        return ret;
1793}
1794
1795/*
1796 * all parity reconstruction happens here.  We've read in everything
1797 * we can find from the drives and this does the heavy lifting of
1798 * sorting the good from the bad.
1799 */
1800static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1801{
1802        int pagenr, stripe;
1803        void **pointers;
1804        int faila = -1, failb = -1;
1805        int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
1806        struct page *page;
1807        int err;
1808        int i;
1809
1810        pointers = kzalloc(rbio->real_stripes * sizeof(void *),
1811                           GFP_NOFS);
1812        if (!pointers) {
1813                err = -ENOMEM;
1814                goto cleanup_io;
1815        }
1816
1817        faila = rbio->faila;
1818        failb = rbio->failb;
1819
1820        if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1821                spin_lock_irq(&rbio->bio_list_lock);
1822                set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1823                spin_unlock_irq(&rbio->bio_list_lock);
1824        }
1825
1826        index_rbio_pages(rbio);
1827
1828        for (pagenr = 0; pagenr < nr_pages; pagenr++) {
1829                /*
1830                 * When doing a parity scrub, the dbitmap marks the
1831                 * horizontal stripe pages that actually contain data.
1832                 */
1833                if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1834                    !test_bit(pagenr, rbio->dbitmap))
1835                        continue;
1836
1837                /* setup our array of pointers with pages
1838                 * from each stripe
1839                 */
1840                for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1841                        /*
1842                         * if we're rebuilding a read, we have to use
1843                         * pages from the bio list
1844                         */
1845                        if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
1846                            (stripe == faila || stripe == failb)) {
1847                                page = page_in_rbio(rbio, stripe, pagenr, 0);
1848                        } else {
1849                                page = rbio_stripe_page(rbio, stripe, pagenr);
1850                        }
1851                        pointers[stripe] = kmap(page);
1852                }
1853
1854                /* all raid6 handling here */
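                    /*
                     * Roughly, the cases handled below are:
                     *  - single failure (failb < 0): if only P failed we
                     *    bail with -EIO (see the TODO below), anything else
                     *    is rebuilt raid5 style at pstripe:
                     *  - faila/failb sorted so faila < failb, failb is Q:
                     *    if faila is P both parities are gone and we give
                     *    up, otherwise rebuild the data from P
                     *  - failb is P: rebuild the data from Q with
                     *    raid6_datap_recov()
                     *  - otherwise two data stripes failed: raid6_2data_recov()
                     */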
1855                if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1856                        /*
1857                         * single failure, rebuild from parity raid5
1858                         * style
1859                         */
1860                        if (failb < 0) {
1861                                if (faila == rbio->nr_data) {
1862                                        /*
1863                                         * Just the P stripe has failed, without
1864                                         * a bad data or Q stripe.
1865                                         * TODO, we should redo the xor here.
1866                                         */
1867                                        err = -EIO;
1868                                        goto cleanup;
1869                                }
1870                                /*
1871                                 * a single failure in raid6 is rebuilt
1872                                 * in the pstripe code below
1873                                 */
1874                                goto pstripe;
1875                        }
1876
1877                        /* make sure our ps and qs are in order */
1878                        if (faila > failb) {
1879                                int tmp = failb;
1880                                failb = faila;
1881                                faila = tmp;
1882                        }
1883
1884                        /* if the q stripe has failed, do a pstripe reconstruction
1885                         * from the xors.
1886                         * If both the q stripe and the P stripe have failed, we're
1887                         * here due to a crc mismatch and we can't give them the
1888                         * data they want
1889                         */
1890                        if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1891                                if (rbio->bbio->raid_map[faila] ==
1892                                    RAID5_P_STRIPE) {
1893                                        err = -EIO;
1894                                        goto cleanup;
1895                                }
1896                                /*
1897                                 * otherwise we have one bad data stripe and
1898                                 * a good P stripe.  raid5!
1899                                 */
1900                                goto pstripe;
1901                        }
1902
1903                        if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1904                                raid6_datap_recov(rbio->real_stripes,
1905                                                  PAGE_SIZE, faila, pointers);
1906                        } else {
1907                                raid6_2data_recov(rbio->real_stripes,
1908                                                  PAGE_SIZE, faila, failb,
1909                                                  pointers);
1910                        }
1911                } else {
1912                        void *p;
1913
1914                        /* rebuild from P stripe here (raid5 or raid6) */
1915                        BUG_ON(failb != -1);
1916pstripe:
1917                        /* Copy parity block into failed block to start with */
1918                        memcpy(pointers[faila],
1919                               pointers[rbio->nr_data],
1920                               PAGE_CACHE_SIZE);
1921
1922                        /* rearrange the pointer array */
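                            /*
                             * e.g. with nr_data == 4 and faila == 1 the data
                             * slots go from [D0, D1(bad, now a copy of P),
                             * D2, D3] to [D0, D2, D3, D1], so the xor below
                             * folds D0 ^ D2 ^ D3 into the copy of P and
                             * leaves the reconstructed D1 in the failed page
                             */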
1923                        p = pointers[faila];
1924                        for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1925                                pointers[stripe] = pointers[stripe + 1];
1926                        pointers[rbio->nr_data - 1] = p;
1927
1928                        /* xor in the rest */
1929                        run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
1930                }
1931                /* if we're doing this rebuild as part of an rmw, go through
1932                 * and set all of our private rbio pages in the
1933                 * failed stripes as uptodate.  This way finish_rmw will
1934                 * know they can be trusted.  If this was a read reconstruction,
1935                 * other endio functions will fiddle the uptodate bits
1936                 */
1937                if (rbio->operation == BTRFS_RBIO_WRITE) {
1938                        for (i = 0;  i < nr_pages; i++) {
1939                                if (faila != -1) {
1940                                        page = rbio_stripe_page(rbio, faila, i);
1941                                        SetPageUptodate(page);
1942                                }
1943                                if (failb != -1) {
1944                                        page = rbio_stripe_page(rbio, failb, i);
1945                                        SetPageUptodate(page);
1946                                }
1947                        }
1948                }
1949                for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1950                        /*
1951                         * if we're rebuilding a read, we have to use
1952                         * pages from the bio list
1953                         */
1954                        if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
1955                            (stripe == faila || stripe == failb)) {
1956                                page = page_in_rbio(rbio, stripe, pagenr, 0);
1957                        } else {
1958                                page = rbio_stripe_page(rbio, stripe, pagenr);
1959                        }
1960                        kunmap(page);
1961                }
1962        }
1963
1964        err = 0;
1965cleanup:
1966        kfree(pointers);
1967
1968cleanup_io:
1969        if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1970                if (err == 0)
1971                        cache_rbio_pages(rbio);
1972                else
1973                        clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1974
1975                rbio_orig_end_io(rbio, err, err == 0);
1976        } else if (err == 0) {
1977                rbio->faila = -1;
1978                rbio->failb = -1;
1979
1980                if (rbio->operation == BTRFS_RBIO_WRITE)
1981                        finish_rmw(rbio);
1982                else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
1983                        finish_parity_scrub(rbio, 0);
1984                else
1985                        BUG();
1986        } else {
1987                rbio_orig_end_io(rbio, err, 0);
1988        }
1989}
1990
1991/*
1992 * This is called only for stripes we've read from disk to
1993 * reconstruct the parity.
1994 */
1995static void raid_recover_end_io(struct bio *bio, int err)
1996{
1997        struct btrfs_raid_bio *rbio = bio->bi_private;
1998
1999        /*
2000         * we only read stripe pages off the disk, set them
2001         * up to date if there were no errors
2002         */
2003        if (err)
2004                fail_bio_stripe(rbio, bio);
2005        else
2006                set_bio_pages_uptodate(bio);
2007        bio_put(bio);
2008
2009        if (!atomic_dec_and_test(&rbio->stripes_pending))
2010                return;
2011
2012        if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2013                rbio_orig_end_io(rbio, -EIO, 0);
2014        else
2015                __raid_recover_end_io(rbio);
2016}
2017
2018/*
2019 * reads everything we need off the disk to reconstruct
2020 * the parity. endio handlers trigger final reconstruction
2021 * when the IO is done.
2022 *
2023 * This is used both for reads from the higher layers and for
2024 * parity construction required to finish a rmw cycle.
2025 */
2026static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2027{
2028        int bios_to_read = 0;
2029        struct bio_list bio_list;
2030        int ret;
2031        int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
2032        int pagenr;
2033        int stripe;
2034        struct bio *bio;
2035
2036        bio_list_init(&bio_list);
2037
2038        ret = alloc_rbio_pages(rbio);
2039        if (ret)
2040                goto cleanup;
2041
2042        atomic_set(&rbio->error, 0);
2043
2044        /*
2045         * read everything that hasn't failed.  Thanks to the
2046         * stripe cache, it is possible that some or all of these
2047         * pages are going to be uptodate.
2048         */
2049        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2050                if (rbio->faila == stripe || rbio->failb == stripe) {
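                            /* a failed stripe still counts against max_errors */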
2051                        atomic_inc(&rbio->error);
2052                        continue;
2053                }
2054
2055                for (pagenr = 0; pagenr < nr_pages; pagenr++) {
2056                        struct page *p;
2057
2058                        /*
2059                         * the rmw code may have already read this
2060                         * page in
2061                         */
2062                        p = rbio_stripe_page(rbio, stripe, pagenr);
2063                        if (PageUptodate(p))
2064                                continue;
2065
2066                        ret = rbio_add_io_page(rbio, &bio_list,
2067                                       rbio_stripe_page(rbio, stripe, pagenr),
2068                                       stripe, pagenr, rbio->stripe_len);
2069                        if (ret < 0)
2070                                goto cleanup;
2071                }
2072        }
2073
2074        bios_to_read = bio_list_size(&bio_list);
2075        if (!bios_to_read) {
2076                /*
2077                 * we might have no bios to read just because the pages
2078                 * were up to date, or we might have no bios to read because
2079                 * the devices were gone.
2080                 */
2081                if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2082                        __raid_recover_end_io(rbio);
2083                        goto out;
2084                } else {
2085                        goto cleanup;
2086                }
2087        }
2088
2089        /*
2090         * the bbio may be freed once we submit the last bio.  Make sure
2091         * not to touch it after that
2092         */
2093        atomic_set(&rbio->stripes_pending, bios_to_read);
2094        while (1) {
2095                bio = bio_list_pop(&bio_list);
2096                if (!bio)
2097                        break;
2098
2099                bio->bi_private = rbio;
2100                bio->bi_end_io = raid_recover_end_io;
2101
2102                btrfs_bio_wq_end_io(rbio->fs_info, bio,
2103                                    BTRFS_WQ_ENDIO_RAID56);
2104
2105                BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
2106                submit_bio(READ, bio);
2107        }
2108out:
2109        return 0;
2110
2111cleanup:
2112        if (rbio->operation == BTRFS_RBIO_READ_REBUILD)
2113                rbio_orig_end_io(rbio, -EIO, 0);
2114        return -EIO;
2115}
2116
2117/*
2118 * the main entry point for reads from the higher layers.  This
2119 * is really only called when the normal read path had a failure,
2120 * so we assume the bio they send down corresponds to a failed part
2121 * of the drive.
2122 */
2123int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2124                          struct btrfs_bio *bbio, u64 stripe_len,
2125                          int mirror_num, int generic_io)
2126{
2127        struct btrfs_raid_bio *rbio;
2128        int ret;
2129
2130        rbio = alloc_rbio(root, bbio, stripe_len);
2131        if (IS_ERR(rbio)) {
2132                if (generic_io)
2133                        btrfs_put_bbio(bbio);
2134                return PTR_ERR(rbio);
2135        }
2136
2137        rbio->operation = BTRFS_RBIO_READ_REBUILD;
2138        bio_list_add(&rbio->bio_list, bio);
2139        rbio->bio_list_bytes = bio->bi_iter.bi_size;
2140
2141        rbio->faila = find_logical_bio_stripe(rbio, bio);
2142        if (rbio->faila == -1) {
2143                BUG();
2144                if (generic_io)
2145                        btrfs_put_bbio(bbio);
2146                kfree(rbio);
2147                return -EIO;
2148        }
2149
2150        if (generic_io) {
2151                btrfs_bio_counter_inc_noblocked(root->fs_info);
2152                rbio->generic_bio_cnt = 1;
2153        } else {
2154                btrfs_get_bbio(bbio);
2155        }
2156
2157        /*
2158         * reconstruct from the q stripe if they are
2159         * asking for mirror 3
2160         */
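            /*
             * marking the P stripe (real_stripes - 2) as failed sends the
             * recovery code down the raid6_datap_recov() path, which
             * rebuilds the data from the Q stripe
             */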
2161        if (mirror_num == 3)
2162                rbio->failb = rbio->real_stripes - 2;
2163
2164        ret = lock_stripe_add(rbio);
2165
2166        /*
2167         * __raid56_parity_recover will end the bio with
2168         * any errors it hits.  We don't want to return
2169         * its error value up the stack because our caller
2170         * will end up calling bio_endio with any nonzero
2171         * return
2172         */
2173        if (ret == 0)
2174                __raid56_parity_recover(rbio);
2175        /*
2176         * our rbio has been added to the list of
2177         * rbios that will be handled after the
2178         * current lock owner is done
2179         */
2180        return 0;
2181
2182}
2183
2184static void rmw_work(struct btrfs_work *work)
2185{
2186        struct btrfs_raid_bio *rbio;
2187
2188        rbio = container_of(work, struct btrfs_raid_bio, work);
2189        raid56_rmw_stripe(rbio);
2190}
2191
2192static void read_rebuild_work(struct btrfs_work *work)
2193{
2194        struct btrfs_raid_bio *rbio;
2195
2196        rbio = container_of(work, struct btrfs_raid_bio, work);
2197        __raid56_parity_recover(rbio);
2198}
2199
2200/*
2201 * The following code is used to scrub/replace the parity stripe
2202 *
2203 * Note: We need to make sure all the pages added to the scrub/replace
2204 * raid bio are correct and are not changed during the scrub/replace.  That
2205 * is, those pages hold only metadata or file data covered by a checksum.
2206 */
2207
2208struct btrfs_raid_bio *
2209raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
2210                               struct btrfs_bio *bbio, u64 stripe_len,
2211                               struct btrfs_device *scrub_dev,
2212                               unsigned long *dbitmap, int stripe_nsectors)
2213{
2214        struct btrfs_raid_bio *rbio;
2215        int i;
2216
2217        rbio = alloc_rbio(root, bbio, stripe_len);
2218        if (IS_ERR(rbio))
2219                return NULL;
2220        bio_list_add(&rbio->bio_list, bio);
2221        /*
2222         * This is a special bio which is used to hold the completion handler
2223         * and make the scrub rbio similar to the other rbio types
2224         */
2225        ASSERT(!bio->bi_iter.bi_size);
2226        rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2227
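            /* find which stripe of this rbio lives on the device being scrubbed */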
2228        for (i = 0; i < rbio->real_stripes; i++) {
2229                if (bbio->stripes[i].dev == scrub_dev) {
2230                        rbio->scrubp = i;
2231                        break;
2232                }
2233        }
2234
2235        /* For now we only support a sectorsize equal to the page size */
2236        ASSERT(root->sectorsize == PAGE_SIZE);
2237        ASSERT(rbio->stripe_npages == stripe_nsectors);
2238        bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2239
2240        return rbio;
2241}
2242
2243void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
2244                                   struct page *page, u64 logical)
2245{
2246        int stripe_offset;
2247        int index;
2248
2249        ASSERT(logical >= rbio->bbio->raid_map[0]);
2250        ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2251                                rbio->stripe_len * rbio->nr_data);
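            /*
             * map the logical address to a page index within the data area
             * of the full stripe; raid_map[0] is the logical start of the
             * first data stripe
             */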
2252        stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2253        index = stripe_offset >> PAGE_CACHE_SHIFT;
2254        rbio->bio_pages[index] = page;
2255}
2256
2257/*
2258 * We only scrub the parity of the horizontal stripes for which we have
2259 * correct data, so we don't need to allocate pages for every stripe.
2260 */
2261static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2262{
2263        int i;
2264        int bit;
2265        int index;
2266        struct page *page;
2267
2268        for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2269                for (i = 0; i < rbio->real_stripes; i++) {
2270                        index = i * rbio->stripe_npages + bit;
2271                        if (rbio->stripe_pages[index])
2272                                continue;
2273
2274                        page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2275                        if (!page)
2276                                return -ENOMEM;
2277                        rbio->stripe_pages[index] = page;
2278                        ClearPageUptodate(page);
2279                }
2280        }
2281        return 0;
2282}
2283
2284/*
2285 * end io function used by finish_parity_scrub.  When we finally
2286 * get here, we've written out the scrubbed parity
2287 */
2288static void raid_write_parity_end_io(struct bio *bio, int err)
2289{
2290        struct btrfs_raid_bio *rbio = bio->bi_private;
2291
2292        if (err)
2293                fail_bio_stripe(rbio, bio);
2294
2295        bio_put(bio);
2296
2297        if (!atomic_dec_and_test(&rbio->stripes_pending))
2298                return;
2299
2300        err = 0;
2301
2302        if (atomic_read(&rbio->error))
2303                err = -EIO;
2304
2305        rbio_orig_end_io(rbio, err, 0);
2306}
2307
2308static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2309                                         int need_check)
2310{
2311        struct btrfs_bio *bbio = rbio->bbio;
2312        void *pointers[rbio->real_stripes];
2313        DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
2314        int nr_data = rbio->nr_data;
2315        int stripe;
2316        int pagenr;
2317        int p_stripe = -1;
2318        int q_stripe = -1;
2319        struct page *p_page = NULL;
2320        struct page *q_page = NULL;
2321        struct bio_list bio_list;
2322        struct bio *bio;
2323        int is_replace = 0;
2324        int ret;
2325
2326        bio_list_init(&bio_list);
2327
2328        if (rbio->real_stripes - rbio->nr_data == 1) {
2329                p_stripe = rbio->real_stripes - 1;
2330        } else if (rbio->real_stripes - rbio->nr_data == 2) {
2331                p_stripe = rbio->real_stripes - 2;
2332                q_stripe = rbio->real_stripes - 1;
2333        } else {
2334                BUG();
2335        }
2336
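            /*
             * keep a copy of dbitmap for the replace target: the
             * verification pass below may clear dbitmap bits whose parity
             * is already correct, but the replace target still needs every
             * requested page written
             */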
2337        if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2338                is_replace = 1;
2339                bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2340        }
2341
2342        /*
2343         * The higher layers (the scrubber) are unlikely to
2344         * use this area of the disk again soon, so don't
2345         * cache it.
2346         */
2347        clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2348
2349        if (!need_check)
2350                goto writeback;
2351
2352        p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2353        if (!p_page)
2354                goto cleanup;
2355        SetPageUptodate(p_page);
2356
2357        if (q_stripe != -1) {
2358                q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2359                if (!q_page) {
2360                        __free_page(p_page);
2361                        goto cleanup;
2362                }
2363                SetPageUptodate(q_page);
2364        }
2365
2366        atomic_set(&rbio->error, 0);
2367
2368        for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2369                struct page *p;
2370                void *parity;
2371                /* first collect one page from each data stripe */
2372                for (stripe = 0; stripe < nr_data; stripe++) {
2373                        p = page_in_rbio(rbio, stripe, pagenr, 0);
2374                        pointers[stripe] = kmap(p);
2375                }
2376
2377                /* then add the parity stripe */
2378                pointers[stripe++] = kmap(p_page);
2379
2380                if (q_stripe != -1) {
2381
2382                        /*
2383                         * raid6, add the qstripe and call the
2384                         * library function to fill in our p/q
2385                         */
2386                        pointers[stripe++] = kmap(q_page);
2387
2388                        raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2389                                                pointers);
2390                } else {
2391                        /* raid5 */
2392                        memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2393                        run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
2394                }
2395
2396                /* Check the scrubbed parity and repair it if needed */
2397                p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2398                parity = kmap(p);
2399                if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
2400                        memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
2401                else
2402                        /* The parity is correct, no writeback needed */
2403                        bitmap_clear(rbio->dbitmap, pagenr, 1);
2404                kunmap(p);
2405
2406                for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2407                        kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2408        }
2409
2410        __free_page(p_page);
2411        if (q_page)
2412                __free_page(q_page);
2413
2414writeback:
2415        /*
2416         * time to start writing.  Make bios for the scrubbed parity
2417         * pages that still need to hit the disk (and, below, the
2418         * replace target).  Ignore everything else.
2419         */
2420        for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2421                struct page *page;
2422
2423                page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2424                ret = rbio_add_io_page(rbio, &bio_list,
2425                               page, rbio->scrubp, pagenr, rbio->stripe_len);
2426                if (ret)
2427                        goto cleanup;
2428        }
2429
2430        if (!is_replace)
2431                goto submit_write;
2432
2433        for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2434                struct page *page;
2435
2436                page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2437                ret = rbio_add_io_page(rbio, &bio_list, page,
2438                                       bbio->tgtdev_map[rbio->scrubp],
2439                                       pagenr, rbio->stripe_len);
2440                if (ret)
2441                        goto cleanup;
2442        }
2443
2444submit_write:
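            /* nr_data is reused here simply as the number of bios to submit */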
2445        nr_data = bio_list_size(&bio_list);
2446        if (!nr_data) {
2447                /* Every parity is right */
2448                rbio_orig_end_io(rbio, 0, 0);
2449                return;
2450        }
2451
2452        atomic_set(&rbio->stripes_pending, nr_data);
2453
2454        while (1) {
2455                bio = bio_list_pop(&bio_list);
2456                if (!bio)
2457                        break;
2458
2459                bio->bi_private = rbio;
2460                bio->bi_end_io = raid_write_parity_end_io;
2461                BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
2462                submit_bio(WRITE, bio);
2463        }
2464        return;
2465
2466cleanup:
2467        rbio_orig_end_io(rbio, -EIO, 0);
2468}
2469
2470static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2471{
2472        if (stripe >= 0 && stripe < rbio->nr_data)
2473                return 1;
2474        return 0;
2475}
2476
2477/*
2478 * While we're doing the parity check and repair, we could have errors
2479 * in reading pages off the disk.  This checks for errors and if we're
2480 * not able to read the page it'll trigger parity reconstruction.  The
2481 * parity scrub will be finished after we've reconstructed the failed
2482 * stripes
2483 */
2484static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2485{
2486        if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2487                goto cleanup;
2488
2489        if (rbio->faila >= 0 || rbio->failb >= 0) {
2490                int dfail = 0, failp = -1;
2491
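                    /*
                     * dfail counts failed data stripes, failp remembers the
                     * index of a failed parity stripe
                     */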
2492                if (is_data_stripe(rbio, rbio->faila))
2493                        dfail++;
2494                else if (is_parity_stripe(rbio->faila))
2495                        failp = rbio->faila;
2496
2497                if (is_data_stripe(rbio, rbio->failb))
2498                        dfail++;
2499                else if (is_parity_stripe(rbio->failb))
2500                        failp = rbio->failb;
2501
2502                /*
2503                 * Because a parity stripe that is being scrubbed cannot
2504                 * be used to repair data, our repair capability is reduced
2505                 * by one.  (In the RAID5 case we cannot repair anything.)
2506                 */
2507                if (dfail > rbio->bbio->max_errors - 1)
2508                        goto cleanup;
2509
2510                /*
2511                 * If all the data is good, then only the parity is bad;
2512                 * just repair the parity.
2513                 */
2514                if (dfail == 0) {
2515                        finish_parity_scrub(rbio, 0);
2516                        return;
2517                }
2518
2519                /*
2520                 * At this point we have one corrupted data stripe and one
2521                 * corrupted parity on RAID6.  If the corrupted parity is the
2522                 * one being scrubbed, we can luckily use the other parity to
2523                 * repair the data; otherwise the data stripe cannot be repaired.
2524                 */
2525                if (failp != rbio->scrubp)
2526                        goto cleanup;
2527
2528                __raid_recover_end_io(rbio);
2529        } else {
2530                finish_parity_scrub(rbio, 1);
2531        }
2532        return;
2533
2534cleanup:
2535        rbio_orig_end_io(rbio, -EIO, 0);
2536}
2537
2538/*
2539 * end io for the read phase of the parity scrub cycle.  All the bios here are
2540 * physical stripe bios we've read from the disk so we can recalculate the
2541 * parity of the stripe.
2542 *
2543 * This will usually kick off finish_parity_scrub once all the bios are read
2544 * in, but it may trigger parity reconstruction if we had any errors along the way
2545 */
2546static void raid56_parity_scrub_end_io(struct bio *bio, int err)
2547{
2548        struct btrfs_raid_bio *rbio = bio->bi_private;
2549
2550        if (err)
2551                fail_bio_stripe(rbio, bio);
2552        else
2553                set_bio_pages_uptodate(bio);
2554
2555        bio_put(bio);
2556
2557        if (!atomic_dec_and_test(&rbio->stripes_pending))
2558                return;
2559
2560        /*
2561         * this will normally call finish_rmw to start our write
2562         * but if there are any failed stripes we'll reconstruct
2563         * from parity first
2564         */
2565        validate_rbio_for_parity_scrub(rbio);
2566}
2567
2568static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2569{
2570        int bios_to_read = 0;
2571        struct bio_list bio_list;
2572        int ret;
2573        int pagenr;
2574        int stripe;
2575        struct bio *bio;
2576
2577        ret = alloc_rbio_essential_pages(rbio);
2578        if (ret)
2579                goto cleanup;
2580
2581        bio_list_init(&bio_list);
2582
2583        atomic_set(&rbio->error, 0);
2584        /*
2585         * build a list of bios to read all the missing parts of this
2586         * stripe
2587         */
2588        for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2589                for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2590                        struct page *page;
2591                        /*
2592                         * we want to find all the pages missing from
2593                         * the rbio and read them from the disk.  If
2594                         * page_in_rbio finds a page in the bio list
2595                         * we don't need to read it off the stripe.
2596                         */
2597                        page = page_in_rbio(rbio, stripe, pagenr, 1);
2598                        if (page)
2599                                continue;
2600
2601                        page = rbio_stripe_page(rbio, stripe, pagenr);
2602                        /*
2603                         * the bio cache may have handed us an uptodate
2604                         * page.  If so, be happy and use it
2605                         */
2606                        if (PageUptodate(page))
2607                                continue;
2608
2609                        ret = rbio_add_io_page(rbio, &bio_list, page,
2610                                       stripe, pagenr, rbio->stripe_len);
2611                        if (ret)
2612                                goto cleanup;
2613                }
2614        }
2615
2616        bios_to_read = bio_list_size(&bio_list);
2617        if (!bios_to_read) {
2618                /*
2619                 * this can happen if others have merged with
2620                 * us; it means there is nothing left to read.
2621                 * But if there are missing devices it may not be
2622                 * safe to do the full stripe write yet.
2623                 */
2624                goto finish;
2625        }
2626
2627        /*
2628         * the bbio may be freed once we submit the last bio.  Make sure
2629         * not to touch it after that
2630         */
2631        atomic_set(&rbio->stripes_pending, bios_to_read);
2632        while (1) {
2633                bio = bio_list_pop(&bio_list);
2634                if (!bio)
2635                        break;
2636
2637                bio->bi_private = rbio;
2638                bio->bi_end_io = raid56_parity_scrub_end_io;
2639
2640                btrfs_bio_wq_end_io(rbio->fs_info, bio,
2641                                    BTRFS_WQ_ENDIO_RAID56);
2642
2643                BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
2644                submit_bio(READ, bio);
2645        }
2646        /* the actual write will happen once the reads are done */
2647        return;
2648
2649cleanup:
2650        rbio_orig_end_io(rbio, -EIO, 0);
2651        return;
2652
2653finish:
2654        validate_rbio_for_parity_scrub(rbio);
2655}
2656
2657static void scrub_parity_work(struct btrfs_work *work)
2658{
2659        struct btrfs_raid_bio *rbio;
2660
2661        rbio = container_of(work, struct btrfs_raid_bio, work);
2662        raid56_parity_scrub_stripe(rbio);
2663}
2664
2665static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2666{
2667        btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2668                        scrub_parity_work, NULL, NULL);
2669
2670        btrfs_queue_work(rbio->fs_info->rmw_workers,
2671                         &rbio->work);
2672}
2673
2674void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2675{
2676        if (!lock_stripe_add(rbio))
2677                async_scrub_parity(rbio);
2678}
2679