linux/fs/btrfs/volumes.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
        mutex_unlock(&root->fs_info->chunk_mutex);
}

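/*
 * Free a btrfs_fs_devices structure and every btrfs_device hanging off it.
 * The caller must have already closed the devices; the WARN_ON below fires
 * if the fs_devices is still marked opened.
 */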
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;
        WARN_ON(fs_devices->opened);
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                rcu_string_free(device->name);
                kfree(device);
        }
        kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
                                 enum kobject_action action)
{
        int ret;

        ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
        if (ret)
                pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
                        action,
                        kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
                        &disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next,
                                        struct btrfs_fs_devices, list);
                list_del(&fs_devices->list);
                free_fs_devices(fs_devices);
        }
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
                                                   u64 devid, u8 *uuid)
{
        struct btrfs_device *dev;

        list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid &&
                    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
                        return dev;
                }
        }
        return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}

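/*
 * Open the block device at @device_path, optionally flush its page cache,
 * force the block size to 4K and read the btrfs super block from it.
 * On failure both *bdev and *bh are cleared so callers can inspect them
 * unconditionally.
 */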
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
                      int flush, struct block_device **bdev,
                      struct buffer_head **bh)
{
        int ret;

        *bdev = blkdev_get_by_path(device_path, flags, holder);

        if (IS_ERR(*bdev)) {
                ret = PTR_ERR(*bdev);
                printk(KERN_INFO "btrfs: open %s failed\n", device_path);
                goto error;
        }

        if (flush)
                filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
        ret = set_blocksize(*bdev, 4096);
        if (ret) {
                blkdev_put(*bdev, flags);
                goto error;
        }
        invalidate_bdev(*bdev);
        *bh = btrfs_read_dev_super(*bdev);
        if (!*bh) {
                ret = -EINVAL;
                blkdev_put(*bdev, flags);
                goto error;
        }

        return 0;

error:
        *bdev = NULL;
        *bh = NULL;
        return ret;
}

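/*
 * Splice an unprocessed run of bios (from @head to @tail) back onto the
 * front of the device's pending list so they are retried first on the
 * next pass.
 */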
static void requeue_list(struct btrfs_pending_bios *pending_bios,
                        struct bio *head, struct bio *tail)
{
        struct bio *old_head;

        old_head = pending_bios->head;
        pending_bios->head = head;
        if (pending_bios->tail)
                tail->bi_next = old_head;
        else
                pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
        struct bio *pending;
        struct backing_dev_info *bdi;
        struct btrfs_fs_info *fs_info;
        struct btrfs_pending_bios *pending_bios;
        struct bio *tail;
        struct bio *cur;
        int again = 0;
        unsigned long num_run;
        unsigned long batch_run = 0;
        unsigned long limit;
        unsigned long last_waited = 0;
        int force_reg = 0;
        int sync_pending = 0;
        struct blk_plug plug;

        /*
         * this function runs all the bios we've collected for
         * a particular device.  We don't want to wander off to
         * another device without first sending all of these down.
         * So, set up a plug here and finish it off before we return
         */
        blk_start_plug(&plug);

        bdi = blk_get_backing_dev_info(device->bdev);
        fs_info = device->dev_root->fs_info;
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

loop:
        spin_lock(&device->io_lock);

loop_lock:
        num_run = 0;

        /* take all the bios off the list at once and process them
         * later on (without the lock held).  But, remember the
         * tail and other pointers so the bios can be properly reinserted
         * into the list if we hit congestion
         */
        if (!force_reg && device->pending_sync_bios.head) {
                pending_bios = &device->pending_sync_bios;
                force_reg = 1;
        } else {
                pending_bios = &device->pending_bios;
                force_reg = 0;
        }

        pending = pending_bios->head;
        tail = pending_bios->tail;
        WARN_ON(pending && !tail);

        /*
         * if pending was null this time around, no bios need processing
         * at all and we can stop.  Otherwise it'll loop back up again
         * and do an additional check so no bios are missed.
         *
         * device->running_pending is used to synchronize with the
         * schedule_bio code.
         */
        if (device->pending_sync_bios.head == NULL &&
            device->pending_bios.head == NULL) {
                again = 0;
                device->running_pending = 0;
        } else {
                again = 1;
                device->running_pending = 1;
        }

        pending_bios->head = NULL;
        pending_bios->tail = NULL;

        spin_unlock(&device->io_lock);

        while (pending) {

                rmb();
                /* we want to work on both lists, but do more bios on the
                 * sync list than the regular list
                 */
                if ((num_run > 32 &&
                    pending_bios != &device->pending_sync_bios &&
                    device->pending_sync_bios.head) ||
                   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
                    device->pending_bios.head)) {
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        goto loop_lock;
                }

                cur = pending;
                pending = pending->bi_next;
                cur->bi_next = NULL;

                if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
                    waitqueue_active(&fs_info->async_submit_wait))
                        wake_up(&fs_info->async_submit_wait);

                BUG_ON(atomic_read(&cur->bi_cnt) == 0);

                /*
                 * if we're doing the sync list, record that our
                 * plug has some sync requests on it
                 *
                 * If we're doing the regular list and there are
                 * sync requests sitting around, unplug before
                 * we add more
                 */
                if (pending_bios == &device->pending_sync_bios) {
                        sync_pending = 1;
                } else if (sync_pending) {
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                        sync_pending = 0;
                }

                btrfsic_submit_bio(cur->bi_rw, cur);
                num_run++;
                batch_run++;
                if (need_resched())
                        cond_resched();

                /*
                 * we made progress, there is more work to do and the bdi
                 * is now congested.  Back off and let other work structs
                 * run instead
                 */
                if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
                    fs_info->fs_devices->open_devices > 1) {
                        struct io_context *ioc;

                        ioc = current->io_context;

                        /*
                         * the main goal here is that we don't want to
                         * block if we're going to be able to submit
                         * more requests without blocking.
                         *
                         * This code does two great things, it pokes into
                         * the elevator code from a filesystem _and_
                         * it makes assumptions about how batching works.
                         */
                        if (ioc && ioc->nr_batch_requests > 0 &&
                            time_before(jiffies, ioc->last_waited + HZ/50UL) &&
                            (last_waited == 0 ||
                             ioc->last_waited == last_waited)) {
                                /*
                                 * we want to go through our batch of
                                 * requests and stop.  So, we copy out
                                 * the ioc->last_waited time and test
                                 * against it before looping
                                 */
                                last_waited = ioc->last_waited;
                                if (need_resched())
                                        cond_resched();
                                continue;
                        }
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        device->running_pending = 1;

                        spin_unlock(&device->io_lock);
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
                /* unplug every 64 requests just for good measure */
                if (batch_run % 64 == 0) {
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                        sync_pending = 0;
                }
        }

        cond_resched();
        if (again)
                goto loop;

        spin_lock(&device->io_lock);
        if (device->pending_bios.head || device->pending_sync_bios.head)
                goto loop_lock;
        spin_unlock(&device->io_lock);

done:
        blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, work);
        run_scheduled_bios(device);
}

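/*
 * Track a scanned device: create a new btrfs_fs_devices for an unseen fsid,
 * add a new btrfs_device for an unseen devid, or just refresh the recorded
 * path when the device is already known.  Also keeps latest_devid and
 * latest_trans pointing at the device with the newest generation.
 */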
static noinline int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        struct rcu_string *name;
        u64 found_transid = btrfs_super_generation(disk_super);

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
                INIT_LIST_HEAD(&fs_devices->alloc_list);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
                mutex_init(&fs_devices->device_list_mutex);
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                if (fs_devices->opened)
                        return -EBUSY;

                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->devid = devid;
                device->dev_stats_valid = 0;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                spin_lock_init(&device->io_lock);

                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
                        kfree(device);
                        return -ENOMEM;
                }
                rcu_assign_pointer(device->name, name);
                INIT_LIST_HEAD(&device->dev_alloc_list);

                /* init readahead state */
                spin_lock_init(&device->reada_lock);
                device->reada_curr_zone = NULL;
                atomic_set(&device->reada_in_flight, 0);
                device->reada_next = 0;
                INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
                INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

                mutex_lock(&fs_devices->device_list_mutex);
                list_add_rcu(&device->dev_list, &fs_devices->devices);
                mutex_unlock(&fs_devices->device_list_mutex);

                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        } else if (!device->name || strcmp(device->name->str, path)) {
                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name)
                        return -ENOMEM;
                rcu_string_free(device->name);
                rcu_assign_pointer(device->name, name);
                if (device->missing) {
                        fs_devices->missing_devices--;
                        device->missing = 0;
                }
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}

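/*
 * Allocate a new btrfs_fs_devices and copy every device on @orig into it.
 * Only identity fields (devid, uuid, name) are duplicated; runtime state
 * such as open bdevs is not carried over.
 */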
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_device *device;
        struct btrfs_device *orig_dev;

        fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
        if (!fs_devices)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&fs_devices->devices);
        INIT_LIST_HEAD(&fs_devices->alloc_list);
        INIT_LIST_HEAD(&fs_devices->list);
        mutex_init(&fs_devices->device_list_mutex);
        fs_devices->latest_devid = orig->latest_devid;
        fs_devices->latest_trans = orig->latest_trans;
        fs_devices->total_devices = orig->total_devices;
        memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

        /* We have held the volume lock, it is safe to get the devices. */
        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                struct rcu_string *name;

                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        goto error;

                /*
                 * This is ok to do without rcu read locked because we hold the
                 * uuid mutex so nothing we touch in here is going to disappear.
                 */
                name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
                if (!name) {
                        kfree(device);
                        goto error;
                }
                rcu_assign_pointer(device->name, name);

                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
                spin_lock_init(&device->io_lock);
                INIT_LIST_HEAD(&device->dev_list);
                INIT_LIST_HEAD(&device->dev_alloc_list);

                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
        return fs_devices;
error:
        free_fs_devices(fs_devices);
        return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
                               struct btrfs_fs_devices *fs_devices, int step)
{
        struct btrfs_device *device, *next;

        struct block_device *latest_bdev = NULL;
        u64 latest_devid = 0;
        u64 latest_transid = 0;

        mutex_lock(&uuid_mutex);
again:
        /* This is the initialized path, it is safe to release the devices. */
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (device->in_fs_metadata) {
                        if (!device->is_tgtdev_for_dev_replace &&
                            (!latest_transid ||
                             device->generation > latest_transid)) {
                                latest_devid = device->devid;
                                latest_transid = device->generation;
                                latest_bdev = device->bdev;
                        }
                        continue;
                }

                if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
                        /*
                         * In the first step, keep the device which has
                         * the correct fsid and the devid that is used
                         * for the dev_replace procedure.
                         * In the second step, the dev_replace state is
                         * read from the device tree and it is known
                         * whether the procedure is really active or
                         * not, which means whether this device is
                         * used or whether it should be removed.
                         */
                        if (step == 0 || device->is_tgtdev_for_dev_replace) {
                                continue;
                        }
                }
                if (device->bdev) {
                        blkdev_put(device->bdev, device->mode);
                        device->bdev = NULL;
                        fs_devices->open_devices--;
                }
                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        device->writeable = 0;
                        if (!device->is_tgtdev_for_dev_replace)
                                fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                rcu_string_free(device->name);
                kfree(device);
        }

        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
                goto again;
        }

        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;

        mutex_unlock(&uuid_mutex);
}

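/*
 * Device teardown happens in two stages: free_device() is the RCU callback
 * and only queues __free_device() on a workqueue, because blkdev_put() can
 * sleep and must not run in the atomic RCU callback context.
 */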
static void __free_device(struct work_struct *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, rcu_work);

        if (device->bdev)
                blkdev_put(device->bdev, device->mode);

        rcu_string_free(device->name);
        kfree(device);
}

static void free_device(struct rcu_head *head)
{
        struct btrfs_device *device;

        device = container_of(head, struct btrfs_device, rcu);

        INIT_WORK(&device->rcu_work, __free_device);
        schedule_work(&device->rcu_work);
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;

        if (--fs_devices->opened > 0)
                return 0;

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                struct btrfs_device *new_device;
                struct rcu_string *name;

                if (device->bdev)
                        fs_devices->open_devices--;

                if (device->writeable && !device->is_tgtdev_for_dev_replace) {
                        list_del_init(&device->dev_alloc_list);
                        fs_devices->rw_devices--;
                }

                if (device->can_discard)
                        fs_devices->num_can_discard--;

                new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
                BUG_ON(!new_device); /* -ENOMEM */
                memcpy(new_device, device, sizeof(*new_device));

                /* Safe because we are under uuid_mutex */
                if (device->name) {
                        name = rcu_string_strdup(device->name->str, GFP_NOFS);
                        BUG_ON(device->name && !name); /* -ENOMEM */
                        rcu_assign_pointer(new_device->name, name);
                }
                new_device->bdev = NULL;
                new_device->writeable = 0;
                new_device->in_fs_metadata = 0;
                new_device->can_discard = 0;
                spin_lock_init(&new_device->io_lock);
                list_replace_rcu(&device->dev_list, &new_device->dev_list);

                call_rcu(&device->rcu, free_device);
        }
        mutex_unlock(&fs_devices->device_list_mutex);

        WARN_ON(fs_devices->open_devices);
        WARN_ON(fs_devices->rw_devices);
        fs_devices->opened = 0;
        fs_devices->seeding = 0;

        return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices = NULL;
        int ret;

        mutex_lock(&uuid_mutex);
        ret = __btrfs_close_devices(fs_devices);
        if (!fs_devices->opened) {
                seed_devices = fs_devices->seed;
                fs_devices->seed = NULL;
        }
        mutex_unlock(&uuid_mutex);

        while (seed_devices) {
                fs_devices = seed_devices;
                seed_devices = fs_devices->seed;
                __btrfs_close_devices(fs_devices);
                free_fs_devices(fs_devices);
        }
        /*
         * Wait for rcu kworkers under __btrfs_close_devices
         * to finish all blkdev_puts so device is really
         * free when umount is done.
         */
        rcu_barrier();
        return ret;
}

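/*
 * Open every device on the list exclusively, validate its super block
 * against the cached devid/uuid, and record the device with the newest
 * generation as latest_bdev.  Devices that fail to open are skipped;
 * the call as a whole only fails if no device could be opened.
 */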
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                                fmode_t flags, void *holder)
{
        struct request_queue *q;
        struct block_device *bdev;
        struct list_head *head = &fs_devices->devices;
        struct btrfs_device *device;
        struct block_device *latest_bdev = NULL;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
        u64 latest_devid = 0;
        u64 latest_transid = 0;
        u64 devid;
        int seeding = 1;
        int ret = 0;

        flags |= FMODE_EXCL;

        list_for_each_entry(device, head, dev_list) {
                if (device->bdev)
                        continue;
                if (!device->name)
                        continue;

                /* Just open everything we can; ignore failures here */
                if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
                                            &bdev, &bh))
                        continue;

                disk_super = (struct btrfs_super_block *)bh->b_data;
                devid = btrfs_stack_device_id(&disk_super->dev_item);
                if (devid != device->devid)
                        goto error_brelse;

                if (memcmp(device->uuid, disk_super->dev_item.uuid,
                           BTRFS_UUID_SIZE))
                        goto error_brelse;

                device->generation = btrfs_super_generation(disk_super);
                if (!latest_transid || device->generation > latest_transid) {
                        latest_devid = devid;
                        latest_transid = device->generation;
                        latest_bdev = bdev;
                }

                if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
                        device->writeable = 0;
                } else {
                        device->writeable = !bdev_read_only(bdev);
                        seeding = 0;
                }

                q = bdev_get_queue(bdev);
                if (blk_queue_discard(q)) {
                        device->can_discard = 1;
                        fs_devices->num_can_discard++;
                }

                device->bdev = bdev;
                device->in_fs_metadata = 0;
                device->mode = flags;

                if (!blk_queue_nonrot(bdev_get_queue(bdev)))
                        fs_devices->rotating = 1;

                fs_devices->open_devices++;
                if (device->writeable && !device->is_tgtdev_for_dev_replace) {
                        fs_devices->rw_devices++;
                        list_add(&device->dev_alloc_list,
                                 &fs_devices->alloc_list);
                }
                brelse(bh);
                continue;

error_brelse:
                brelse(bh);
                blkdev_put(bdev, flags);
                continue;
        }
        if (fs_devices->open_devices == 0) {
                ret = -EINVAL;
                goto out;
        }
        fs_devices->seeding = seeding;
        fs_devices->opened = 1;
        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;
        fs_devices->total_rw_bytes = 0;
out:
        return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder)
{
        int ret;

        mutex_lock(&uuid_mutex);
        if (fs_devices->opened) {
                fs_devices->opened++;
                ret = 0;
        } else {
                ret = __btrfs_open_devices(fs_devices, flags, holder);
        }
        mutex_unlock(&uuid_mutex);
        return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                          struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_super_block *disk_super;
        struct block_device *bdev;
        struct page *page;
        void *p;
        int ret = -EINVAL;
        u64 devid;
        u64 transid;
        u64 total_devices;
        u64 bytenr;
        pgoff_t index;

        /*
         * we would like to check all the supers, but that would make
         * a btrfs mount succeed after a mkfs from a different FS.
         * So, we need to add a special mount option to scan for
         * later supers, using BTRFS_SUPER_MIRROR_MAX instead
         */
        bytenr = btrfs_sb_offset(0);
        flags |= FMODE_EXCL;
        mutex_lock(&uuid_mutex);

        bdev = blkdev_get_by_path(path, flags, holder);

        if (IS_ERR(bdev)) {
                ret = PTR_ERR(bdev);
                goto error;
        }

        /* make sure our super fits in the device */
        if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
                goto error_bdev_put;

        /* make sure our super fits in the page */
        if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
                goto error_bdev_put;

        /* make sure our super doesn't straddle pages on disk */
        index = bytenr >> PAGE_CACHE_SHIFT;
        if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
                goto error_bdev_put;

        /* pull in the page with our super */
        page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
                                   index, GFP_NOFS);

        if (IS_ERR_OR_NULL(page))
                goto error_bdev_put;

        p = kmap(page);

        /* align our pointer to the offset of the super block */
        disk_super = p + (bytenr & ~PAGE_CACHE_MASK);

        if (btrfs_super_bytenr(disk_super) != bytenr ||
            disk_super->magic != cpu_to_le64(BTRFS_MAGIC))
                goto error_unmap;

        devid = btrfs_stack_device_id(&disk_super->dev_item);
        transid = btrfs_super_generation(disk_super);
        total_devices = btrfs_super_num_devices(disk_super);

        if (disk_super->label[0]) {
                if (disk_super->label[BTRFS_LABEL_SIZE - 1])
                        disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
                printk(KERN_INFO "device label %s ", disk_super->label);
        } else {
                printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
        }

        printk(KERN_CONT "devid %llu transid %llu %s\n",
               (unsigned long long)devid, (unsigned long long)transid, path);

        ret = device_list_add(path, disk_super, devid, fs_devices_ret);
        if (!ret && fs_devices_ret)
                (*fs_devices_ret)->total_devices = total_devices;

error_unmap:
        kunmap(page);
        page_cache_release(page);

error_bdev_put:
        blkdev_put(bdev, flags);
error:
        mutex_unlock(&uuid_mutex);
        return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
                                   u64 end, u64 *length)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 extent_end;
        int ret;
        int slot;
        struct extent_buffer *l;

        *length = 0;

        if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->reada = 2;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;

                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        break;

                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (key.offset <= start && extent_end > end) {
                        *length = end - start + 1;
                        break;
                } else if (key.offset <= start && extent_end > start)
                        *length += extent_end - start;
                else if (key.offset > start && extent_end <= end)
                        *length += extent_end - key.offset;
                else if (key.offset > start && key.offset <= end) {
                        *length += end - key.offset + 1;
                        break;
                } else if (key.offset > end)
                        break;

next:
                path->slots[0]++;
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @device:     the device which we search the free space in
 * @num_bytes:  the size of the free space that we need
 * @start:      store the start of the free space
 * @len:        the size of the free space that we find, or the size of the
 *              max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *len)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 hole_size;
        u64 max_hole_start;
        u64 max_hole_size;
        u64 extent_end;
        u64 search_start;
        u64 search_end = device->total_bytes;
        int ret;
        int slot;
        struct extent_buffer *l;

        /* FIXME use last free of some kind */

        /* we don't want to overwrite the superblock on the drive,
         * so we make sure to start at an offset of at least 1MB
         */
        search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

        max_hole_start = search_start;
        max_hole_size = 0;
        hole_size = 0;

        if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
                ret = -ENOSPC;
                goto error;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto error;
        }
        path->reada = 2;

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;

                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        break;

                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                if (key.offset > search_start) {
                        hole_size = key.offset - search_start;

                        if (hole_size > max_hole_size) {
                                max_hole_start = search_start;
                                max_hole_size = hole_size;
                        }

                        /*
                         * If this free space is greater than what we need,
                         * it must be the max free space that we have found
                         * until now, so max_hole_start must point to the start
                         * of this free space and the length of this free space
                         * is stored in max_hole_size. Thus, we return
                         * max_hole_start and max_hole_size and go back to the
                         * caller.
                         */
                        if (hole_size >= num_bytes) {
                                ret = 0;
                                goto out;
                        }
                }

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (extent_end > search_start)
                        search_start = extent_end;
next:
                path->slots[0]++;
                cond_resched();
        }

        /*
         * At this point, search_start should be the end of
         * allocated dev extents, and when shrinking the device,
         * search_end may be smaller than search_start.
         */
        if (search_end > search_start)
                hole_size = search_end - search_start;

        if (hole_size > max_hole_size) {
                max_hole_start = search_start;
                max_hole_size = hole_size;
        }

        /* See above. */
        if (hole_size < num_bytes)
                ret = -ENOSPC;
        else
                ret = 0;

out:
        btrfs_free_path(path);
error:
        *start = max_hole_start;
        if (len)
                *len = max_hole_size;
        return ret;
}

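/*
 * Delete the dev extent item covering @start on @device and return the
 * freed bytes to the free_chunk_space accounting.
 */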
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
                          struct btrfs_device *device,
                          u64 start)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf = NULL;
        struct btrfs_dev_extent *extent = NULL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
again:
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid,
                                          BTRFS_DEV_EXTENT_KEY);
                if (ret)
                        goto out;
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
                BUG_ON(found_key.offset > start || found_key.offset +
                       btrfs_dev_extent_length(leaf, extent) < start);
                key = found_key;
                btrfs_release_path(path);
                goto again;
        } else if (ret == 0) {
                leaf = path->nodes[0];
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
        } else {
                btrfs_error(root->fs_info, ret, "Slot search failed");
                goto out;
        }

        if (device->bytes_used > 0) {
                u64 len = btrfs_dev_extent_length(leaf, extent);
                device->bytes_used -= len;
                spin_lock(&root->fs_info->free_chunk_lock);
                root->fs_info->free_chunk_space += len;
                spin_unlock(&root->fs_info->free_chunk_lock);
        }
        ret = btrfs_del_item(trans, root, path);
        if (ret) {
                btrfs_error(root->fs_info, ret,
                            "Failed to remove dev extent item");
        }
out:
        btrfs_free_path(path);
        return ret;
}

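/*
 * Insert a dev extent item recording that the byte range
 * [start, start + num_bytes) on @device backs the chunk at @chunk_offset.
 */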
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                                  struct btrfs_device *device,
                                  u64 chunk_tree, u64 chunk_objectid,
                                  u64 chunk_offset, u64 start, u64 num_bytes)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        WARN_ON(!device->in_fs_metadata);
        WARN_ON(device->is_tgtdev_for_dev_replace);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
                    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
                    BTRFS_UUID_SIZE);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
out:
        btrfs_free_path(path);
        return ret;
}

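/*
 * Find the logical offset at which the next chunk under @objectid can be
 * placed: one past the end of the highest existing chunk item, or 0 when
 * no chunk exists yet.
 */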
static noinline int find_next_chunk(struct btrfs_root *root,
                                    u64 objectid, u64 *offset)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_key found_key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0); /* Corruption */

        ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
        if (ret) {
                *offset = 0;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != objectid)
                        *offset = 0;
                else {
                        chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                               struct btrfs_chunk);
                        *offset = found_key.offset +
                                btrfs_chunk_length(path->nodes[0], chunk);
                }
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

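/*
 * Pick the next unused devid: one past the highest devid recorded in the
 * chunk root, or 1 for a filesystem with no dev items yet.
 */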
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0); /* Corruption */

        ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
                                  BTRFS_DEV_ITEM_KEY);
        if (ret) {
                *objectid = 1;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                *objectid = found_key.offset + 1;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        unsigned long ptr;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*dev_item));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_generation(leaf, dev_item, 0);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);
        btrfs_set_device_start_offset(leaf, dev_item, 0);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
        ptr = (unsigned long)btrfs_device_fsid(dev_item);
        write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
                             struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_trans_handle *trans;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
        lock_chunks(root);

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, root, path);
        if (ret)
                goto out;
out:
        btrfs_free_path(path);
        unlock_chunks(root);
        btrfs_commit_transaction(trans, root);
        return ret;
}

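/*
 * Remove a device from the filesystem.  The checks below enforce the
 * minimum device count for each RAID profile before anything is torn
 * down; @device_path may be the literal string "missing" to remove a
 * device that is present in the metadata but has no backing bdev.
 */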
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
        struct btrfs_device *device;
        struct btrfs_device *next_device;
        struct block_device *bdev;
        struct buffer_head *bh = NULL;
        struct btrfs_super_block *disk_super;
        struct btrfs_fs_devices *cur_devices;
        u64 all_avail;
        u64 devid;
        u64 num_devices;
        u8 *dev_uuid;
        unsigned seq;
        int ret = 0;
        bool clear_super = false;

        mutex_lock(&uuid_mutex);

        do {
                seq = read_seqbegin(&root->fs_info->profiles_lock);

                all_avail = root->fs_info->avail_data_alloc_bits |
                            root->fs_info->avail_system_alloc_bits |
                            root->fs_info->avail_metadata_alloc_bits;
        } while (read_seqretry(&root->fs_info->profiles_lock, seq));

        num_devices = root->fs_info->fs_devices->num_devices;
        btrfs_dev_replace_lock(&root->fs_info->dev_replace);
        if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
                WARN_ON(num_devices < 1);
                num_devices--;
        }
        btrfs_dev_replace_unlock(&root->fs_info->dev_replace);

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
                printk(KERN_ERR "btrfs: unable to go below four devices "
                       "on raid10\n");
                ret = -EINVAL;
                goto out;
        }

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
                printk(KERN_ERR "btrfs: unable to go below two "
                       "devices on raid1\n");
                ret = -EINVAL;
                goto out;
        }

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
            root->fs_info->fs_devices->rw_devices <= 2) {
                printk(KERN_ERR "btrfs: unable to go below two "
                       "devices on raid5\n");
                ret = -EINVAL;
                goto out;
        }
        if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
            root->fs_info->fs_devices->rw_devices <= 3) {
                printk(KERN_ERR "btrfs: unable to go below three "
                       "devices on raid6\n");
                ret = -EINVAL;
                goto out;
        }

        if (strcmp(device_path, "missing") == 0) {
                struct list_head *devices;
                struct btrfs_device *tmp;

                device = NULL;
                devices = &root->fs_info->fs_devices->devices;
                /*
                 * It is safe to read the devices since the volume_mutex
                 * is held.
                 */
                list_for_each_entry(tmp, devices, dev_list) {
                        if (tmp->in_fs_metadata &&
                            !tmp->is_tgtdev_for_dev_replace &&
                            !tmp->bdev) {
                                device = tmp;
                                break;
                        }
                }
                bdev = NULL;
                bh = NULL;
                disk_super = NULL;
                if (!device) {
                        printk(KERN_ERR "btrfs: no missing devices found to "
                               "remove\n");
                        goto out;
                }
        } else {
                ret = btrfs_get_bdev_and_sb(device_path,
                                            FMODE_WRITE | FMODE_EXCL,
                                            root->fs_info->bdev_holder, 0,
                                            &bdev, &bh);
                if (ret)
                        goto out;
                disk_super = (struct btrfs_super_block *)bh->b_data;
                devid = btrfs_stack_device_id(&disk_super->dev_item);
                dev_uuid = disk_super->dev_item.uuid;
                device = btrfs_find_device(root->fs_info, devid, dev_uuid,
                                           disk_super->fsid);
                if (!device) {
                        ret = -ENOENT;
                        goto error_brelse;
                }
        }

        if (device->is_tgtdev_for_dev_replace) {
                pr_err("btrfs: unable to remove the dev_replace target dev\n");
                ret = -EINVAL;
                goto error_brelse;
        }

        if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
                printk(KERN_ERR "btrfs: unable to remove the only writeable "
1545                       "device\n");
1546                ret = -EINVAL;
1547                goto error_brelse;
1548        }
1549
1550        if (device->writeable) {
1551                lock_chunks(root);
1552                list_del_init(&device->dev_alloc_list);
1553                unlock_chunks(root);
1554                root->fs_info->fs_devices->rw_devices--;
1555                clear_super = true;
1556        }
1557
1558        ret = btrfs_shrink_device(device, 0);
1559        if (ret)
1560                goto error_undo;
1561
1562        /*
1563         * TODO: the superblock still includes this device in its num_devices
1564         * counter although write_all_supers() is not locked out. This
1565         * could give a filesystem state which requires a degraded mount.
1566         */
1567        ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1568        if (ret)
1569                goto error_undo;
1570
1571        spin_lock(&root->fs_info->free_chunk_lock);
1572        root->fs_info->free_chunk_space -= device->total_bytes -
1573                device->bytes_used;
1574        spin_unlock(&root->fs_info->free_chunk_lock);
1575
1576        device->in_fs_metadata = 0;
1577        btrfs_scrub_cancel_dev(root->fs_info, device);
1578
1579        /*
1580         * the device list mutex makes sure that we don't change
1581         * the device list while someone else is writing out all
1582         * the device supers.
1583         */
1584
1585        cur_devices = device->fs_devices;
1586        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1587        list_del_rcu(&device->dev_list);
1588
1589        device->fs_devices->num_devices--;
1590        device->fs_devices->total_devices--;
1591
1592        if (device->missing)
1593                root->fs_info->fs_devices->missing_devices--;
1594
1595        next_device = list_entry(root->fs_info->fs_devices->devices.next,
1596                                 struct btrfs_device, dev_list);
1597        if (device->bdev == root->fs_info->sb->s_bdev)
1598                root->fs_info->sb->s_bdev = next_device->bdev;
1599        if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1600                root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1601
1602        if (device->bdev)
1603                device->fs_devices->open_devices--;
1604
1605        call_rcu(&device->rcu, free_device);
1606        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1607
1608        num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1609        btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1610
1611        if (cur_devices->open_devices == 0) {
1612                struct btrfs_fs_devices *fs_devices;
1613                fs_devices = root->fs_info->fs_devices;
1614                while (fs_devices) {
1615                        if (fs_devices->seed == cur_devices)
1616                                break;
1617                        fs_devices = fs_devices->seed;
1618                }
1619                fs_devices->seed = cur_devices->seed;
1620                cur_devices->seed = NULL;
1621                lock_chunks(root);
1622                __btrfs_close_devices(cur_devices);
1623                unlock_chunks(root);
1624                free_fs_devices(cur_devices);
1625        }
1626
1627        root->fs_info->num_tolerated_disk_barrier_failures =
1628                btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1629
1630        /*
1631         * at this point, the device is zero sized.  We want to
1632         * remove it from the devices list and zero out the old super
1633         */
1634        if (clear_super && disk_super) {
1635                /* make sure this device isn't detected as part of
1636                 * the FS anymore
1637                 */
1638                memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1639                set_buffer_dirty(bh);
1640                sync_dirty_buffer(bh);
1641        }
1642
1643        ret = 0;
1644
1645        /* Notify udev that device has changed */
1646        if (bdev)
1647                btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1648
1649error_brelse:
1650        brelse(bh);
1651        if (bdev)
1652                blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1653out:
1654        mutex_unlock(&uuid_mutex);
1655        return ret;
1656error_undo:
1657        if (device->writeable) {
1658                lock_chunks(root);
1659                list_add(&device->dev_alloc_list,
1660                         &root->fs_info->fs_devices->alloc_list);
1661                unlock_chunks(root);
1662                root->fs_info->fs_devices->rw_devices++;
1663        }
1664        goto error_brelse;
1665}
1666
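    /*
     * Editor's sketch (illustrative, not part of the driver): the profile
     * checks at the top of btrfs_rm_device() amount to a per-profile floor
     * on how many devices must remain after the removal (raid5/6 are
     * checked against rw_devices rather than num_devices).  A hypothetical
     * helper stating the same limits:
     */
    static inline u64 example_min_devs_for_profile(u64 all_avail)
    {
            if (all_avail & BTRFS_BLOCK_GROUP_RAID10)
                    return 4;
            if (all_avail & BTRFS_BLOCK_GROUP_RAID6)
                    return 3;
            if (all_avail & (BTRFS_BLOCK_GROUP_RAID1 |
                             BTRFS_BLOCK_GROUP_RAID5))
                    return 2;
            return 1;
    }
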
1667void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1668                                 struct btrfs_device *srcdev)
1669{
1670        WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1671        list_del_rcu(&srcdev->dev_list);
1672        list_del_rcu(&srcdev->dev_alloc_list);
1673        fs_info->fs_devices->num_devices--;
1674        if (srcdev->missing) {
1675                fs_info->fs_devices->missing_devices--;
1676                fs_info->fs_devices->rw_devices++;
1677        }
1678        if (srcdev->can_discard)
1679                fs_info->fs_devices->num_can_discard--;
1680        if (srcdev->bdev)
1681                fs_info->fs_devices->open_devices--;
1682
1683        call_rcu(&srcdev->rcu, free_device);
1684}
1685
1686void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1687                                      struct btrfs_device *tgtdev)
1688{
1689        struct btrfs_device *next_device;
1690
1691        WARN_ON(!tgtdev);
1692        mutex_lock(&fs_info->fs_devices->device_list_mutex);
1693        if (tgtdev->bdev) {
1694                btrfs_scratch_superblock(tgtdev);
1695                fs_info->fs_devices->open_devices--;
1696        }
1697        fs_info->fs_devices->num_devices--;
1698        if (tgtdev->can_discard)
1699                fs_info->fs_devices->num_can_discard--;
1700
1701        next_device = list_entry(fs_info->fs_devices->devices.next,
1702                                 struct btrfs_device, dev_list);
1703        if (tgtdev->bdev == fs_info->sb->s_bdev)
1704                fs_info->sb->s_bdev = next_device->bdev;
1705        if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1706                fs_info->fs_devices->latest_bdev = next_device->bdev;
1707        list_del_rcu(&tgtdev->dev_list);
1708
1709        call_rcu(&tgtdev->rcu, free_device);
1710
1711        mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1712}
1713
1714static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1715                                     struct btrfs_device **device)
1716{
1717        int ret = 0;
1718        struct btrfs_super_block *disk_super;
1719        u64 devid;
1720        u8 *dev_uuid;
1721        struct block_device *bdev;
1722        struct buffer_head *bh;
1723
1724        *device = NULL;
1725        ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1726                                    root->fs_info->bdev_holder, 0, &bdev, &bh);
1727        if (ret)
1728                return ret;
1729        disk_super = (struct btrfs_super_block *)bh->b_data;
1730        devid = btrfs_stack_device_id(&disk_super->dev_item);
1731        dev_uuid = disk_super->dev_item.uuid;
1732        *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1733                                    disk_super->fsid);
1734        brelse(bh);
1735        if (!*device)
1736                ret = -ENOENT;
1737        blkdev_put(bdev, FMODE_READ);
1738        return ret;
1739}
1740
1741int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1742                                         char *device_path,
1743                                         struct btrfs_device **device)
1744{
1745        *device = NULL;
1746        if (strcmp(device_path, "missing") == 0) {
1747                struct list_head *devices;
1748                struct btrfs_device *tmp;
1749
1750                devices = &root->fs_info->fs_devices->devices;
1751                /*
1752                 * It is safe to read the devices since the volume_mutex
1753                 * is held by the caller.
1754                 */
1755                list_for_each_entry(tmp, devices, dev_list) {
1756                        if (tmp->in_fs_metadata && !tmp->bdev) {
1757                                *device = tmp;
1758                                break;
1759                        }
1760                }
1761
1762                if (!*device) {
1763                        pr_err("btrfs: no missing device found\n");
1764                        return -ENOENT;
1765                }
1766
1767                return 0;
1768        } else {
1769                return btrfs_find_device_by_path(root, device_path, device);
1770        }
1771}
1772
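    /*
     * Usage note (illustrative): btrfs-progs passes the literal string
     * "missing" down through the BTRFS_IOC_RM_DEV ioctl, e.g.
     *
     *      # btrfs device delete missing /mnt
     *
     * which selects the first device that is recorded in the metadata but
     * has no open bdev, exactly as in the loop above.
     */
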
1773/*
1774 * does all the dirty work required for changing the file system's UUID.
1775 */
1776static int btrfs_prepare_sprout(struct btrfs_root *root)
1777{
1778        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1779        struct btrfs_fs_devices *old_devices;
1780        struct btrfs_fs_devices *seed_devices;
1781        struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1782        struct btrfs_device *device;
1783        u64 super_flags;
1784
1785        BUG_ON(!mutex_is_locked(&uuid_mutex));
1786        if (!fs_devices->seeding)
1787                return -EINVAL;
1788
1789        seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1790        if (!seed_devices)
1791                return -ENOMEM;
1792
1793        old_devices = clone_fs_devices(fs_devices);
1794        if (IS_ERR(old_devices)) {
1795                kfree(seed_devices);
1796                return PTR_ERR(old_devices);
1797        }
1798
1799        list_add(&old_devices->list, &fs_uuids);
1800
1801        memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1802        seed_devices->opened = 1;
1803        INIT_LIST_HEAD(&seed_devices->devices);
1804        INIT_LIST_HEAD(&seed_devices->alloc_list);
1805        mutex_init(&seed_devices->device_list_mutex);
1806
1807        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1808        list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1809                              synchronize_rcu);
1810        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1811
1812        list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1813        list_for_each_entry(device, &seed_devices->devices, dev_list) {
1814                device->fs_devices = seed_devices;
1815        }
1816
1817        fs_devices->seeding = 0;
1818        fs_devices->num_devices = 0;
1819        fs_devices->open_devices = 0;
1820        fs_devices->total_devices = 0;
1821        fs_devices->seed = seed_devices;
1822
1823        generate_random_uuid(fs_devices->fsid);
1824        memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1825        memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1826        super_flags = btrfs_super_flags(disk_super) &
1827                      ~BTRFS_SUPER_FLAG_SEEDING;
1828        btrfs_set_super_flags(disk_super, super_flags);
1829
1830        return 0;
1831}
1832
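    /*
     * Usage note (illustrative): sprouting is triggered by adding a
     * writable device to a mounted seed filesystem.  The seed flag itself
     * is set offline with btrfs-progs, e.g.
     *
     *      # btrfstune -S 1 /dev/sdb
     *      # mount /dev/sdb /mnt
     *      # btrfs device add /dev/sdc /mnt
     *
     * at which point btrfs_init_new_device() below ends up calling
     * btrfs_prepare_sprout() and a fresh fsid is generated.
     */
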
1833/*
1834 * store the expected generation for seed devices in device items.
1835 */
1836static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1837                               struct btrfs_root *root)
1838{
1839        struct btrfs_path *path;
1840        struct extent_buffer *leaf;
1841        struct btrfs_dev_item *dev_item;
1842        struct btrfs_device *device;
1843        struct btrfs_key key;
1844        u8 fs_uuid[BTRFS_UUID_SIZE];
1845        u8 dev_uuid[BTRFS_UUID_SIZE];
1846        u64 devid;
1847        int ret;
1848
1849        path = btrfs_alloc_path();
1850        if (!path)
1851                return -ENOMEM;
1852
1853        root = root->fs_info->chunk_root;
1854        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1855        key.offset = 0;
1856        key.type = BTRFS_DEV_ITEM_KEY;
1857
1858        while (1) {
1859                ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1860                if (ret < 0)
1861                        goto error;
1862
1863                leaf = path->nodes[0];
1864next_slot:
1865                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1866                        ret = btrfs_next_leaf(root, path);
1867                        if (ret > 0)
1868                                break;
1869                        if (ret < 0)
1870                                goto error;
1871                        leaf = path->nodes[0];
1872                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1873                        btrfs_release_path(path);
1874                        continue;
1875                }
1876
1877                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1878                if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1879                    key.type != BTRFS_DEV_ITEM_KEY)
1880                        break;
1881
1882                dev_item = btrfs_item_ptr(leaf, path->slots[0],
1883                                          struct btrfs_dev_item);
1884                devid = btrfs_device_id(leaf, dev_item);
1885                read_extent_buffer(leaf, dev_uuid,
1886                                   (unsigned long)btrfs_device_uuid(dev_item),
1887                                   BTRFS_UUID_SIZE);
1888                read_extent_buffer(leaf, fs_uuid,
1889                                   (unsigned long)btrfs_device_fsid(dev_item),
1890                                   BTRFS_UUID_SIZE);
1891                device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1892                                           fs_uuid);
1893                BUG_ON(!device); /* Logic error */
1894
1895                if (device->fs_devices->seeding) {
1896                        btrfs_set_device_generation(leaf, dev_item,
1897                                                    device->generation);
1898                        btrfs_mark_buffer_dirty(leaf);
1899                }
1900
1901                path->slots[0]++;
1902                goto next_slot;
1903        }
1904        ret = 0;
1905error:
1906        btrfs_free_path(path);
1907        return ret;
1908}
1909
1910int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1911{
1912        struct request_queue *q;
1913        struct btrfs_trans_handle *trans;
1914        struct btrfs_device *device;
1915        struct block_device *bdev;
1916        struct list_head *devices;
1917        struct super_block *sb = root->fs_info->sb;
1918        struct rcu_string *name;
1919        u64 total_bytes;
1920        int seeding_dev = 0;
1921        int ret = 0;
1922
1923        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1924                return -EROFS;
1925
1926        bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1927                                  root->fs_info->bdev_holder);
1928        if (IS_ERR(bdev))
1929                return PTR_ERR(bdev);
1930
1931        if (root->fs_info->fs_devices->seeding) {
1932                seeding_dev = 1;
1933                down_write(&sb->s_umount);
1934                mutex_lock(&uuid_mutex);
1935        }
1936
1937        filemap_write_and_wait(bdev->bd_inode->i_mapping);
1938
1939        devices = &root->fs_info->fs_devices->devices;
1940
1941        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1942        list_for_each_entry(device, devices, dev_list) {
1943                if (device->bdev == bdev) {
1944                        ret = -EEXIST;
1945                        mutex_unlock(
1946                                &root->fs_info->fs_devices->device_list_mutex);
1947                        goto error;
1948                }
1949        }
1950        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1951
1952        device = kzalloc(sizeof(*device), GFP_NOFS);
1953        if (!device) {
1954                /* we can safely leave the fs_devices entry around */
1955                ret = -ENOMEM;
1956                goto error;
1957        }
1958
1959        name = rcu_string_strdup(device_path, GFP_NOFS);
1960        if (!name) {
1961                kfree(device);
1962                ret = -ENOMEM;
1963                goto error;
1964        }
1965        rcu_assign_pointer(device->name, name);
1966
1967        ret = find_next_devid(root, &device->devid);
1968        if (ret) {
1969                rcu_string_free(device->name);
1970                kfree(device);
1971                goto error;
1972        }
1973
1974        trans = btrfs_start_transaction(root, 0);
1975        if (IS_ERR(trans)) {
1976                rcu_string_free(device->name);
1977                kfree(device);
1978                ret = PTR_ERR(trans);
1979                goto error;
1980        }
1981
1982        lock_chunks(root);
1983
1984        q = bdev_get_queue(bdev);
1985        if (blk_queue_discard(q))
1986                device->can_discard = 1;
1987        device->writeable = 1;
1988        device->work.func = pending_bios_fn;
1989        generate_random_uuid(device->uuid);
1990        spin_lock_init(&device->io_lock);
1991        device->generation = trans->transid;
1992        device->io_width = root->sectorsize;
1993        device->io_align = root->sectorsize;
1994        device->sector_size = root->sectorsize;
1995        device->total_bytes = i_size_read(bdev->bd_inode);
1996        device->disk_total_bytes = device->total_bytes;
1997        device->dev_root = root->fs_info->dev_root;
1998        device->bdev = bdev;
1999        device->in_fs_metadata = 1;
2000        device->is_tgtdev_for_dev_replace = 0;
2001        device->mode = FMODE_EXCL;
2002        set_blocksize(device->bdev, 4096);
2003
2004        if (seeding_dev) {
2005                sb->s_flags &= ~MS_RDONLY;
2006                ret = btrfs_prepare_sprout(root);
2007                BUG_ON(ret); /* -ENOMEM */
2008        }
2009
2010        device->fs_devices = root->fs_info->fs_devices;
2011
2012        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2013        list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2014        list_add(&device->dev_alloc_list,
2015                 &root->fs_info->fs_devices->alloc_list);
2016        root->fs_info->fs_devices->num_devices++;
2017        root->fs_info->fs_devices->open_devices++;
2018        root->fs_info->fs_devices->rw_devices++;
2019        root->fs_info->fs_devices->total_devices++;
2020        if (device->can_discard)
2021                root->fs_info->fs_devices->num_can_discard++;
2022        root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2023
2024        spin_lock(&root->fs_info->free_chunk_lock);
2025        root->fs_info->free_chunk_space += device->total_bytes;
2026        spin_unlock(&root->fs_info->free_chunk_lock);
2027
2028        if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2029                root->fs_info->fs_devices->rotating = 1;
2030
2031        total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
2032        btrfs_set_super_total_bytes(root->fs_info->super_copy,
2033                                    total_bytes + device->total_bytes);
2034
2035        total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
2036        btrfs_set_super_num_devices(root->fs_info->super_copy,
2037                                    total_bytes + 1);
2038        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2039
2040        if (seeding_dev) {
2041                ret = init_first_rw_device(trans, root, device);
2042                if (ret) {
2043                        btrfs_abort_transaction(trans, root, ret);
2044                        goto error_trans;
2045                }
2046                ret = btrfs_finish_sprout(trans, root);
2047                if (ret) {
2048                        btrfs_abort_transaction(trans, root, ret);
2049                        goto error_trans;
2050                }
2051        } else {
2052                ret = btrfs_add_device(trans, root, device);
2053                if (ret) {
2054                        btrfs_abort_transaction(trans, root, ret);
2055                        goto error_trans;
2056                }
2057        }
2058
2059        /*
2060         * we've got more storage, clear any full flags on the space
2061         * infos
2062         */
2063        btrfs_clear_space_info_full(root->fs_info);
2064
2065        unlock_chunks(root);
2066        root->fs_info->num_tolerated_disk_barrier_failures =
2067                btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2068        ret = btrfs_commit_transaction(trans, root);
2069
2070        if (seeding_dev) {
2071                mutex_unlock(&uuid_mutex);
2072                up_write(&sb->s_umount);
2073
2074                if (ret) /* transaction commit */
2075                        return ret;
2076
2077                ret = btrfs_relocate_sys_chunks(root);
2078                if (ret < 0)
2079                        btrfs_error(root->fs_info, ret,
2080                                    "Failed to relocate sys chunks after "
2081                                    "device initialization. This can be fixed "
2082                                    "using the \"btrfs balance\" command.");
2083                trans = btrfs_attach_transaction(root);
2084                if (IS_ERR(trans)) {
2085                        if (PTR_ERR(trans) == -ENOENT)
2086                                return 0;
2087                        return PTR_ERR(trans);
2088                }
2089                ret = btrfs_commit_transaction(trans, root);
2090        }
2091
2092        return ret;
2093
2094error_trans:
2095        unlock_chunks(root);
2096        btrfs_end_transaction(trans, root);
2097        rcu_string_free(device->name);
2098        kfree(device);
2099error:
2100        blkdev_put(bdev, FMODE_EXCL);
2101        if (seeding_dev) {
2102                mutex_unlock(&uuid_mutex);
2103                up_write(&sb->s_umount);
2104        }
2105        return ret;
2106}
2107
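    /*
     * Usage note (illustrative): this is the backend of the
     * BTRFS_IOC_ADD_DEV ioctl ("btrfs device add /dev/sdX /mnt").  In the
     * seeding case the function above additionally creates the first
     * read-write chunks via init_first_rw_device() and stores the expected
     * generations for the seed devices via btrfs_finish_sprout().
     */
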
2108int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2109                                  struct btrfs_device **device_out)
2110{
2111        struct request_queue *q;
2112        struct btrfs_device *device;
2113        struct block_device *bdev;
2114        struct btrfs_fs_info *fs_info = root->fs_info;
2115        struct list_head *devices;
2116        struct rcu_string *name;
2117        int ret = 0;
2118
2119        *device_out = NULL;
2120        if (fs_info->fs_devices->seeding)
2121                return -EINVAL;
2122
2123        bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2124                                  fs_info->bdev_holder);
2125        if (IS_ERR(bdev))
2126                return PTR_ERR(bdev);
2127
2128        filemap_write_and_wait(bdev->bd_inode->i_mapping);
2129
2130        devices = &fs_info->fs_devices->devices;
2131        list_for_each_entry(device, devices, dev_list) {
2132                if (device->bdev == bdev) {
2133                        ret = -EEXIST;
2134                        goto error;
2135                }
2136        }
2137
2138        device = kzalloc(sizeof(*device), GFP_NOFS);
2139        if (!device) {
2140                ret = -ENOMEM;
2141                goto error;
2142        }
2143
2144        name = rcu_string_strdup(device_path, GFP_NOFS);
2145        if (!name) {
2146                kfree(device);
2147                ret = -ENOMEM;
2148                goto error;
2149        }
2150        rcu_assign_pointer(device->name, name);
2151
2152        q = bdev_get_queue(bdev);
2153        if (blk_queue_discard(q))
2154                device->can_discard = 1;
2155        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2156        device->writeable = 1;
2157        device->work.func = pending_bios_fn;
2158        generate_random_uuid(device->uuid);
2159        device->devid = BTRFS_DEV_REPLACE_DEVID;
2160        spin_lock_init(&device->io_lock);
2161        device->generation = 0;
2162        device->io_width = root->sectorsize;
2163        device->io_align = root->sectorsize;
2164        device->sector_size = root->sectorsize;
2165        device->total_bytes = i_size_read(bdev->bd_inode);
2166        device->disk_total_bytes = device->total_bytes;
2167        device->dev_root = fs_info->dev_root;
2168        device->bdev = bdev;
2169        device->in_fs_metadata = 1;
2170        device->is_tgtdev_for_dev_replace = 1;
2171        device->mode = FMODE_EXCL;
2172        set_blocksize(device->bdev, 4096);
2173        device->fs_devices = fs_info->fs_devices;
2174        list_add(&device->dev_list, &fs_info->fs_devices->devices);
2175        fs_info->fs_devices->num_devices++;
2176        fs_info->fs_devices->open_devices++;
2177        if (device->can_discard)
2178                fs_info->fs_devices->num_can_discard++;
2179        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2180
2181        *device_out = device;
2182        return ret;
2183
2184error:
2185        blkdev_put(bdev, FMODE_EXCL);
2186        return ret;
2187}
2188
2189void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2190                                              struct btrfs_device *tgtdev)
2191{
2192        WARN_ON(fs_info->fs_devices->rw_devices == 0);
2193        tgtdev->io_width = fs_info->dev_root->sectorsize;
2194        tgtdev->io_align = fs_info->dev_root->sectorsize;
2195        tgtdev->sector_size = fs_info->dev_root->sectorsize;
2196        tgtdev->dev_root = fs_info->dev_root;
2197        tgtdev->in_fs_metadata = 1;
2198}
2199
2200static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2201                                        struct btrfs_device *device)
2202{
2203        int ret;
2204        struct btrfs_path *path;
2205        struct btrfs_root *root;
2206        struct btrfs_dev_item *dev_item;
2207        struct extent_buffer *leaf;
2208        struct btrfs_key key;
2209
2210        root = device->dev_root->fs_info->chunk_root;
2211
2212        path = btrfs_alloc_path();
2213        if (!path)
2214                return -ENOMEM;
2215
2216        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2217        key.type = BTRFS_DEV_ITEM_KEY;
2218        key.offset = device->devid;
2219
2220        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2221        if (ret < 0)
2222                goto out;
2223
2224        if (ret > 0) {
2225                ret = -ENOENT;
2226                goto out;
2227        }
2228
2229        leaf = path->nodes[0];
2230        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2231
2232        btrfs_set_device_id(leaf, dev_item, device->devid);
2233        btrfs_set_device_type(leaf, dev_item, device->type);
2234        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2235        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2236        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2237        btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2238        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2239        btrfs_mark_buffer_dirty(leaf);
2240
2241out:
2242        btrfs_free_path(path);
2243        return ret;
2244}
2245
2246static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2247                      struct btrfs_device *device, u64 new_size)
2248{
2249        struct btrfs_super_block *super_copy =
2250                device->dev_root->fs_info->super_copy;
2251        u64 old_total = btrfs_super_total_bytes(super_copy);
2252        u64 diff = new_size - device->total_bytes;
2253
2254        if (!device->writeable)
2255                return -EACCES;
2256        if (new_size <= device->total_bytes ||
2257            device->is_tgtdev_for_dev_replace)
2258                return -EINVAL;
2259
2260        btrfs_set_super_total_bytes(super_copy, old_total + diff);
2261        device->fs_devices->total_rw_bytes += diff;
2262
2263        device->total_bytes = new_size;
2264        device->disk_total_bytes = new_size;
2265        btrfs_clear_space_info_full(device->dev_root->fs_info);
2266
2267        return btrfs_update_device(trans, device);
2268}
2269
2270int btrfs_grow_device(struct btrfs_trans_handle *trans,
2271                      struct btrfs_device *device, u64 new_size)
2272{
2273        int ret;
2274        lock_chunks(device->dev_root);
2275        ret = __btrfs_grow_device(trans, device, new_size);
2276        unlock_chunks(device->dev_root);
2277        return ret;
2278}
2279
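    /*
     * Worked example (illustrative): growing a device from 10GiB to 15GiB
     * gives diff = 5GiB in __btrfs_grow_device(), so the in-memory
     * superblock's total_bytes and the fs_devices' total_rw_bytes each
     * grow by 5GiB, both total_bytes and disk_total_bytes on the device
     * become 15GiB, and the dev item is rewritten by
     * btrfs_update_device().
     */
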
2280static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2281                            struct btrfs_root *root,
2282                            u64 chunk_tree, u64 chunk_objectid,
2283                            u64 chunk_offset)
2284{
2285        int ret;
2286        struct btrfs_path *path;
2287        struct btrfs_key key;
2288
2289        root = root->fs_info->chunk_root;
2290        path = btrfs_alloc_path();
2291        if (!path)
2292                return -ENOMEM;
2293
2294        key.objectid = chunk_objectid;
2295        key.offset = chunk_offset;
2296        key.type = BTRFS_CHUNK_ITEM_KEY;
2297
2298        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2299        if (ret < 0)
2300                goto out;
2301        else if (ret > 0) { /* Logic error or corruption */
2302                btrfs_error(root->fs_info, -ENOENT,
2303                            "Failed lookup while freeing chunk.");
2304                ret = -ENOENT;
2305                goto out;
2306        }
2307
2308        ret = btrfs_del_item(trans, root, path);
2309        if (ret < 0)
2310                btrfs_error(root->fs_info, ret,
2311                            "Failed to delete chunk item.");
2312out:
2313        btrfs_free_path(path);
2314        return ret;
2315}
2316
2317static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
2318                               u64 chunk_offset)
2319{
2320        struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2321        struct btrfs_disk_key *disk_key;
2322        struct btrfs_chunk *chunk;
2323        u8 *ptr;
2324        int ret = 0;
2325        u32 num_stripes;
2326        u32 array_size;
2327        u32 len = 0;
2328        u32 cur;
2329        struct btrfs_key key;
2330
2331        array_size = btrfs_super_sys_array_size(super_copy);
2332
2333        ptr = super_copy->sys_chunk_array;
2334        cur = 0;
2335
2336        while (cur < array_size) {
2337                disk_key = (struct btrfs_disk_key *)ptr;
2338                btrfs_disk_key_to_cpu(&key, disk_key);
2339
2340                len = sizeof(*disk_key);
2341
2342                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2343                        chunk = (struct btrfs_chunk *)(ptr + len);
2344                        num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2345                        len += btrfs_chunk_item_size(num_stripes);
2346                } else {
2347                        ret = -EIO;
2348                        break;
2349                }
2350                if (key.objectid == chunk_objectid &&
2351                    key.offset == chunk_offset) {
2352                        memmove(ptr, ptr + len, array_size - (cur + len));
2353                        array_size -= len;
2354                        btrfs_set_super_sys_array_size(super_copy, array_size);
2355                } else {
2356                        ptr += len;
2357                        cur += len;
2358                }
2359        }
2360        return ret;
2361}
2362
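    /*
     * Editor's sketch (illustrative, not part of the driver): the
     * sys_chunk_array in the superblock is a packed sequence of
     * (struct btrfs_disk_key, struct btrfs_chunk including its stripes)
     * pairs, which is why the loop above advances by sizeof(*disk_key)
     * plus btrfs_chunk_item_size(num_stripes).  A walker that merely
     * counts the entries:
     */
    static inline int example_count_sys_chunks(struct btrfs_super_block *sb)
    {
            u32 array_size = btrfs_super_sys_array_size(sb);
            u8 *ptr = sb->sys_chunk_array;
            struct btrfs_chunk *chunk;
            struct btrfs_key key;
            u32 cur = 0;
            u32 len;
            int count = 0;

            while (cur < array_size) {
                    btrfs_disk_key_to_cpu(&key, (struct btrfs_disk_key *)ptr);
                    if (key.type != BTRFS_CHUNK_ITEM_KEY)
                            return -EIO; /* corrupted array */
                    len = sizeof(struct btrfs_disk_key);
                    chunk = (struct btrfs_chunk *)(ptr + len);
                    len += btrfs_chunk_item_size(
                                    btrfs_stack_chunk_num_stripes(chunk));
                    ptr += len;
                    cur += len;
                    count++;
            }
            return count;
    }
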
2363static int btrfs_relocate_chunk(struct btrfs_root *root,
2364                         u64 chunk_tree, u64 chunk_objectid,
2365                         u64 chunk_offset)
2366{
2367        struct extent_map_tree *em_tree;
2368        struct btrfs_root *extent_root;
2369        struct btrfs_trans_handle *trans;
2370        struct extent_map *em;
2371        struct map_lookup *map;
2372        int ret;
2373        int i;
2374
2375        root = root->fs_info->chunk_root;
2376        extent_root = root->fs_info->extent_root;
2377        em_tree = &root->fs_info->mapping_tree.map_tree;
2378
2379        ret = btrfs_can_relocate(extent_root, chunk_offset);
2380        if (ret)
2381                return -ENOSPC;
2382
2383        /* step one, relocate all the extents inside this chunk */
2384        ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2385        if (ret)
2386                return ret;
2387
2388        trans = btrfs_start_transaction(root, 0);
2389        if (IS_ERR(trans)) {
2390                ret = PTR_ERR(trans);
2391                btrfs_std_error(root->fs_info, ret);
2392                return ret;
2393        }
2394
2395        lock_chunks(root);
2396
2397        /*
2398         * step two, delete the device extents and the
2399         * chunk tree entries
2400         */
2401        read_lock(&em_tree->lock);
2402        em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2403        read_unlock(&em_tree->lock);
2404
2405        BUG_ON(!em || em->start > chunk_offset ||
2406               em->start + em->len < chunk_offset);
2407        map = (struct map_lookup *)em->bdev;
2408
2409        for (i = 0; i < map->num_stripes; i++) {
2410                ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2411                                            map->stripes[i].physical);
2412                BUG_ON(ret);
2413
2414                if (map->stripes[i].dev) {
2415                        ret = btrfs_update_device(trans, map->stripes[i].dev);
2416                        BUG_ON(ret);
2417                }
2418        }
2419        ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2420                               chunk_offset);
2421
2422        BUG_ON(ret);
2423
2424        trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2425
2426        if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2427                ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2428                BUG_ON(ret);
2429        }
2430
2431        ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2432        BUG_ON(ret);
2433
2434        write_lock(&em_tree->lock);
2435        remove_extent_mapping(em_tree, em);
2436        write_unlock(&em_tree->lock);
2437
2438        kfree(map);
2439        em->bdev = NULL;
2440
2441        /* once for the tree */
2442        free_extent_map(em);
2443        /* once for us */
2444        free_extent_map(em);
2445
2446        unlock_chunks(root);
2447        btrfs_end_transaction(trans, root);
2448        return 0;
2449}
2450
2451static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2452{
2453        struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2454        struct btrfs_path *path;
2455        struct extent_buffer *leaf;
2456        struct btrfs_chunk *chunk;
2457        struct btrfs_key key;
2458        struct btrfs_key found_key;
2459        u64 chunk_tree = chunk_root->root_key.objectid;
2460        u64 chunk_type;
2461        bool retried = false;
2462        int failed = 0;
2463        int ret;
2464
2465        path = btrfs_alloc_path();
2466        if (!path)
2467                return -ENOMEM;
2468
2469again:
2470        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2471        key.offset = (u64)-1;
2472        key.type = BTRFS_CHUNK_ITEM_KEY;
2473
2474        while (1) {
2475                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2476                if (ret < 0)
2477                        goto error;
2478                BUG_ON(ret == 0); /* Corruption */
2479
2480                ret = btrfs_previous_item(chunk_root, path, key.objectid,
2481                                          key.type);
2482                if (ret < 0)
2483                        goto error;
2484                if (ret > 0)
2485                        break;
2486
2487                leaf = path->nodes[0];
2488                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2489
2490                chunk = btrfs_item_ptr(leaf, path->slots[0],
2491                                       struct btrfs_chunk);
2492                chunk_type = btrfs_chunk_type(leaf, chunk);
2493                btrfs_release_path(path);
2494
2495                if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2496                        ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2497                                                   found_key.objectid,
2498                                                   found_key.offset);
2499                        if (ret == -ENOSPC)
2500                                failed++;
2501                        else if (ret)
2502                                BUG();
2503                }
2504
2505                if (found_key.offset == 0)
2506                        break;
2507                key.offset = found_key.offset - 1;
2508        }
2509        ret = 0;
2510        if (failed && !retried) {
2511                failed = 0;
2512                retried = true;
2513                goto again;
2514        } else if (failed && retried) {
2515                WARN_ON(1);
2516                ret = -ENOSPC;
2517        }
2518error:
2519        btrfs_free_path(path);
2520        return ret;
2521}
2522
2523static int insert_balance_item(struct btrfs_root *root,
2524                               struct btrfs_balance_control *bctl)
2525{
2526        struct btrfs_trans_handle *trans;
2527        struct btrfs_balance_item *item;
2528        struct btrfs_disk_balance_args disk_bargs;
2529        struct btrfs_path *path;
2530        struct extent_buffer *leaf;
2531        struct btrfs_key key;
2532        int ret, err;
2533
2534        path = btrfs_alloc_path();
2535        if (!path)
2536                return -ENOMEM;
2537
2538        trans = btrfs_start_transaction(root, 0);
2539        if (IS_ERR(trans)) {
2540                btrfs_free_path(path);
2541                return PTR_ERR(trans);
2542        }
2543
2544        key.objectid = BTRFS_BALANCE_OBJECTID;
2545        key.type = BTRFS_BALANCE_ITEM_KEY;
2546        key.offset = 0;
2547
2548        ret = btrfs_insert_empty_item(trans, root, path, &key,
2549                                      sizeof(*item));
2550        if (ret)
2551                goto out;
2552
2553        leaf = path->nodes[0];
2554        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2555
2556        memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2557
2558        btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2559        btrfs_set_balance_data(leaf, item, &disk_bargs);
2560        btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2561        btrfs_set_balance_meta(leaf, item, &disk_bargs);
2562        btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2563        btrfs_set_balance_sys(leaf, item, &disk_bargs);
2564
2565        btrfs_set_balance_flags(leaf, item, bctl->flags);
2566
2567        btrfs_mark_buffer_dirty(leaf);
2568out:
2569        btrfs_free_path(path);
2570        err = btrfs_commit_transaction(trans, root);
2571        if (err && !ret)
2572                ret = err;
2573        return ret;
2574}
2575
2576static int del_balance_item(struct btrfs_root *root)
2577{
2578        struct btrfs_trans_handle *trans;
2579        struct btrfs_path *path;
2580        struct btrfs_key key;
2581        int ret, err;
2582
2583        path = btrfs_alloc_path();
2584        if (!path)
2585                return -ENOMEM;
2586
2587        trans = btrfs_start_transaction(root, 0);
2588        if (IS_ERR(trans)) {
2589                btrfs_free_path(path);
2590                return PTR_ERR(trans);
2591        }
2592
2593        key.objectid = BTRFS_BALANCE_OBJECTID;
2594        key.type = BTRFS_BALANCE_ITEM_KEY;
2595        key.offset = 0;
2596
2597        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2598        if (ret < 0)
2599                goto out;
2600        if (ret > 0) {
2601                ret = -ENOENT;
2602                goto out;
2603        }
2604
2605        ret = btrfs_del_item(trans, root, path);
2606out:
2607        btrfs_free_path(path);
2608        err = btrfs_commit_transaction(trans, root);
2609        if (err && !ret)
2610                ret = err;
2611        return ret;
2612}
2613
2614/*
2615 * This is a heuristic used to reduce the number of chunks balanced on
2616 * resume after balance was interrupted.
2617 */
2618static void update_balance_args(struct btrfs_balance_control *bctl)
2619{
2620        /*
2621         * Turn on soft mode for chunk types that were being converted.
2622         */
2623        if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2624                bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2625        if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2626                bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2627        if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2628                bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2629
2630        /*
2631         * Turn on usage filter if it is not already used.  The idea is
2632         * that chunks that we have already balanced should be
2633         * reasonably full.  Don't do it for chunks that are being
2634         * converted - that will keep us from relocating unconverted
2635         * (albeit full) chunks.
2636         */
2637        if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2638            !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2639                bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2640                bctl->data.usage = 90;
2641        }
2642        if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2643            !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2644                bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2645                bctl->sys.usage = 90;
2646        }
2647        if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2648            !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2649                bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2650                bctl->meta.usage = 90;
2651        }
2652}
2653
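    /*
     * Worked example (illustrative): if a balance that was converting data
     * chunks to a new profile is interrupted, the resume path above adds
     * SOFT to the data args, so chunks already carrying the target profile
     * are skipped; the meta and sys args (assuming no convert or usage
     * filter was set on them) get a usage=90 filter, so chunks the earlier
     * run already compacted are not rewritten a second time.
     */
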
2654/*
2655 * Should be called with both balance and volume mutexes held to
2656 * serialize other volume operations (add_dev/rm_dev/resize) with
2657 * restriper.  Same goes for unset_balance_control.
2658 */
2659static void set_balance_control(struct btrfs_balance_control *bctl)
2660{
2661        struct btrfs_fs_info *fs_info = bctl->fs_info;
2662
2663        BUG_ON(fs_info->balance_ctl);
2664
2665        spin_lock(&fs_info->balance_lock);
2666        fs_info->balance_ctl = bctl;
2667        spin_unlock(&fs_info->balance_lock);
2668}
2669
2670static void unset_balance_control(struct btrfs_fs_info *fs_info)
2671{
2672        struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2673
2674        BUG_ON(!fs_info->balance_ctl);
2675
2676        spin_lock(&fs_info->balance_lock);
2677        fs_info->balance_ctl = NULL;
2678        spin_unlock(&fs_info->balance_lock);
2679
2680        kfree(bctl);
2681}
2682
2683/*
2684 * Balance filters.  Return 1 if chunk should be filtered out
2685 * (should not be balanced).
2686 */
2687static int chunk_profiles_filter(u64 chunk_type,
2688                                 struct btrfs_balance_args *bargs)
2689{
2690        chunk_type = chunk_to_extended(chunk_type) &
2691                                BTRFS_EXTENDED_PROFILE_MASK;
2692
2693        if (bargs->profiles & chunk_type)
2694                return 0;
2695
2696        return 1;
2697}
2698
2699static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2700                              struct btrfs_balance_args *bargs)
2701{
2702        struct btrfs_block_group_cache *cache;
2703        u64 chunk_used, user_thresh;
2704        int ret = 1;
2705
2706        cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2707        chunk_used = btrfs_block_group_used(&cache->item);
2708
2709        if (bargs->usage == 0)
2710                user_thresh = 1;
2711        else if (bargs->usage > 100)
2712                user_thresh = cache->key.offset;
2713        else
2714                user_thresh = div_factor_fine(cache->key.offset,
2715                                              bargs->usage);
2716
2717        if (chunk_used < user_thresh)
2718                ret = 0;
2719
2720        btrfs_put_block_group(cache);
2721        return ret;
2722}
2723
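    /*
     * Worked example (illustrative): with bargs->usage == 30 and a 1GiB
     * block group, user_thresh = div_factor_fine(1GiB, 30), roughly
     * 307MiB, and any chunk using less than that is relocated (the filter
     * returns 0).  usage == 0 matches only completely empty chunks, while
     * usage > 100 matches every chunk that is not completely full.
     */
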
2724static int chunk_devid_filter(struct extent_buffer *leaf,
2725                              struct btrfs_chunk *chunk,
2726                              struct btrfs_balance_args *bargs)
2727{
2728        struct btrfs_stripe *stripe;
2729        int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2730        int i;
2731
2732        for (i = 0; i < num_stripes; i++) {
2733                stripe = btrfs_stripe_nr(chunk, i);
2734                if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2735                        return 0;
2736        }
2737
2738        return 1;
2739}
2740
2741/* [pstart, pend) */
2742static int chunk_drange_filter(struct extent_buffer *leaf,
2743                               struct btrfs_chunk *chunk,
2744                               u64 chunk_offset,
2745                               struct btrfs_balance_args *bargs)
2746{
2747        struct btrfs_stripe *stripe;
2748        int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2749        u64 stripe_offset;
2750        u64 stripe_length;
2751        int factor;
2752        int i;
2753
2754        if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2755                return 0;
2756
2757        if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2758             BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
2759                factor = num_stripes / 2;
2760        } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
2761                factor = num_stripes - 1;
2762        } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
2763                factor = num_stripes - 2;
2764        } else {
2765                factor = num_stripes;
2766        }
2767
2768        for (i = 0; i < num_stripes; i++) {
2769                stripe = btrfs_stripe_nr(chunk, i);
2770                if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2771                        continue;
2772
2773                stripe_offset = btrfs_stripe_offset(leaf, stripe);
2774                stripe_length = btrfs_chunk_length(leaf, chunk);
2775                do_div(stripe_length, factor);
2776
2777                if (stripe_offset < bargs->pend &&
2778                    stripe_offset + stripe_length > bargs->pstart)
2779                        return 0;
2780        }
2781
2782        return 1;
2783}
2784
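    /*
     * Worked example (illustrative): a 2GiB RAID10 chunk striped over four
     * devices has factor = 4 / 2 = 2, so each stripe covers
     * 2GiB / 2 = 1GiB of its device, and it is that
     * [stripe_offset, stripe_offset + 1GiB) extent which is intersected
     * with the caller's [pstart, pend) range above.
     */
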
2785/* [vstart, vend) */
2786static int chunk_vrange_filter(struct extent_buffer *leaf,
2787                               struct btrfs_chunk *chunk,
2788                               u64 chunk_offset,
2789                               struct btrfs_balance_args *bargs)
2790{
2791        if (chunk_offset < bargs->vend &&
2792            chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2793                /* at least part of the chunk is inside this vrange */
2794                return 0;
2795
2796        return 1;
2797}
2798
2799static int chunk_soft_convert_filter(u64 chunk_type,
2800                                     struct btrfs_balance_args *bargs)
2801{
2802        if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2803                return 0;
2804
2805        chunk_type = chunk_to_extended(chunk_type) &
2806                                BTRFS_EXTENDED_PROFILE_MASK;
2807
2808        if (bargs->target == chunk_type)
2809                return 1;
2810
2811        return 0;
2812}
2813
2814static int should_balance_chunk(struct btrfs_root *root,
2815                                struct extent_buffer *leaf,
2816                                struct btrfs_chunk *chunk, u64 chunk_offset)
2817{
2818        struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2819        struct btrfs_balance_args *bargs = NULL;
2820        u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2821
2822        /* type filter */
2823        if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2824              (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2825                return 0;
2826        }
2827
2828        if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2829                bargs = &bctl->data;
2830        else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2831                bargs = &bctl->sys;
2832        else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2833                bargs = &bctl->meta;
2834
2835        /* profiles filter */
2836        if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2837            chunk_profiles_filter(chunk_type, bargs)) {
2838                return 0;
2839        }
2840
2841        /* usage filter */
2842        if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2843            chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2844                return 0;
2845        }
2846
2847        /* devid filter */
2848        if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2849            chunk_devid_filter(leaf, chunk, bargs)) {
2850                return 0;
2851        }
2852
2853        /* drange filter, makes sense only with devid filter */
2854        if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2855            chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2856                return 0;
2857        }
2858
2859        /* vrange filter */
2860        if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2861            chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2862                return 0;
2863        }
2864
2865        /* soft profile changing mode */
2866        if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2867            chunk_soft_convert_filter(chunk_type, bargs)) {
2868                return 0;
2869        }
2870
2871        return 1;
2872}
2873
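    /*
     * Editor's sketch (illustrative, not part of the driver): the filters
     * above are effectively AND-ed - every filter configured in the
     * matching btrfs_balance_args must accept the chunk.  A hypothetical
     * balance control that rewrites only data chunks under half full could
     * be configured like this:
     */
    static inline void example_balance_data_half_full(
                                    struct btrfs_balance_control *bctl)
    {
            memset(&bctl->data, 0, sizeof(bctl->data));
            bctl->data.flags = BTRFS_BALANCE_ARGS_USAGE;
            bctl->data.usage = 50;
            bctl->flags = BTRFS_BALANCE_DATA;
    }
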
2874static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2875{
2876        struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2877        struct btrfs_root *chunk_root = fs_info->chunk_root;
2878        struct btrfs_root *dev_root = fs_info->dev_root;
2879        struct list_head *devices;
2880        struct btrfs_device *device;
2881        u64 old_size;
2882        u64 size_to_free;
2883        struct btrfs_chunk *chunk;
2884        struct btrfs_path *path;
2885        struct btrfs_key key;
2886        struct btrfs_key found_key;
2887        struct btrfs_trans_handle *trans;
2888        struct extent_buffer *leaf;
2889        int slot;
2890        int ret;
2891        int enospc_errors = 0;
2892        bool counting = true;
2893
2894        /* step one, make some room on all the devices */
2895        devices = &fs_info->fs_devices->devices;
2896        list_for_each_entry(device, devices, dev_list) {
2897                old_size = device->total_bytes;
2898                size_to_free = div_factor(old_size, 1);
2899                size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2900                if (!device->writeable ||
2901                    device->total_bytes - device->bytes_used > size_to_free ||
2902                    device->is_tgtdev_for_dev_replace)
2903                        continue;
2904
2905                ret = btrfs_shrink_device(device, old_size - size_to_free);
2906                if (ret == -ENOSPC)
2907                        break;
2908                BUG_ON(ret);
2909
2910                trans = btrfs_start_transaction(dev_root, 0);
2911                BUG_ON(IS_ERR(trans));
2912
2913                ret = btrfs_grow_device(trans, device, old_size);
2914                BUG_ON(ret);
2915
2916                btrfs_end_transaction(trans, dev_root);
2917        }
2918
2919        /* step two, relocate all the chunks */
2920        path = btrfs_alloc_path();
2921        if (!path) {
2922                ret = -ENOMEM;
2923                goto error;
2924        }
2925
2926        /* zero out stat counters */
2927        spin_lock(&fs_info->balance_lock);
2928        memset(&bctl->stat, 0, sizeof(bctl->stat));
2929        spin_unlock(&fs_info->balance_lock);
2930again:
2931        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2932        key.offset = (u64)-1;
2933        key.type = BTRFS_CHUNK_ITEM_KEY;
2934
2935        while (1) {
2936                if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2937                    atomic_read(&fs_info->balance_cancel_req)) {
2938                        ret = -ECANCELED;
2939                        goto error;
2940                }
2941
2942                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2943                if (ret < 0)
2944                        goto error;
2945
2946                /*
2947                 * this shouldn't happen, it means the last relocate
2948                 * failed
2949                 */
2950                if (ret == 0)
2951                        BUG(); /* FIXME break ? */
2952
2953                ret = btrfs_previous_item(chunk_root, path, 0,
2954                                          BTRFS_CHUNK_ITEM_KEY);
2955                if (ret) {
2956                        ret = 0;
2957                        break;
2958                }
2959
2960                leaf = path->nodes[0];
2961                slot = path->slots[0];
2962                btrfs_item_key_to_cpu(leaf, &found_key, slot);
2963
2964                if (found_key.objectid != key.objectid)
2965                        break;
2966
2967                /* chunk zero is special */
2968                if (found_key.offset == 0)
2969                        break;
2970
2971                chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2972
2973                if (!counting) {
2974                        spin_lock(&fs_info->balance_lock);
2975                        bctl->stat.considered++;
2976                        spin_unlock(&fs_info->balance_lock);
2977                }
2978
2979                ret = should_balance_chunk(chunk_root, leaf, chunk,
2980                                           found_key.offset);
2981                btrfs_release_path(path);
2982                if (!ret)
2983                        goto loop;
2984
2985                if (counting) {
2986                        spin_lock(&fs_info->balance_lock);
2987                        bctl->stat.expected++;
2988                        spin_unlock(&fs_info->balance_lock);
2989                        goto loop;
2990                }
2991
2992                ret = btrfs_relocate_chunk(chunk_root,
2993                                           chunk_root->root_key.objectid,
2994                                           found_key.objectid,
2995                                           found_key.offset);
2996                if (ret && ret != -ENOSPC)
2997                        goto error;
2998                if (ret == -ENOSPC) {
2999                        enospc_errors++;
3000                } else {
3001                        spin_lock(&fs_info->balance_lock);
3002                        bctl->stat.completed++;
3003                        spin_unlock(&fs_info->balance_lock);
3004                }
3005loop:
3006                key.offset = found_key.offset - 1;
3007        }
3008
3009        if (counting) {
3010                btrfs_release_path(path);
3011                counting = false;
3012                goto again;
3013        }
3014error:
3015        btrfs_free_path(path);
3016        if (enospc_errors) {
3017                printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
3018                       enospc_errors);
3019                if (!ret)
3020                        ret = -ENOSPC;
3021        }
3022
3023        return ret;
3024}
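/*
 * Note the two-pass structure above: the first pass (counting == true)
 * only tallies bctl->stat.expected for chunks that pass the filters, the
 * second pass actually relocates them and bumps stat.completed.  The
 * walk runs from the highest chunk offset downwards, which keeps chunks
 * created by the relocation itself (allocated at higher offsets) out of
 * the remaining range.
 */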
3025
3026/**
3027 * alloc_profile_is_valid - see if a given profile is valid and reduced
3028 * @flags: profile to validate
3029 * @extended: if true @flags is treated as an extended profile
3030 */
3031static int alloc_profile_is_valid(u64 flags, int extended)
3032{
3033        u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3034                               BTRFS_BLOCK_GROUP_PROFILE_MASK);
3035
3036        flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3037
3038        /* 1) check that all other bits are zeroed */
3039        if (flags & ~mask)
3040                return 0;
3041
3042        /* 2) see if profile is reduced */
3043        if (flags == 0)
3044                return !extended; /* "0" is valid for usual profiles */
3045
3046        /* true if exactly one bit set */
3047        return (flags & (flags - 1)) == 0;
3048}
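/*
 * The last test above is the classic "clear the lowest set bit" trick:
 * flags & (flags - 1) is zero iff at most one bit is set.  A tiny
 * illustration with hypothetical values:
 */
#if 0   /* illustrative userspace sketch, not part of this file */
#include <assert.h>

static void one_bit_demo(void)
{
        assert((0x8ULL & (0x8ULL - 1)) == 0);   /* one bit set: reduced */
        assert((0xcULL & (0xcULL - 1)) != 0);   /* two bits set: not reduced */
}
#endif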
3049
3050static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3051{
3052        /* cancel requested || normal exit path */
3053        return atomic_read(&fs_info->balance_cancel_req) ||
3054                (atomic_read(&fs_info->balance_pause_req) == 0 &&
3055                 atomic_read(&fs_info->balance_cancel_req) == 0);
3056}
3057
3058static void __cancel_balance(struct btrfs_fs_info *fs_info)
3059{
3060        int ret;
3061
3062        unset_balance_control(fs_info);
3063        ret = del_balance_item(fs_info->tree_root);
3064        if (ret)
3065                btrfs_std_error(fs_info, ret);
3066
3067        atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3068}
3069
3070void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
3071                               struct btrfs_ioctl_balance_args *bargs);
3072
3073/*
3074 * Should be called with both balance and volume mutexes held
3075 */
3076int btrfs_balance(struct btrfs_balance_control *bctl,
3077                  struct btrfs_ioctl_balance_args *bargs)
3078{
3079        struct btrfs_fs_info *fs_info = bctl->fs_info;
3080        u64 allowed;
3081        int mixed = 0;
3082        int ret;
3083        u64 num_devices;
3084        unsigned seq;
3085
3086        if (btrfs_fs_closing(fs_info) ||
3087            atomic_read(&fs_info->balance_pause_req) ||
3088            atomic_read(&fs_info->balance_cancel_req)) {
3089                ret = -EINVAL;
3090                goto out;
3091        }
3092
3093        allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3094        if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3095                mixed = 1;
3096
3097        /*
3098         * In case of mixed groups both data and meta should be picked,
3099         * and identical options should be given for both of them.
3100         */
3101        allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3102        if (mixed && (bctl->flags & allowed)) {
3103                if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3104                    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3105                    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3106                        printk(KERN_ERR "btrfs: with mixed groups data and "
3107                               "metadata balance options must be the same\n");
3108                        ret = -EINVAL;
3109                        goto out;
3110                }
3111        }
3112
3113        num_devices = fs_info->fs_devices->num_devices;
3114        btrfs_dev_replace_lock(&fs_info->dev_replace);
3115        if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3116                BUG_ON(num_devices < 1);
3117                num_devices--;
3118        }
3119        btrfs_dev_replace_unlock(&fs_info->dev_replace);
3120        allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3121        if (num_devices == 1)
3122                allowed |= BTRFS_BLOCK_GROUP_DUP;
3123        else if (num_devices > 1)
3124                allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3125        if (num_devices > 2)
3126                allowed |= BTRFS_BLOCK_GROUP_RAID5;
3127        if (num_devices > 3)
3128                allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3129                            BTRFS_BLOCK_GROUP_RAID6);
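        /*
         * Example: on a 2-device filesystem the mask built above is
         * SINGLE | RAID0 | RAID1, so converting data to RAID10 would be
         * rejected with -EINVAL below; RAID5 needs more than 2 devices,
         * RAID10 and RAID6 more than 3.
         */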
3130        if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3131            (!alloc_profile_is_valid(bctl->data.target, 1) ||
3132             (bctl->data.target & ~allowed))) {
3133                printk(KERN_ERR "btrfs: unable to start balance with target "
3134                       "data profile %llu\n",
3135                       (unsigned long long)bctl->data.target);
3136                ret = -EINVAL;
3137                goto out;
3138        }
3139        if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3140            (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3141             (bctl->meta.target & ~allowed))) {
3142                printk(KERN_ERR "btrfs: unable to start balance with target "
3143                       "metadata profile %llu\n",
3144                       (unsigned long long)bctl->meta.target);
3145                ret = -EINVAL;
3146                goto out;
3147        }
3148        if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3149            (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3150             (bctl->sys.target & ~allowed))) {
3151                printk(KERN_ERR "btrfs: unable to start balance with target "
3152                       "system profile %llu\n",
3153                       (unsigned long long)bctl->sys.target);
3154                ret = -EINVAL;
3155                goto out;
3156        }
3157
3158        /* allow dup'ed data chunks only in mixed mode */
3159        if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3160            (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3161                printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3162                ret = -EINVAL;
3163                goto out;
3164        }
3165
3166        /* allow reducing metadata or system integrity only if force is set */
3167        allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3168                        BTRFS_BLOCK_GROUP_RAID10 |
3169                        BTRFS_BLOCK_GROUP_RAID5 |
3170                        BTRFS_BLOCK_GROUP_RAID6;
3171        do {
3172                seq = read_seqbegin(&fs_info->profiles_lock);
3173
3174                if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3175                     (fs_info->avail_system_alloc_bits & allowed) &&
3176                     !(bctl->sys.target & allowed)) ||
3177                    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3178                     (fs_info->avail_metadata_alloc_bits & allowed) &&
3179                     !(bctl->meta.target & allowed))) {
3180                        if (bctl->flags & BTRFS_BALANCE_FORCE) {
3181                                printk(KERN_INFO "btrfs: force reducing metadata "
3182                                       "integrity\n");
3183                        } else {
3184                                printk(KERN_ERR "btrfs: balance will reduce metadata "
3185                                       "integrity, use force if you want this\n");
3186                                ret = -EINVAL;
3187                                goto out;
3188                        }
3189                }
3190        } while (read_seqretry(&fs_info->profiles_lock, seq));
3191
3192        if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3193                int num_tolerated_disk_barrier_failures;
3194                u64 target = bctl->sys.target;
3195
3196                num_tolerated_disk_barrier_failures =
3197                        btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3198                if (num_tolerated_disk_barrier_failures > 0 &&
3199                    (target &
3200                     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3201                      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3202                        num_tolerated_disk_barrier_failures = 0;
3203                else if (num_tolerated_disk_barrier_failures > 1 &&
3204                         (target &
3205                          (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3206                        num_tolerated_disk_barrier_failures = 1;
3207
3208                fs_info->num_tolerated_disk_barrier_failures =
3209                        num_tolerated_disk_barrier_failures;
3210        }
3211
3212        ret = insert_balance_item(fs_info->tree_root, bctl);
3213        if (ret && ret != -EEXIST)
3214                goto out;
3215
3216        if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3217                BUG_ON(ret == -EEXIST);
3218                set_balance_control(bctl);
3219        } else {
3220                BUG_ON(ret != -EEXIST);
3221                spin_lock(&fs_info->balance_lock);
3222                update_balance_args(bctl);
3223                spin_unlock(&fs_info->balance_lock);
3224        }
3225
3226        atomic_inc(&fs_info->balance_running);
3227        mutex_unlock(&fs_info->balance_mutex);
3228
3229        ret = __btrfs_balance(fs_info);
3230
3231        mutex_lock(&fs_info->balance_mutex);
3232        atomic_dec(&fs_info->balance_running);
3233
3234        if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3235                fs_info->num_tolerated_disk_barrier_failures =
3236                        btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3237        }
3238
3239        if (bargs) {
3240                memset(bargs, 0, sizeof(*bargs));
3241                update_ioctl_balance_args(fs_info, 0, bargs);
3242        }
3243
3244        if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3245            balance_need_close(fs_info)) {
3246                __cancel_balance(fs_info);
3247        }
3248
3249        wake_up(&fs_info->balance_wait_q);
3250
3251        return ret;
3252out:
3253        if (bctl->flags & BTRFS_BALANCE_RESUME)
3254                __cancel_balance(fs_info);
3255        else {
3256                kfree(bctl);
3257                atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3258        }
3259        return ret;
3260}
3261
3262static int balance_kthread(void *data)
3263{
3264        struct btrfs_fs_info *fs_info = data;
3265        int ret = 0;
3266
3267        mutex_lock(&fs_info->volume_mutex);
3268        mutex_lock(&fs_info->balance_mutex);
3269
3270        if (fs_info->balance_ctl) {
3271                printk(KERN_INFO "btrfs: continuing balance\n");
3272                ret = btrfs_balance(fs_info->balance_ctl, NULL);
3273        }
3274
3275        mutex_unlock(&fs_info->balance_mutex);
3276        mutex_unlock(&fs_info->volume_mutex);
3277
3278        return ret;
3279}
3280
3281int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3282{
3283        struct task_struct *tsk;
3284
3285        spin_lock(&fs_info->balance_lock);
3286        if (!fs_info->balance_ctl) {
3287                spin_unlock(&fs_info->balance_lock);
3288                return 0;
3289        }
3290        spin_unlock(&fs_info->balance_lock);
3291
3292        if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3293                printk(KERN_INFO "btrfs: force skipping balance\n");
3294                return 0;
3295        }
3296
3297        tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3298        if (IS_ERR(tsk))
3299                return PTR_ERR(tsk);
3300
3301        return 0;
3302}
3303
3304int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3305{
3306        struct btrfs_balance_control *bctl;
3307        struct btrfs_balance_item *item;
3308        struct btrfs_disk_balance_args disk_bargs;
3309        struct btrfs_path *path;
3310        struct extent_buffer *leaf;
3311        struct btrfs_key key;
3312        int ret;
3313
3314        path = btrfs_alloc_path();
3315        if (!path)
3316                return -ENOMEM;
3317
3318        key.objectid = BTRFS_BALANCE_OBJECTID;
3319        key.type = BTRFS_BALANCE_ITEM_KEY;
3320        key.offset = 0;
3321
3322        ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3323        if (ret < 0)
3324                goto out;
3325        if (ret > 0) { /* ret = -ENOENT; */
3326                ret = 0;
3327                goto out;
3328        }
3329
3330        bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3331        if (!bctl) {
3332                ret = -ENOMEM;
3333                goto out;
3334        }
3335
3336        leaf = path->nodes[0];
3337        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3338
3339        bctl->fs_info = fs_info;
3340        bctl->flags = btrfs_balance_flags(leaf, item);
3341        bctl->flags |= BTRFS_BALANCE_RESUME;
3342
3343        btrfs_balance_data(leaf, item, &disk_bargs);
3344        btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3345        btrfs_balance_meta(leaf, item, &disk_bargs);
3346        btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3347        btrfs_balance_sys(leaf, item, &disk_bargs);
3348        btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3349
3350        WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3351
3352        mutex_lock(&fs_info->volume_mutex);
3353        mutex_lock(&fs_info->balance_mutex);
3354
3355        set_balance_control(bctl);
3356
3357        mutex_unlock(&fs_info->balance_mutex);
3358        mutex_unlock(&fs_info->volume_mutex);
3359out:
3360        btrfs_free_path(path);
3361        return ret;
3362}
3363
3364int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3365{
3366        int ret = 0;
3367
3368        mutex_lock(&fs_info->balance_mutex);
3369        if (!fs_info->balance_ctl) {
3370                mutex_unlock(&fs_info->balance_mutex);
3371                return -ENOTCONN;
3372        }
3373
3374        if (atomic_read(&fs_info->balance_running)) {
3375                atomic_inc(&fs_info->balance_pause_req);
3376                mutex_unlock(&fs_info->balance_mutex);
3377
3378                wait_event(fs_info->balance_wait_q,
3379                           atomic_read(&fs_info->balance_running) == 0);
3380
3381                mutex_lock(&fs_info->balance_mutex);
3382                /* we are fine with balance_ctl being ripped out from under us */
3383                BUG_ON(atomic_read(&fs_info->balance_running));
3384                atomic_dec(&fs_info->balance_pause_req);
3385        } else {
3386                ret = -ENOTCONN;
3387        }
3388
3389        mutex_unlock(&fs_info->balance_mutex);
3390        return ret;
3391}
3392
3393int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3394{
3395        mutex_lock(&fs_info->balance_mutex);
3396        if (!fs_info->balance_ctl) {
3397                mutex_unlock(&fs_info->balance_mutex);
3398                return -ENOTCONN;
3399        }
3400
3401        atomic_inc(&fs_info->balance_cancel_req);
3402        /*
3403         * if balance is running, just wait and return; the balance item
3404         * is deleted in btrfs_balance() in that case
3405         */
3406        if (atomic_read(&fs_info->balance_running)) {
3407                mutex_unlock(&fs_info->balance_mutex);
3408                wait_event(fs_info->balance_wait_q,
3409                           atomic_read(&fs_info->balance_running) == 0);
3410                mutex_lock(&fs_info->balance_mutex);
3411        } else {
3412                /* __cancel_balance needs volume_mutex */
3413                mutex_unlock(&fs_info->balance_mutex);
3414                mutex_lock(&fs_info->volume_mutex);
3415                mutex_lock(&fs_info->balance_mutex);
3416
3417                if (fs_info->balance_ctl)
3418                        __cancel_balance(fs_info);
3419
3420                mutex_unlock(&fs_info->volume_mutex);
3421        }
3422
3423        BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3424        atomic_dec(&fs_info->balance_cancel_req);
3425        mutex_unlock(&fs_info->balance_mutex);
3426        return 0;
3427}
3428
3429/*
3430 * shrinking a device means finding all of the device extents past
3431 * the new size, and then following the back refs to the chunks.
3432 * The chunk relocation code actually frees the device extents.
3433 */
3434int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3435{
3436        struct btrfs_trans_handle *trans;
3437        struct btrfs_root *root = device->dev_root;
3438        struct btrfs_dev_extent *dev_extent = NULL;
3439        struct btrfs_path *path;
3440        u64 length;
3441        u64 chunk_tree;
3442        u64 chunk_objectid;
3443        u64 chunk_offset;
3444        int ret;
3445        int slot;
3446        int failed = 0;
3447        bool retried = false;
3448        struct extent_buffer *l;
3449        struct btrfs_key key;
3450        struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3451        u64 old_total = btrfs_super_total_bytes(super_copy);
3452        u64 old_size = device->total_bytes;
3453        u64 diff = device->total_bytes - new_size;
3454
3455        if (device->is_tgtdev_for_dev_replace)
3456                return -EINVAL;
3457
3458        path = btrfs_alloc_path();
3459        if (!path)
3460                return -ENOMEM;
3461
3462        path->reada = 2;
3463
3464        lock_chunks(root);
3465
3466        device->total_bytes = new_size;
3467        if (device->writeable) {
3468                device->fs_devices->total_rw_bytes -= diff;
3469                spin_lock(&root->fs_info->free_chunk_lock);
3470                root->fs_info->free_chunk_space -= diff;
3471                spin_unlock(&root->fs_info->free_chunk_lock);
3472        }
3473        unlock_chunks(root);
3474
3475again:
3476        key.objectid = device->devid;
3477        key.offset = (u64)-1;
3478        key.type = BTRFS_DEV_EXTENT_KEY;
3479
3480        do {
3481                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3482                if (ret < 0)
3483                        goto done;
3484
3485                ret = btrfs_previous_item(root, path, 0, key.type);
3486                if (ret < 0)
3487                        goto done;
3488                if (ret) {
3489                        ret = 0;
3490                        btrfs_release_path(path);
3491                        break;
3492                }
3493
3494                l = path->nodes[0];
3495                slot = path->slots[0];
3496                btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3497
3498                if (key.objectid != device->devid) {
3499                        btrfs_release_path(path);
3500                        break;
3501                }
3502
3503                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3504                length = btrfs_dev_extent_length(l, dev_extent);
3505
3506                if (key.offset + length <= new_size) {
3507                        btrfs_release_path(path);
3508                        break;
3509                }
3510
3511                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3512                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3513                chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3514                btrfs_release_path(path);
3515
3516                ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3517                                           chunk_offset);
3518                if (ret && ret != -ENOSPC)
3519                        goto done;
3520                if (ret == -ENOSPC)
3521                        failed++;
3522        } while (key.offset-- > 0);
3523
3524        if (failed && !retried) {
3525                failed = 0;
3526                retried = true;
3527                goto again;
3528        } else if (failed && retried) {
3529                ret = -ENOSPC;
3530                lock_chunks(root);
3531
3532                device->total_bytes = old_size;
3533                if (device->writeable)
3534                        device->fs_devices->total_rw_bytes += diff;
3535                spin_lock(&root->fs_info->free_chunk_lock);
3536                root->fs_info->free_chunk_space += diff;
3537                spin_unlock(&root->fs_info->free_chunk_lock);
3538                unlock_chunks(root);
3539                goto done;
3540        }
3541
3542        /* Shrinking succeeded, else we would be at "done". */
3543        trans = btrfs_start_transaction(root, 0);
3544        if (IS_ERR(trans)) {
3545                ret = PTR_ERR(trans);
3546                goto done;
3547        }
3548
3549        lock_chunks(root);
3550
3551        device->disk_total_bytes = new_size;
3552        /* Now btrfs_update_device() will change the on-disk size. */
3553        ret = btrfs_update_device(trans, device);
3554        if (ret) {
3555                unlock_chunks(root);
3556                btrfs_end_transaction(trans, root);
3557                goto done;
3558        }
3559        WARN_ON(diff > old_total);
3560        btrfs_set_super_total_bytes(super_copy, old_total - diff);
3561        unlock_chunks(root);
3562        btrfs_end_transaction(trans, root);
3563done:
3564        btrfs_free_path(path);
3565        return ret;
3566}
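/*
 * Note the retry logic above: a device extent whose chunk fails to
 * relocate with -ENOSPC on the first pass may succeed on the second,
 * because relocating the other chunks can free up space in the meantime.
 * Only when a full second pass still fails is the shrink rolled back and
 * -ENOSPC returned.
 */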
3567
3568static int btrfs_add_system_chunk(struct btrfs_root *root,
3569                           struct btrfs_key *key,
3570                           struct btrfs_chunk *chunk, int item_size)
3571{
3572        struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3573        struct btrfs_disk_key disk_key;
3574        u32 array_size;
3575        u8 *ptr;
3576
3577        array_size = btrfs_super_sys_array_size(super_copy);
3578        if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3579                return -EFBIG;
3580
3581        ptr = super_copy->sys_chunk_array + array_size;
3582        btrfs_cpu_key_to_disk(&disk_key, key);
3583        memcpy(ptr, &disk_key, sizeof(disk_key));
3584        ptr += sizeof(disk_key);
3585        memcpy(ptr, chunk, item_size);
3586        item_size += sizeof(disk_key);
3587        btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3588        return 0;
3589}
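/*
 * Layout produced above: sys_chunk_array holds a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk incl. stripes) pairs, and
 * sys_array_size accounts for the key as well, which is why item_size
 * is bumped by sizeof(disk_key) before the size update.
 */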
3590
3591/*
3592 * sort the devices in descending order by max_avail, total_avail
3593 */
3594static int btrfs_cmp_device_info(const void *a, const void *b)
3595{
3596        const struct btrfs_device_info *di_a = a;
3597        const struct btrfs_device_info *di_b = b;
3598
3599        if (di_a->max_avail > di_b->max_avail)
3600                return -1;
3601        if (di_a->max_avail < di_b->max_avail)
3602                return 1;
3603        if (di_a->total_avail > di_b->total_avail)
3604                return -1;
3605        if (di_a->total_avail < di_b->total_avail)
3606                return 1;
3607        return 0;
3608}
3609
3610static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3611        [BTRFS_RAID_RAID10] = {
3612                .sub_stripes    = 2,
3613                .dev_stripes    = 1,
3614                .devs_max       = 0,    /* 0 == as many as possible */
3615                .devs_min       = 4,
3616                .devs_increment = 2,
3617                .ncopies        = 2,
3618        },
3619        [BTRFS_RAID_RAID1] = {
3620                .sub_stripes    = 1,
3621                .dev_stripes    = 1,
3622                .devs_max       = 2,
3623                .devs_min       = 2,
3624                .devs_increment = 2,
3625                .ncopies        = 2,
3626        },
3627        [BTRFS_RAID_DUP] = {
3628                .sub_stripes    = 1,
3629                .dev_stripes    = 2,
3630                .devs_max       = 1,
3631                .devs_min       = 1,
3632                .devs_increment = 1,
3633                .ncopies        = 2,
3634        },
3635        [BTRFS_RAID_RAID0] = {
3636                .sub_stripes    = 1,
3637                .dev_stripes    = 1,
3638                .devs_max       = 0,
3639                .devs_min       = 2,
3640                .devs_increment = 1,
3641                .ncopies        = 1,
3642        },
3643        [BTRFS_RAID_SINGLE] = {
3644                .sub_stripes    = 1,
3645                .dev_stripes    = 1,
3646                .devs_max       = 1,
3647                .devs_min       = 1,
3648                .devs_increment = 1,
3649                .ncopies        = 1,
3650        },
3651        [BTRFS_RAID_RAID5] = {
3652                .sub_stripes    = 1,
3653                .dev_stripes    = 1,
3654                .devs_max       = 0,
3655                .devs_min       = 2,
3656                .devs_increment = 1,
3657                .ncopies        = 2,
3658        },
3659        [BTRFS_RAID_RAID6] = {
3660                .sub_stripes    = 1,
3661                .dev_stripes    = 1,
3662                .devs_max       = 0,
3663                .devs_min       = 3,
3664                .devs_increment = 1,
3665                .ncopies        = 3,
3666        },
3667};
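/*
 * Worked example of how __btrfs_alloc_chunk() below consumes this table,
 * for RAID10 on 5 writable devices: devs_increment = 2 rounds ndevs down
 * to 4, num_stripes = ndevs * dev_stripes = 4, and with ncopies = 2 only
 * data_stripes = 2 of those count towards the logical size of the chunk.
 */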
3668
3669static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
3670{
3671        /* TODO allow them to set a preferred stripe size */
3672        return 64 * 1024;
3673}
3674
3675static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
3676{
3677        if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
3678                return;
3679
3680        btrfs_set_fs_incompat(info, RAID56);
3681}
3682
3683static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3684                               struct btrfs_root *extent_root,
3685                               struct map_lookup **map_ret,
3686                               u64 *num_bytes_out, u64 *stripe_size_out,
3687                               u64 start, u64 type)
3688{
3689        struct btrfs_fs_info *info = extent_root->fs_info;
3690        struct btrfs_fs_devices *fs_devices = info->fs_devices;
3691        struct list_head *cur;
3692        struct map_lookup *map = NULL;
3693        struct extent_map_tree *em_tree;
3694        struct extent_map *em;
3695        struct btrfs_device_info *devices_info = NULL;
3696        u64 total_avail;
3697        int num_stripes;        /* total number of stripes to allocate */
3698        int data_stripes;       /* number of stripes that count for
3699                                   block group size */
3700        int sub_stripes;        /* sub_stripes info for map */
3701        int dev_stripes;        /* stripes per dev */
3702        int devs_max;           /* max devs to use */
3703        int devs_min;           /* min devs needed */
3704        int devs_increment;     /* ndevs has to be a multiple of this */
3705        int ncopies;            /* how many copies the data has */
3706        int ret;
3707        u64 max_stripe_size;
3708        u64 max_chunk_size;
3709        u64 stripe_size;
3710        u64 num_bytes;
3711        u64 raid_stripe_len = BTRFS_STRIPE_LEN;
3712        int ndevs;
3713        int i;
3714        int j;
3715        int index;
3716
3717        BUG_ON(!alloc_profile_is_valid(type, 0));
3718
3719        if (list_empty(&fs_devices->alloc_list))
3720                return -ENOSPC;
3721
3722        index = __get_raid_index(type);
3723
3724        sub_stripes = btrfs_raid_array[index].sub_stripes;
3725        dev_stripes = btrfs_raid_array[index].dev_stripes;
3726        devs_max = btrfs_raid_array[index].devs_max;
3727        devs_min = btrfs_raid_array[index].devs_min;
3728        devs_increment = btrfs_raid_array[index].devs_increment;
3729        ncopies = btrfs_raid_array[index].ncopies;
3730
3731        if (type & BTRFS_BLOCK_GROUP_DATA) {
3732                max_stripe_size = 1024 * 1024 * 1024;
3733                max_chunk_size = 10 * max_stripe_size;
3734        } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3735                /* for larger filesystems, use larger metadata chunks */
3736                if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3737                        max_stripe_size = 1024 * 1024 * 1024;
3738                else
3739                        max_stripe_size = 256 * 1024 * 1024;
3740                max_chunk_size = max_stripe_size;
3741        } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3742                max_stripe_size = 32 * 1024 * 1024;
3743                max_chunk_size = 2 * max_stripe_size;
3744        } else {
3745                printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3746                       type);
3747                BUG_ON(1);
3748        }
3749
3750        /* we don't want a chunk larger than 10% of writeable space */
3751        max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3752                             max_chunk_size);
3753
3754        devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3755                               GFP_NOFS);
3756        if (!devices_info)
3757                return -ENOMEM;
3758
3759        cur = fs_devices->alloc_list.next;
3760
3761        /*
3762         * in the first pass through the devices list, we gather information
3763         * about the available holes on each device.
3764         */
3765        ndevs = 0;
3766        while (cur != &fs_devices->alloc_list) {
3767                struct btrfs_device *device;
3768                u64 max_avail;
3769                u64 dev_offset;
3770
3771                device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3772
3773                cur = cur->next;
3774
3775                if (!device->writeable) {
3776                        WARN(1, KERN_ERR
3777                               "btrfs: read-only device in alloc_list\n");
3778                        continue;
3779                }
3780
3781                if (!device->in_fs_metadata ||
3782                    device->is_tgtdev_for_dev_replace)
3783                        continue;
3784
3785                if (device->total_bytes > device->bytes_used)
3786                        total_avail = device->total_bytes - device->bytes_used;
3787                else
3788                        total_avail = 0;
3789
3790                /* If there is no space on this device, skip it. */
3791                if (total_avail == 0)
3792                        continue;
3793
3794                ret = find_free_dev_extent(device,
3795                                           max_stripe_size * dev_stripes,
3796                                           &dev_offset, &max_avail);
3797                if (ret && ret != -ENOSPC)
3798                        goto error;
3799
3800                if (ret == 0)
3801                        max_avail = max_stripe_size * dev_stripes;
3802
3803                if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3804                        continue;
3805
3806                if (ndevs == fs_devices->rw_devices) {
3807                        WARN(1, "%s: found more than %llu devices\n",
3808                             __func__, fs_devices->rw_devices);
3809                        break;
3810                }
3811                devices_info[ndevs].dev_offset = dev_offset;
3812                devices_info[ndevs].max_avail = max_avail;
3813                devices_info[ndevs].total_avail = total_avail;
3814                devices_info[ndevs].dev = device;
3815                ++ndevs;
3816        }
3817
3818        /*
3819         * now sort the devices by hole size / available space
3820         */
3821        sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3822             btrfs_cmp_device_info, NULL);
3823
3824        /* round down to number of usable stripes */
3825        ndevs -= ndevs % devs_increment;
3826
3827        if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3828                ret = -ENOSPC;
3829                goto error;
3830        }
3831
3832        if (devs_max && ndevs > devs_max)
3833                ndevs = devs_max;
3834        /*
3835         * the primary goal is to maximize the number of stripes, so use as many
3836         * devices as possible, even if the stripes are not maximum sized.
3837         */
3838        stripe_size = devices_info[ndevs-1].max_avail;
3839        num_stripes = ndevs * dev_stripes;
3840
3841        /*
3842         * this will have to be fixed for RAID1 and RAID10 over
3843         * more drives
3844         */
3845        data_stripes = num_stripes / ncopies;
3846
3847        if (type & BTRFS_BLOCK_GROUP_RAID5) {
3848                raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
3849                                 btrfs_super_stripesize(info->super_copy));
3850                data_stripes = num_stripes - 1;
3851        }
3852        if (type & BTRFS_BLOCK_GROUP_RAID6) {
3853                raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
3854                                 btrfs_super_stripesize(info->super_copy));
3855                data_stripes = num_stripes - 2;
3856        }
3857
3858        /*
3859         * Use the number of data stripes to figure out how big this chunk
3860         * is really going to be in terms of logical address space,
3861         * and compare that answer with the max chunk size
3862         */
3863        if (stripe_size * data_stripes > max_chunk_size) {
3864                u64 mask = (1ULL << 24) - 1;
3865                stripe_size = max_chunk_size;
3866                do_div(stripe_size, data_stripes);
3867
3868                /* bump the answer up to a 16MB boundary */
3869                stripe_size = (stripe_size + mask) & ~mask;
3870
3871                /* but don't go higher than the limits we found
3872                 * while searching for free extents
3873                 */
3874                if (stripe_size > devices_info[ndevs-1].max_avail)
3875                        stripe_size = devices_info[ndevs-1].max_avail;
3876        }
3877
3878        do_div(stripe_size, dev_stripes);
3879
3880        /* align to BTRFS_STRIPE_LEN */
3881        do_div(stripe_size, raid_stripe_len);
3882        stripe_size *= raid_stripe_len;
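        /*
         * Worked example of the alignments above: when the chunk is
         * clamped to max_chunk_size, stripe_size is rounded up to a
         * 16MiB boundary via (stripe_size + mask) & ~mask, e.g.
         * 20MiB -> 32MiB; the final div/mul pair then rounds stripe_size
         * down to a multiple of raid_stripe_len.
         */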
3883
3884        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3885        if (!map) {
3886                ret = -ENOMEM;
3887                goto error;
3888        }
3889        map->num_stripes = num_stripes;
3890
3891        for (i = 0; i < ndevs; ++i) {
3892                for (j = 0; j < dev_stripes; ++j) {
3893                        int s = i * dev_stripes + j;
3894                        map->stripes[s].dev = devices_info[i].dev;
3895                        map->stripes[s].physical = devices_info[i].dev_offset +
3896                                                   j * stripe_size;
3897                }
3898        }
3899        map->sector_size = extent_root->sectorsize;
3900        map->stripe_len = raid_stripe_len;
3901        map->io_align = raid_stripe_len;
3902        map->io_width = raid_stripe_len;
3903        map->type = type;
3904        map->sub_stripes = sub_stripes;
3905
3906        *map_ret = map;
3907        num_bytes = stripe_size * data_stripes;
3908
3909        *stripe_size_out = stripe_size;
3910        *num_bytes_out = num_bytes;
3911
3912        trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3913
3914        em = alloc_extent_map();
3915        if (!em) {
3916                ret = -ENOMEM;
3917                goto error;
3918        }
3919        em->bdev = (struct block_device *)map;
3920        em->start = start;
3921        em->len = num_bytes;
3922        em->block_start = 0;
3923        em->block_len = em->len;
3924
3925        em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3926        write_lock(&em_tree->lock);
3927        ret = add_extent_mapping(em_tree, em, 0);
3928        write_unlock(&em_tree->lock);
3929        if (ret) {
3930                free_extent_map(em);
3931                goto error;
3932        }
3933
3934        for (i = 0; i < map->num_stripes; ++i) {
3935                struct btrfs_device *device;
3936                u64 dev_offset;
3937
3938                device = map->stripes[i].dev;
3939                dev_offset = map->stripes[i].physical;
3940
3941                ret = btrfs_alloc_dev_extent(trans, device,
3942                                info->chunk_root->root_key.objectid,
3943                                BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3944                                start, dev_offset, stripe_size);
3945                if (ret)
3946                        goto error_dev_extent;
3947        }
3948
3949        ret = btrfs_make_block_group(trans, extent_root, 0, type,
3950                                     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3951                                     start, num_bytes);
3952        if (ret) {
3953                i = map->num_stripes - 1;
3954                goto error_dev_extent;
3955        }
3956
3957        free_extent_map(em);
3958        check_raid56_incompat_flag(extent_root->fs_info, type);
3959
3960        kfree(devices_info);
3961        return 0;
3962
3963error_dev_extent:
3964        for (; i >= 0; i--) {
3965                struct btrfs_device *device;
3966                int err;
3967
3968                device = map->stripes[i].dev;
3969                err = btrfs_free_dev_extent(trans, device, start);
3970                if (err) {
3971                        btrfs_abort_transaction(trans, extent_root, err);
3972                        break;
3973                }
3974        }
3975        write_lock(&em_tree->lock);
3976        remove_extent_mapping(em_tree, em);
3977        write_unlock(&em_tree->lock);
3978
3979        /* One for our allocation */
3980        free_extent_map(em);
3981        /* One for the tree reference */
3982        free_extent_map(em);
3983error:
3984        kfree(map);
3985        kfree(devices_info);
3986        return ret;
3987}
3988
3989static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3990                                struct btrfs_root *extent_root,
3991                                struct map_lookup *map, u64 chunk_offset,
3992                                u64 chunk_size, u64 stripe_size)
3993{
3994        u64 dev_offset;
3995        struct btrfs_key key;
3996        struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3997        struct btrfs_device *device;
3998        struct btrfs_chunk *chunk;
3999        struct btrfs_stripe *stripe;
4000        size_t item_size = btrfs_chunk_item_size(map->num_stripes);
4001        int index = 0;
4002        int ret;
4003
4004        chunk = kzalloc(item_size, GFP_NOFS);
4005        if (!chunk)
4006                return -ENOMEM;
4007
4008        index = 0;
4009        while (index < map->num_stripes) {
4010                device = map->stripes[index].dev;
4011                device->bytes_used += stripe_size;
4012                ret = btrfs_update_device(trans, device);
4013                if (ret)
4014                        goto out_free;
4015                index++;
4016        }
4017
4018        spin_lock(&extent_root->fs_info->free_chunk_lock);
4019        extent_root->fs_info->free_chunk_space -= (stripe_size *
4020                                                   map->num_stripes);
4021        spin_unlock(&extent_root->fs_info->free_chunk_lock);
4022
4023        index = 0;
4024        stripe = &chunk->stripe;
4025        while (index < map->num_stripes) {
4026                device = map->stripes[index].dev;
4027                dev_offset = map->stripes[index].physical;
4028
4029                btrfs_set_stack_stripe_devid(stripe, device->devid);
4030                btrfs_set_stack_stripe_offset(stripe, dev_offset);
4031                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4032                stripe++;
4033                index++;
4034        }
4035
4036        btrfs_set_stack_chunk_length(chunk, chunk_size);
4037        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4038        btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4039        btrfs_set_stack_chunk_type(chunk, map->type);
4040        btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4041        btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4042        btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4043        btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4044        btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4045
4046        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4047        key.type = BTRFS_CHUNK_ITEM_KEY;
4048        key.offset = chunk_offset;
4049
4050        ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4051
4052        if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4053                /*
4054                 * TODO: Cleanup of inserted chunk root in case of
4055                 * failure.
4056                 */
4057                ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4058                                             item_size);
4059        }
4060
4061out_free:
4062        kfree(chunk);
4063        return ret;
4064}
4065
4066/*
4067 * Chunk allocation falls into two parts. The first part does the
4068 * work that makes the newly allocated chunk usable, but does not
4069 * touch the chunk tree. The second part does the work that requires
4070 * modifying the chunk tree. This division is important for the
4071 * bootstrap process of adding storage to a seed btrfs.
4072 */
4073int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4074                      struct btrfs_root *extent_root, u64 type)
4075{
4076        u64 chunk_offset;
4077        u64 chunk_size;
4078        u64 stripe_size;
4079        struct map_lookup *map;
4080        struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4081        int ret;
4082
4083        ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4084                              &chunk_offset);
4085        if (ret)
4086                return ret;
4087
4088        ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
4089                                  &stripe_size, chunk_offset, type);
4090        if (ret)
4091                return ret;
4092
4093        ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
4094                                   chunk_size, stripe_size);
4095        if (ret)
4096                return ret;
4097        return 0;
4098}
4099
4100static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4101                                         struct btrfs_root *root,
4102                                         struct btrfs_device *device)
4103{
4104        u64 chunk_offset;
4105        u64 sys_chunk_offset;
4106        u64 chunk_size;
4107        u64 sys_chunk_size;
4108        u64 stripe_size;
4109        u64 sys_stripe_size;
4110        u64 alloc_profile;
4111        struct map_lookup *map;
4112        struct map_lookup *sys_map;
4113        struct btrfs_fs_info *fs_info = root->fs_info;
4114        struct btrfs_root *extent_root = fs_info->extent_root;
4115        int ret;
4116
4117        ret = find_next_chunk(fs_info->chunk_root,
4118                              BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
4119        if (ret)
4120                return ret;
4121
4122        alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4123        ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
4124                                  &stripe_size, chunk_offset, alloc_profile);
4125        if (ret)
4126                return ret;
4127
4128        sys_chunk_offset = chunk_offset + chunk_size;
4129
4130        alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4131        ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
4132                                  &sys_chunk_size, &sys_stripe_size,
4133                                  sys_chunk_offset, alloc_profile);
4134        if (ret) {
4135                btrfs_abort_transaction(trans, root, ret);
4136                goto out;
4137        }
4138
4139        ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4140        if (ret) {
4141                btrfs_abort_transaction(trans, root, ret);
4142                goto out;
4143        }
4144
4145        /*
4146         * Modifying the chunk tree requires allocating new blocks from
4147         * both the system and the metadata block groups. So we can only
4148         * do operations that modify the chunk tree after both block
4149         * groups have been created.
4150         */
4151        ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
4152                                   chunk_size, stripe_size);
4153        if (ret) {
4154                btrfs_abort_transaction(trans, root, ret);
4155                goto out;
4156        }
4157
4158        ret = __finish_chunk_alloc(trans, extent_root, sys_map,
4159                                   sys_chunk_offset, sys_chunk_size,
4160                                   sys_stripe_size);
4161        if (ret)
4162                btrfs_abort_transaction(trans, root, ret);
4163
4164out:
4165
4166        return ret;
4167}
4168
4169int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4170{
4171        struct extent_map *em;
4172        struct map_lookup *map;
4173        struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4174        int readonly = 0;
4175        int i;
4176
4177        read_lock(&map_tree->map_tree.lock);
4178        em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4179        read_unlock(&map_tree->map_tree.lock);
4180        if (!em)
4181                return 1;
4182
4183        if (btrfs_test_opt(root, DEGRADED)) {
4184                free_extent_map(em);
4185                return 0;
4186        }
4187
4188        map = (struct map_lookup *)em->bdev;
4189        for (i = 0; i < map->num_stripes; i++) {
4190                if (!map->stripes[i].dev->writeable) {
4191                        readonly = 1;
4192                        break;
4193                }
4194        }
4195        free_extent_map(em);
4196        return readonly;
4197}
4198
4199void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4200{
4201        extent_map_tree_init(&tree->map_tree);
4202}
4203
4204void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4205{
4206        struct extent_map *em;
4207
4208        while (1) {
4209                write_lock(&tree->map_tree.lock);
4210                em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4211                if (em)
4212                        remove_extent_mapping(&tree->map_tree, em);
4213                write_unlock(&tree->map_tree.lock);
4214                if (!em)
4215                        break;
4216                kfree(em->bdev);
4217                /* once for us */
4218                free_extent_map(em);
4219                /* once for the tree */
4220                free_extent_map(em);
4221        }
4222}
4223
4224int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4225{
4226        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4227        struct extent_map *em;
4228        struct map_lookup *map;
4229        struct extent_map_tree *em_tree = &map_tree->map_tree;
4230        int ret;
4231
4232        read_lock(&em_tree->lock);
4233        em = lookup_extent_mapping(em_tree, logical, len);
4234        read_unlock(&em_tree->lock);
4235
4236        /*
4237         * We could return errors for these cases, but that could get ugly and
4238         * we'd probably end up doing the same thing anyway: nothing else but
4239         * exit.  So return 1 so the callers don't try to use other copies.
4240         */
4241        if (!em) {
4242                btrfs_emerg(fs_info, "No mapping for %Lu-%Lu\n", logical,
4243                            logical+len);
4244                return 1;
4245        }
4246
4247        if (em->start > logical || em->start + em->len < logical) {
4248                btrfs_emerg(fs_info, "Invalid mapping for %Lu-%Lu, got "
4249                            "%Lu-%Lu\n", logical, logical+len, em->start,
4250                            em->start + em->len);
4251                free_extent_map(em);    /* drop the ref from the lookup */
                return 1;
4252        }
4253
4254        map = (struct map_lookup *)em->bdev;
4255        if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4256                ret = map->num_stripes;
4257        else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4258                ret = map->sub_stripes;
4259        else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4260                ret = 2;
4261        else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4262                ret = 3;
4263        else
4264                ret = 1;
4265        free_extent_map(em);
4266
4267        btrfs_dev_replace_lock(&fs_info->dev_replace);
4268        if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4269                ret++;
4270        btrfs_dev_replace_unlock(&fs_info->dev_replace);
4271
4272        return ret;
4273}
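/*
 * The returns above, in words: RAID1 and DUP keep num_stripes copies (2),
 * RAID10 keeps sub_stripes (2), and for the parity profiles the count is
 * the number of ways a block can be produced: 2 for RAID5 (the data
 * stripe, or a rebuild from parity) and 3 for RAID6.  An ongoing device
 * replace adds one more, since the target device mirrors the source.
 */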
4274
4275unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4276                                    struct btrfs_mapping_tree *map_tree,
4277                                    u64 logical)
4278{
4279        struct extent_map *em;
4280        struct map_lookup *map;
4281        struct extent_map_tree *em_tree = &map_tree->map_tree;
4282        unsigned long len = root->sectorsize;
4283
4284        read_lock(&em_tree->lock);
4285        em = lookup_extent_mapping(em_tree, logical, len);
4286        read_unlock(&em_tree->lock);
4287        BUG_ON(!em);
4288
4289        BUG_ON(em->start > logical || em->start + em->len < logical);
4290        map = (struct map_lookup *)em->bdev;
4291        if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4292                         BTRFS_BLOCK_GROUP_RAID6)) {
4293                len = map->stripe_len * nr_data_stripes(map);
4294        }
4295        free_extent_map(em);
4296        return len;
4297}
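/*
 * Worked example for the above: RAID5 over 4 devices with a 64KiB
 * stripe_len has nr_data_stripes = 3, so a full stripe covers
 * 3 * 64KiB = 192KiB of logical address space; all other profiles
 * report a single sector.
 */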
4298
4299int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4300                           u64 logical, u64 len, int mirror_num)
4301{
4302        struct extent_map *em;
4303        struct map_lookup *map;
4304        struct extent_map_tree *em_tree = &map_tree->map_tree;
4305        int ret = 0;
4306
4307        read_lock(&em_tree->lock);
4308        em = lookup_extent_mapping(em_tree, logical, len);
4309        read_unlock(&em_tree->lock);
4310        BUG_ON(!em);
4311
4312        BUG_ON(em->start > logical || em->start + em->len < logical);
4313        map = (struct map_lookup *)em->bdev;
4314        if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4315                         BTRFS_BLOCK_GROUP_RAID6))
4316                ret = 1;
4317        free_extent_map(em);
4318        return ret;
4319}
4320
4321static int find_live_mirror(struct btrfs_fs_info *fs_info,
4322                            struct map_lookup *map, int first, int num,
4323                            int optimal, int dev_replace_is_ongoing)
4324{
4325        int i;
4326        int tolerance;
4327        struct btrfs_device *srcdev;
4328
4329        if (dev_replace_is_ongoing &&
4330            fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4331             BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4332                srcdev = fs_info->dev_replace.srcdev;
4333        else
4334                srcdev = NULL;
4335
4336        /*
4337         * try to avoid the drive that is the source drive for a
4338         * dev-replace procedure, only choose it if no other non-missing
4339         * mirror is available
4340         */
4341        for (tolerance = 0; tolerance < 2; tolerance++) {
4342                if (map->stripes[optimal].dev->bdev &&
4343                    (tolerance || map->stripes[optimal].dev != srcdev))
4344                        return optimal;
4345                for (i = first; i < first + num; i++) {
4346                        if (map->stripes[i].dev->bdev &&
4347                            (tolerance || map->stripes[i].dev != srcdev))
4348                                return i;
4349                }
4350        }
4351
4352        /* we couldn't find one that doesn't fail.  Just return something
4353         * and the io error handling code will clean up eventually
4354         */
4355        return optimal;
4356}
4357
4358static inline int parity_smaller(u64 a, u64 b)
4359{
4360        return a > b;
4361}
4362
4363/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4364static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4365{
4366        struct btrfs_bio_stripe s;
4367        int i;
4368        u64 l;
4369        int again = 1;
4370
4371        while (again) {
4372                again = 0;
4373                for (i = 0; i < bbio->num_stripes - 1; i++) {
4374                        if (parity_smaller(raid_map[i], raid_map[i+1])) {
4375                                s = bbio->stripes[i];
4376                                l = raid_map[i];
4377                                bbio->stripes[i] = bbio->stripes[i+1];
4378                                raid_map[i] = raid_map[i+1];
4379                                bbio->stripes[i+1] = s;
4380                                raid_map[i+1] = l;
4381                                again = 1;
4382                        }
4383                }
4384        }
4385}
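/*
 * raid_map marks the parity and syndrome stripes with the special values
 * BTRFS_RAID5_P_STRIPE ((u64)-2) and BTRFS_RAID6_Q_STRIPE ((u64)-1), so
 * sorting by ascending raid_map value is exactly what pushes them behind
 * the data stripes.
 */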
4386
4387static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4388                             u64 logical, u64 *length,
4389                             struct btrfs_bio **bbio_ret,
4390                             int mirror_num, u64 **raid_map_ret)
4391{
4392        struct extent_map *em;
4393        struct map_lookup *map;
4394        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4395        struct extent_map_tree *em_tree = &map_tree->map_tree;
4396        u64 offset;
4397        u64 stripe_offset;
4398        u64 stripe_end_offset;
4399        u64 stripe_nr;
4400        u64 stripe_nr_orig;
4401        u64 stripe_nr_end;
4402        u64 stripe_len;
4403        u64 *raid_map = NULL;
4404        int stripe_index;
4405        int i;
4406        int ret = 0;
4407        int num_stripes;
4408        int max_errors = 0;
4409        struct btrfs_bio *bbio = NULL;
4410        struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4411        int dev_replace_is_ongoing = 0;
4412        int num_alloc_stripes;
4413        int patch_the_first_stripe_for_dev_replace = 0;
4414        u64 physical_to_patch_in_first_stripe = 0;
4415        u64 raid56_full_stripe_start = (u64)-1;
4416
4417        read_lock(&em_tree->lock);
4418        em = lookup_extent_mapping(em_tree, logical, *length);
4419        read_unlock(&em_tree->lock);
4420
4421        if (!em) {
4422                btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4423                        (unsigned long long)logical,
4424                        (unsigned long long)*length);
4425                return -EINVAL;
4426        }
4427
4428        if (em->start > logical || em->start + em->len < logical) {
4429                btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4430                           "found %Lu-%Lu", logical, em->start,
4431                           em->start + em->len);
                    free_extent_map(em);
4432                return -EINVAL;
4433        }
4434
4435        map = (struct map_lookup *)em->bdev;
4436        offset = logical - em->start;
4437
4438        if (mirror_num > map->num_stripes)
4439                mirror_num = 0;
4440
4441        stripe_len = map->stripe_len;
4442        stripe_nr = offset;
4443        /*
4444         * stripe_nr counts the total number of stripes we have to stride
4445         * to get to this block
4446         */
4447        do_div(stripe_nr, stripe_len);
4448
4449        stripe_offset = stripe_nr * stripe_len;
4450        BUG_ON(offset < stripe_offset);
4451
4452        /* stripe_offset is the offset of this block in its stripe */
4453        stripe_offset = offset - stripe_offset;
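            /*
             * Worked example (illustrative, assuming the usual 64KiB
             * stripe_len): for offset = 300KiB, do_div() leaves
             * stripe_nr = 4 (4 * 64KiB = 256KiB <= 300KiB) and
             * stripe_offset = 300KiB - 256KiB = 44KiB into that stripe.
             */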
4454
4455        /* if we're here for raid56, we need to know the stripe aligned start */
4456        if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4457                unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4458                raid56_full_stripe_start = offset;
4459
4460                /* allow a write of a full stripe, but make sure we don't
4461                 * allow straddling of stripes
4462                 */
4463                do_div(raid56_full_stripe_start, full_stripe_len);
4464                raid56_full_stripe_start *= full_stripe_len;
4465        }
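            /*
             * E.g. (a sketch: RAID5 on 3 devices, 64KiB stripe_len):
             * nr_data_stripes = 2 and full_stripe_len = 128KiB, so an
             * offset of 300KiB rounds down to
             * raid56_full_stripe_start = 256KiB.
             */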
4466
4467        if (rw & REQ_DISCARD) {
4468                /* we don't discard raid56 yet */
4469                if (map->type &
4470                    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4471                        ret = -EOPNOTSUPP;
4472                        goto out;
4473                }
4474                *length = min_t(u64, em->len - offset, *length);
4475        } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4476                u64 max_len;
4477                /* For writes to RAID[56], allow a full stripe set across all disks.
4478                   For other RAID types and for RAID[56] reads, just allow a single
4479                   stripe (on a single disk). */
4480                if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4481                    (rw & REQ_WRITE)) {
4482                        max_len = stripe_len * nr_data_stripes(map) -
4483                                (offset - raid56_full_stripe_start);
4484                } else {
4485                        /* we limit the length of each bio to what fits in a stripe */
4486                        max_len = stripe_len - stripe_offset;
4487                }
4488                *length = min_t(u64, em->len - offset, max_len);
4489        } else {
4490                *length = em->len - offset;
4491        }
4492
4493        /* This is for when we're called from btrfs_merge_bio_hook() and all
4494           it cares about is the length */
4495        if (!bbio_ret)
4496                goto out;
4497
4498        btrfs_dev_replace_lock(dev_replace);
4499        dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4500        if (!dev_replace_is_ongoing)
4501                btrfs_dev_replace_unlock(dev_replace);
4502
4503        if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4504            !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4505            dev_replace->tgtdev != NULL) {
4506                /*
4507                 * In the dev-replace case, for repair (the only case
4508                 * where the mirror is selected explicitly when calling
4509                 * btrfs_map_block), blocks left of the left cursor
4510                 * can also be read from the target drive.
4511                 * For REQ_GET_READ_MIRRORS, the target drive is added as
4512                 * the last one to the array of stripes. For READ, it also
4513                 * needs to be supported using the same mirror number.
4514                 * If the requested block is not left of the left cursor,
4515                 * EIO is returned. This can happen because btrfs_num_copies()
4516                 * returns one more in the dev-replace case.
4517                 */
4518                u64 tmp_length = *length;
4519                struct btrfs_bio *tmp_bbio = NULL;
4520                int tmp_num_stripes;
4521                u64 srcdev_devid = dev_replace->srcdev->devid;
4522                int index_srcdev = 0;
4523                int found = 0;
4524                u64 physical_of_found = 0;
4525
4526                ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4527                             logical, &tmp_length, &tmp_bbio, 0, NULL);
4528                if (ret) {
4529                        WARN_ON(tmp_bbio != NULL);
4530                        goto out;
4531                }
4532
4533                tmp_num_stripes = tmp_bbio->num_stripes;
4534                if (mirror_num > tmp_num_stripes) {
4535                        /*
4536                         * REQ_GET_READ_MIRRORS does not contain this
4537                         * mirror, which means that the requested area
4538                         * is not left of the left cursor
4539                         */
4540                        ret = -EIO;
4541                        kfree(tmp_bbio);
4542                        goto out;
4543                }
4544
4545                /*
4546                 * Process the rest of the function using the mirror_num
4547                 * of the source drive. Therefore look it up first.
4548                 * At the end, patch the device pointer to that of the
4549                 * target drive.
4550                 */
4551                for (i = 0; i < tmp_num_stripes; i++) {
4552                        if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4553                                /*
4554                                 * In case of DUP, in order to keep it
4555                                 * simple, only add the mirror with the
4556                                 * lowest physical address
4557                                 */
4558                                if (found &&
4559                                    physical_of_found <=
4560                                     tmp_bbio->stripes[i].physical)
4561                                        continue;
4562                                index_srcdev = i;
4563                                found = 1;
4564                                physical_of_found =
4565                                        tmp_bbio->stripes[i].physical;
4566                        }
4567                }
4568
4569                if (found) {
4570                        mirror_num = index_srcdev + 1;
4571                        patch_the_first_stripe_for_dev_replace = 1;
4572                        physical_to_patch_in_first_stripe = physical_of_found;
4573                } else {
4574                        WARN_ON(1);
4575                        ret = -EIO;
4576                        kfree(tmp_bbio);
4577                        goto out;
4578                }
4579
4580                kfree(tmp_bbio);
4581        } else if (mirror_num > map->num_stripes) {
4582                mirror_num = 0;
4583        }
4584
4585        num_stripes = 1;
4586        stripe_index = 0;
4587        stripe_nr_orig = stripe_nr;
4588        stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4589        do_div(stripe_nr_end, map->stripe_len);
4590        stripe_end_offset = stripe_nr_end * map->stripe_len -
4591                            (offset + *length);
4592
4593        if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4594                if (rw & REQ_DISCARD)
4595                        num_stripes = min_t(u64, map->num_stripes,
4596                                            stripe_nr_end - stripe_nr_orig);
4597                stripe_index = do_div(stripe_nr, map->num_stripes);
4598        } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4599                if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4600                        num_stripes = map->num_stripes;
4601                else if (mirror_num)
4602                        stripe_index = mirror_num - 1;
4603                else {
4604                        stripe_index = find_live_mirror(fs_info, map, 0,
4605                                            map->num_stripes,
4606                                            current->pid % map->num_stripes,
4607                                            dev_replace_is_ongoing);
4608                        mirror_num = stripe_index + 1;
4609                }
4610
4611        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4612                if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4613                        num_stripes = map->num_stripes;
4614                } else if (mirror_num) {
4615                        stripe_index = mirror_num - 1;
4616                } else {
4617                        mirror_num = 1;
4618                }
4619
4620        } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4621                int factor = map->num_stripes / map->sub_stripes;
4622
4623                stripe_index = do_div(stripe_nr, factor);
4624                stripe_index *= map->sub_stripes;
4625
4626                if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4627                        num_stripes = map->sub_stripes;
4628                else if (rw & REQ_DISCARD)
4629                        num_stripes = min_t(u64, map->sub_stripes *
4630                                            (stripe_nr_end - stripe_nr_orig),
4631                                            map->num_stripes);
4632                else if (mirror_num)
4633                        stripe_index += mirror_num - 1;
4634                else {
4635                        int old_stripe_index = stripe_index;
4636                        stripe_index = find_live_mirror(fs_info, map,
4637                                              stripe_index,
4638                                              map->sub_stripes, stripe_index +
4639                                              current->pid % map->sub_stripes,
4640                                              dev_replace_is_ongoing);
4641                        mirror_num = stripe_index - old_stripe_index + 1;
4642                }
4643
4644        } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4645                                BTRFS_BLOCK_GROUP_RAID6)) {
4646                u64 tmp;
4647
4648                if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4649                    && raid_map_ret) {
4650                        int i, rot;
4651
4652                        /* push stripe_nr back to the start of the full stripe */
4653                        stripe_nr = raid56_full_stripe_start;
4654                        do_div(stripe_nr, stripe_len);
4655
4656                        stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4657
4658                        /* RAID[56] write or recovery. Return all stripes */
4659                        num_stripes = map->num_stripes;
4660                        max_errors = nr_parity_stripes(map);
4661
4662                        raid_map = kmalloc(sizeof(u64) * num_stripes,
4663                                           GFP_NOFS);
4664                        if (!raid_map) {
4665                                ret = -ENOMEM;
4666                                goto out;
4667                        }
4668
4669                        /* Work out the disk rotation on this stripe-set */
4670                        tmp = stripe_nr;
4671                        rot = do_div(tmp, num_stripes);
4672
4673                        /* Fill in the logical address of each stripe */
4674                        tmp = stripe_nr * nr_data_stripes(map);
4675                        for (i = 0; i < nr_data_stripes(map); i++)
4676                                raid_map[(i+rot) % num_stripes] =
4677                                        em->start + (tmp + i) * map->stripe_len;
4678
4679                        raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
4680                        if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4681                                raid_map[(i+rot+1) % num_stripes] =
4682                                        RAID6_Q_STRIPE;
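                            /*
                             * E.g. (a sketch: RAID5 on 3 devices, 64KiB
                             * stripe_len, full stripe number 1): rot = 1,
                             * so device 1 gets logical 128KiB, device 2
                             * gets logical 192KiB and parity rotates onto
                             * device 0: raid_map = { P, 128K, 192K }
                             * (offsets relative to em->start).
                             */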
4683
4684                        *length = map->stripe_len;
4685                        stripe_index = 0;
4686                        stripe_offset = 0;
4687                } else {
4688                        /*
4689                         * Mirror #0 or #1 means the original data block.
4690                         * Mirror #2 is the RAID5 parity block.
4691                         * Mirror #3 is the RAID6 Q block.
4692                         */
4693                        stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4694                        if (mirror_num > 1)
4695                                stripe_index = nr_data_stripes(map) +
4696                                                mirror_num - 2;
4697
4698                        /* We distribute the parity blocks across stripes */
4699                        tmp = stripe_nr + stripe_index;
4700                        stripe_index = do_div(tmp, map->num_stripes);
4701                }
4702        } else {
4703                /*
4704                 * after this do_div call, stripe_nr is the number of stripes
4705                 * on this device we have to walk to find the data, and
4706                 * stripe_index is the index of our device in the stripe array
4707                 */
4708                stripe_index = do_div(stripe_nr, map->num_stripes);
4709                mirror_num = stripe_index + 1;
4710        }
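            /*
             * Mapping example (illustrative): for RAID10 across 4 devices
             * with sub_stripes = 2, 64KiB stripe_len and offset = 300KiB,
             * stripe_nr starts as 4 with stripe_offset = 44KiB; factor = 2,
             * so do_div() leaves stripe_nr = 2 and stripe_index = 0.
             * The block sits 44KiB into stripe 2 of the mirror pair made
             * up of stripes 0 and 1, and a write maps to both copies.
             */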
4711        BUG_ON(stripe_index >= map->num_stripes);
4712
4713        num_alloc_stripes = num_stripes;
4714        if (dev_replace_is_ongoing) {
4715                if (rw & (REQ_WRITE | REQ_DISCARD))
4716                        num_alloc_stripes <<= 1;
4717                if (rw & REQ_GET_READ_MIRRORS)
4718                        num_alloc_stripes++;
4719        }
4720        bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4721        if (!bbio) {
4722                ret = -ENOMEM;
4723                goto out;
4724        }
4725        atomic_set(&bbio->error, 0);
4726
4727        if (rw & REQ_DISCARD) {
4728                int factor = 0;
4729                int sub_stripes = 0;
4730                u64 stripes_per_dev = 0;
4731                u32 remaining_stripes = 0;
4732                u32 last_stripe = 0;
4733
4734                if (map->type &
4735                    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4736                        if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4737                                sub_stripes = 1;
4738                        else
4739                                sub_stripes = map->sub_stripes;
4740
4741                        factor = map->num_stripes / sub_stripes;
4742                        stripes_per_dev = div_u64_rem(stripe_nr_end -
4743                                                      stripe_nr_orig,
4744                                                      factor,
4745                                                      &remaining_stripes);
4746                        div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4747                        last_stripe *= sub_stripes;
4748                }
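                    /*
                     * Worked example (a sketch: RAID0 on 4 devices, 64KiB
                     * stripe_len, a 384KiB discard starting on a stripe
                     * boundary): stripe_nr_end - stripe_nr_orig = 6, so
                     * stripes_per_dev = 1 with remaining_stripes = 2;
                     * devices 0 and 1 each discard 128KiB while devices
                     * 2 and 3 discard 64KiB each.
                     */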
4749
4750                for (i = 0; i < num_stripes; i++) {
4751                        bbio->stripes[i].physical =
4752                                map->stripes[stripe_index].physical +
4753                                stripe_offset + stripe_nr * map->stripe_len;
4754                        bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4755
4756                        if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4757                                         BTRFS_BLOCK_GROUP_RAID10)) {
4758                                bbio->stripes[i].length = stripes_per_dev *
4759                                                          map->stripe_len;
4760
4761                                if (i / sub_stripes < remaining_stripes)
4762                                        bbio->stripes[i].length +=
4763                                                map->stripe_len;
4764
4765                                /*
4766                                 * Special for the first stripe and
4767                                 * the last stripe:
4768                                 *
4769                                 * |-------|...|-------|
4770                                 *     |----------|
4771                                 *    off     end_off
4772                                 */
4773                                if (i < sub_stripes)
4774                                        bbio->stripes[i].length -=
4775                                                stripe_offset;
4776
4777                                if (stripe_index >= last_stripe &&
4778                                    stripe_index <= (last_stripe +
4779                                                     sub_stripes - 1))
4780                                        bbio->stripes[i].length -=
4781                                                stripe_end_offset;
4782
4783                                if (i == sub_stripes - 1)
4784                                        stripe_offset = 0;
4785                        } else
4786                                bbio->stripes[i].length = *length;
4787
4788                        stripe_index++;
4789                        if (stripe_index == map->num_stripes) {
4790                                /* This could only happen for RAID0/10 */
4791                                stripe_index = 0;
4792                                stripe_nr++;
4793                        }
4794                }
4795        } else {
4796                for (i = 0; i < num_stripes; i++) {
4797                        bbio->stripes[i].physical =
4798                                map->stripes[stripe_index].physical +
4799                                stripe_offset +
4800                                stripe_nr * map->stripe_len;
4801                        bbio->stripes[i].dev =
4802                                map->stripes[stripe_index].dev;
4803                        stripe_index++;
4804                }
4805        }
4806
4807        if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
4808                if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4809                                 BTRFS_BLOCK_GROUP_RAID10 |
4810                                 BTRFS_BLOCK_GROUP_RAID5 |
4811                                 BTRFS_BLOCK_GROUP_DUP)) {
4812                        max_errors = 1;
4813                } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4814                        max_errors = 2;
4815                }
4816        }
4817
4818        if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
4819            dev_replace->tgtdev != NULL) {
4820                int index_where_to_add;
4821                u64 srcdev_devid = dev_replace->srcdev->devid;
4822
4823                /*
4824                 * duplicate the write operations while the dev replace
4825                 * procedure is running. Since the copying of the old disk
4826                 * to the new disk takes place at run time while the
4827                 * filesystem is mounted writable, the regular write
4828                 * operations to the old disk have to be duplicated to go
4829                 * to the new disk as well.
4830                 * Note that device->missing is handled by the caller, and
4831                 * that the write to the old disk is already set up in the
4832                 * stripes array.
4833                 */
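                    /*
                     * E.g. for a RAID1 write with stripes { srcdev, other }:
                     * a third stripe { tgtdev } is appended below with the
                     * same physical offset and length as the srcdev stripe,
                     * and max_errors grows by one for the extra copy.
                     */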
4834                index_where_to_add = num_stripes;
4835                for (i = 0; i < num_stripes; i++) {
4836                        if (bbio->stripes[i].dev->devid == srcdev_devid) {
4837                                /* write to new disk, too */
4838                                struct btrfs_bio_stripe *new =
4839                                        bbio->stripes + index_where_to_add;
4840                                struct btrfs_bio_stripe *old =
4841                                        bbio->stripes + i;
4842
4843                                new->physical = old->physical;
4844                                new->length = old->length;
4845                                new->dev = dev_replace->tgtdev;
4846                                index_where_to_add++;
4847                                max_errors++;
4848                        }
4849                }
4850                num_stripes = index_where_to_add;
4851        } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
4852                   dev_replace->tgtdev != NULL) {
4853                u64 srcdev_devid = dev_replace->srcdev->devid;
4854                int index_srcdev = 0;
4855                int found = 0;
4856                u64 physical_of_found = 0;
4857
4858                /*
4859                 * During the dev-replace procedure, the target drive can
4860                 * also be used to read data in case it is needed to repair
4861                 * a corrupt block elsewhere. This is possible if the
4862                 * requested area is left of the left cursor. In this area,
4863                 * the target drive is a full copy of the source drive.
4864                 */
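                    /*
                     * Illustration: with cursor_left at 1GiB, a source
                     * stripe found at physical 512MiB (plus one stripe_len)
                     * lies entirely in the already-copied region and the
                     * target drive is appended as an extra mirror below;
                     * for a stripe at 2GiB, only the source copies remain.
                     */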
4865                for (i = 0; i < num_stripes; i++) {
4866                        if (bbio->stripes[i].dev->devid == srcdev_devid) {
4867                                /*
4868                                 * In case of DUP, in order to keep it
4869                                 * simple, only add the mirror with the
4870                                 * lowest physical address
4871                                 */
4872                                if (found &&
4873                                    physical_of_found <=
4874                                     bbio->stripes[i].physical)
4875                                        continue;
4876                                index_srcdev = i;
4877                                found = 1;
4878                                physical_of_found = bbio->stripes[i].physical;
4879                        }
4880                }
4881                if (found) {
4882                        u64 length = map->stripe_len;
4883
4884                        if (physical_of_found + length <=
4885                            dev_replace->cursor_left) {
4886                                struct btrfs_bio_stripe *tgtdev_stripe =
4887                                        bbio->stripes + num_stripes;
4888
4889                                tgtdev_stripe->physical = physical_of_found;
4890                                tgtdev_stripe->length =
4891                                        bbio->stripes[index_srcdev].length;
4892                                tgtdev_stripe->dev = dev_replace->tgtdev;
4893
4894                                num_stripes++;
4895                        }
4896                }
4897        }
4898
4899        *bbio_ret = bbio;
4900        bbio->num_stripes = num_stripes;
4901        bbio->max_errors = max_errors;
4902        bbio->mirror_num = mirror_num;
4903
4904        /*
4905         * This is the case of a READ request while dev_replace_is_ongoing,
4906         * mirror_num == num_stripes + 1 and the dev-replace target drive
4907         * is available as a mirror
4908         */
4909        if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
4910                WARN_ON(num_stripes > 1);
4911                bbio->stripes[0].dev = dev_replace->tgtdev;
4912                bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
4913                bbio->mirror_num = map->num_stripes + 1;
4914        }
4915        if (raid_map) {
4916                sort_parity_stripes(bbio, raid_map);
4917                *raid_map_ret = raid_map;
4918        }
4919out:
4920        if (dev_replace_is_ongoing)
4921                btrfs_dev_replace_unlock(dev_replace);
4922        free_extent_map(em);
4923        return ret;
4924}
4925
4926int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4927                      u64 logical, u64 *length,
4928                      struct btrfs_bio **bbio_ret, int mirror_num)
4929{
4930        return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
4931                                 mirror_num, NULL);
4932}
4933
4934int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4935                     u64 chunk_start, u64 physical, u64 devid,
4936                     u64 **logical, int *naddrs, int *stripe_len)
4937{
4938        struct extent_map_tree *em_tree = &map_tree->map_tree;
4939        struct extent_map *em;
4940        struct map_lookup *map;
4941        u64 *buf;
4942        u64 bytenr;
4943        u64 length;
4944        u64 stripe_nr;
4945        u64 rmap_len;
4946        int i, j, nr = 0;
4947
4948        read_lock(&em_tree->lock);
4949        em = lookup_extent_mapping(em_tree, chunk_start, 1);
4950        read_unlock(&em_tree->lock);
4951
4952        if (!em) {
4953                printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
4954                       chunk_start);
4955                return -EIO;
4956        }
4957
4958        if (em->start != chunk_start) {
4959                printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
4960                       em->start, chunk_start);
4961                free_extent_map(em);
4962                return -EIO;
4963        }
4964        map = (struct map_lookup *)em->bdev;
4965
4966        length = em->len;
4967        rmap_len = map->stripe_len;
4968
4969        if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4970                do_div(length, map->num_stripes / map->sub_stripes);
4971        else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4972                do_div(length, map->num_stripes);
4973        else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4974                              BTRFS_BLOCK_GROUP_RAID6)) {
4975                do_div(length, nr_data_stripes(map));
4976                rmap_len = map->stripe_len * nr_data_stripes(map);
4977        }
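            /*
             * E.g. for a RAID10 chunk striped across 4 devices with
             * sub_stripes = 2, each device holds em->len / 2 bytes of the
             * chunk, so 'length' becomes half the chunk size here.
             */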
4978
4979        buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4980        BUG_ON(!buf); /* -ENOMEM */
4981
4982        for (i = 0; i < map->num_stripes; i++) {
4983                if (devid && map->stripes[i].dev->devid != devid)
4984                        continue;
4985                if (map->stripes[i].physical > physical ||
4986                    map->stripes[i].physical + length <= physical)
4987                        continue;
4988
4989                stripe_nr = physical - map->stripes[i].physical;
4990                do_div(stripe_nr, map->stripe_len);
4991
4992                if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4993                        stripe_nr = stripe_nr * map->num_stripes + i;
4994                        do_div(stripe_nr, map->sub_stripes);
4995                } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4996                        stripe_nr = stripe_nr * map->num_stripes + i;
4997                } /* else if RAID[56], multiply by nr_data_stripes().
4998                   * Alternatively, just use rmap_len below instead of
4999                   * map->stripe_len */
5000
5001                bytenr = chunk_start + stripe_nr * rmap_len;
5002                WARN_ON(nr >= map->num_stripes);
5003                for (j = 0; j < nr; j++) {
5004                        if (buf[j] == bytenr)
5005                                break;
5006                }
5007                if (j == nr) {
5008                        WARN_ON(nr >= map->num_stripes);
5009                        buf[nr++] = bytenr;
5010                }
5011        }
5012
5013        *logical = buf;
5014        *naddrs = nr;
5015        *stripe_len = rmap_len;
5016
5017        free_extent_map(em);
5018        return 0;
5019}
5020
5021static void btrfs_end_bio(struct bio *bio, int err)
5022{
5023        struct btrfs_bio *bbio = bio->bi_private;
5024        int is_orig_bio = 0;
5025
5026        if (err) {
5027                atomic_inc(&bbio->error);
5028                if (err == -EIO || err == -EREMOTEIO) {
5029                        unsigned int stripe_index =
5030                                btrfs_io_bio(bio)->stripe_index;
5031                        struct btrfs_device *dev;
5032
5033                        BUG_ON(stripe_index >= bbio->num_stripes);
5034                        dev = bbio->stripes[stripe_index].dev;
5035                        if (dev->bdev) {
5036                                if (bio->bi_rw & WRITE)
5037                                        btrfs_dev_stat_inc(dev,
5038                                                BTRFS_DEV_STAT_WRITE_ERRS);
5039                                else
5040                                        btrfs_dev_stat_inc(dev,
5041                                                BTRFS_DEV_STAT_READ_ERRS);
5042                                if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5043                                        btrfs_dev_stat_inc(dev,
5044                                                BTRFS_DEV_STAT_FLUSH_ERRS);
5045                                btrfs_dev_stat_print_on_error(dev);
5046                        }
5047                }
5048        }
5049
5050        if (bio == bbio->orig_bio)
5051                is_orig_bio = 1;
5052
5053        if (atomic_dec_and_test(&bbio->stripes_pending)) {
5054                if (!is_orig_bio) {
5055                        bio_put(bio);
5056                        bio = bbio->orig_bio;
5057                }
5058                bio->bi_private = bbio->private;
5059                bio->bi_end_io = bbio->end_io;
5060                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5061                /* only send an error to the higher layers if it is
5062                 * beyond the tolerance of the btrfs bio
5063                 */
5064                if (atomic_read(&bbio->error) > bbio->max_errors) {
5065                        err = -EIO;
5066                } else {
5067                        /*
5068                         * this bio is actually up to date, we didn't
5069                         * go over the max number of errors
5070                         */
5071                        set_bit(BIO_UPTODATE, &bio->bi_flags);
5072                        err = 0;
5073                }
5074                kfree(bbio);
5075
5076                bio_endio(bio, err);
5077        } else if (!is_orig_bio) {
5078                bio_put(bio);
5079        }
5080}
5081
5082struct async_sched {
5083        struct bio *bio;
5084        int rw;
5085        struct btrfs_fs_info *info;
5086        struct btrfs_work work;
5087};
5088
5089/*
5090 * see run_scheduled_bios for a description of why bios are collected for
5091 * async submit.
5092 *
5093 * This will add one bio to the pending list for a device and make sure
5094 * the work struct is scheduled.
5095 */
5096static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5097                                        struct btrfs_device *device,
5098                                        int rw, struct bio *bio)
5099{
5100        int should_queue = 1;
5101        struct btrfs_pending_bios *pending_bios;
5102
5103        if (device->missing || !device->bdev) {
5104                bio_endio(bio, -EIO);
5105                return;
5106        }
5107
5108        /* don't bother with additional async steps for reads, right now */
5109        if (!(rw & REQ_WRITE)) {
5110                bio_get(bio);
5111                btrfsic_submit_bio(rw, bio);
5112                bio_put(bio);
5113                return;
5114        }
5115
5116        /*
5117         * nr_async_bios allows us to reliably return congestion to the
5118         * higher layers.  Otherwise, the async bio makes it appear we have
5119         * made progress against dirty pages when we've really just put it
5120         * on a queue for later
5121         */
5122        atomic_inc(&root->fs_info->nr_async_bios);
5123        WARN_ON(bio->bi_next);
5124        bio->bi_next = NULL;
5125        bio->bi_rw |= rw;
5126
5127        spin_lock(&device->io_lock);
5128        if (bio->bi_rw & REQ_SYNC)
5129                pending_bios = &device->pending_sync_bios;
5130        else
5131                pending_bios = &device->pending_bios;
5132
5133        if (pending_bios->tail)
5134                pending_bios->tail->bi_next = bio;
5135
5136        pending_bios->tail = bio;
5137        if (!pending_bios->head)
5138                pending_bios->head = bio;
5139        if (device->running_pending)
5140                should_queue = 0;
5141
5142        spin_unlock(&device->io_lock);
5143
5144        if (should_queue)
5145                btrfs_queue_worker(&root->fs_info->submit_workers,
5146                                   &device->work);
5147}
5148
5149static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5150                       sector_t sector)
5151{
5152        struct bio_vec *prev;
5153        struct request_queue *q = bdev_get_queue(bdev);
5154        unsigned short max_sectors = queue_max_sectors(q);
5155        struct bvec_merge_data bvm = {
5156                .bi_bdev = bdev,
5157                .bi_sector = sector,
5158                .bi_rw = bio->bi_rw,
5159        };
5160
5161        if (bio->bi_vcnt == 0) {
5162                WARN_ON(1);
5163                return 1;
5164        }
5165
5166        prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
5167        if (bio_sectors(bio) > max_sectors)
5168                return 0;
5169
5170        if (!q->merge_bvec_fn)
5171                return 1;
5172
5173        bvm.bi_size = bio->bi_size - prev->bv_len;
5174        if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
5175                return 0;
5176        return 1;
5177}
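    /*
     * E.g. (illustrative): on a queue where queue_max_sectors() returns 256
     * (128KiB), a 512-sector bio fails the bio_sectors() check and the
     * caller falls back to breakup_stripe_bio(); a 128-sector bio on a
     * queue without a merge_bvec_fn is accepted as-is.
     */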
5178
5179static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5180                              struct bio *bio, u64 physical, int dev_nr,
5181                              int rw, int async)
5182{
5183        struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5184
5185        bio->bi_private = bbio;
5186        btrfs_io_bio(bio)->stripe_index = dev_nr;
5187        bio->bi_end_io = btrfs_end_bio;
5188        bio->bi_sector = physical >> 9;
5189#ifdef DEBUG
5190        {
5191                struct rcu_string *name;
5192
5193                rcu_read_lock();
5194                name = rcu_dereference(dev->name);
5195                pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5196                         "(%s id %llu), size=%u\n", rw,
5197                         (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
5198                         name->str, dev->devid, bio->bi_size);
5199                rcu_read_unlock();
5200        }
5201#endif
5202        bio->bi_bdev = dev->bdev;
5203        if (async)
5204                btrfs_schedule_bio(root, dev, rw, bio);
5205        else
5206                btrfsic_submit_bio(rw, bio);
5207}
5208
5209static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5210                              struct bio *first_bio, struct btrfs_device *dev,
5211                              int dev_nr, int rw, int async)
5212{
5213        struct bio_vec *bvec = first_bio->bi_io_vec;
5214        struct bio *bio;
5215        int nr_vecs = bio_get_nr_vecs(dev->bdev);
5216        u64 physical = bbio->stripes[dev_nr].physical;
5217
5218again:
5219        bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
5220        if (!bio)
5221                return -ENOMEM;
5222
5223        while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5224                if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5225                                 bvec->bv_offset) < bvec->bv_len) {
5226                        u64 len = bio->bi_size;
5227
5228                        atomic_inc(&bbio->stripes_pending);
5229                        submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5230                                          rw, async);
5231                        physical += len;
5232                        goto again;
5233                }
5234                bvec++;
5235        }
5236
5237        submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5238        return 0;
5239}
5240
5241static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5242{
5243        atomic_inc(&bbio->error);
5244        if (atomic_dec_and_test(&bbio->stripes_pending)) {
5245                bio->bi_private = bbio->private;
5246                bio->bi_end_io = bbio->end_io;
5247                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5248                bio->bi_sector = logical >> 9;
5249                kfree(bbio);
5250                bio_endio(bio, -EIO);
5251        }
5252}
5253
5254int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5255                  int mirror_num, int async_submit)
5256{
5257        struct btrfs_device *dev;
5258        struct bio *first_bio = bio;
5259        u64 logical = (u64)bio->bi_sector << 9;
5260        u64 length = 0;
5261        u64 map_length;
5262        u64 *raid_map = NULL;
5263        int ret;
5264        int dev_nr = 0;
5265        int total_devs = 1;
5266        struct btrfs_bio *bbio = NULL;
5267
5268        length = bio->bi_size;
5269        map_length = length;
5270
5271        ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5272                              mirror_num, &raid_map);
5273        if (ret) /* -ENOMEM */
5274                return ret;
5275
5276        total_devs = bbio->num_stripes;
5277        bbio->orig_bio = first_bio;
5278        bbio->private = first_bio->bi_private;
5279        bbio->end_io = first_bio->bi_end_io;
5280        atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5281
5282        if (raid_map) {
5283                /* In this case, map_length has been set to the length of
5284                   a single stripe, not the whole write */
5285                if (rw & WRITE) {
5286                        return raid56_parity_write(root, bio, bbio,
5287                                                   raid_map, map_length);
5288                } else {
5289                        return raid56_parity_recover(root, bio, bbio,
5290                                                     raid_map, map_length,
5291                                                     mirror_num);
5292                }
5293        }
5294
5295        if (map_length < length) {
5296                btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5297                        (unsigned long long)logical,
5298                        (unsigned long long)length,
5299                        (unsigned long long)map_length);
5300                BUG();
5301        }
5302
5303        while (dev_nr < total_devs) {
5304                dev = bbio->stripes[dev_nr].dev;
5305                if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5306                        bbio_error(bbio, first_bio, logical);
5307                        dev_nr++;
5308                        continue;
5309                }
5310
5311                /*
5312                 * Check and see if we're ok with this bio based on its size
5313                 * and offset for the given device.
5314                 */
5315                if (!bio_size_ok(dev->bdev, first_bio,
5316                                 bbio->stripes[dev_nr].physical >> 9)) {
5317                        ret = breakup_stripe_bio(root, bbio, first_bio, dev,
5318                                                 dev_nr, rw, async_submit);
5319                        BUG_ON(ret);
5320                        dev_nr++;
5321                        continue;
5322                }
5323
5324                if (dev_nr < total_devs - 1) {
5325                        bio = btrfs_bio_clone(first_bio, GFP_NOFS);
5326                        BUG_ON(!bio); /* -ENOMEM */
5327                } else {
5328                        bio = first_bio;
5329                }
5330
5331                submit_stripe_bio(root, bbio, bio,
5332                                  bbio->stripes[dev_nr].physical, dev_nr, rw,
5333                                  async_submit);
5334                dev_nr++;
5335        }
5336        return 0;
5337}
5338
5339struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
5340                                       u8 *uuid, u8 *fsid)
5341{
5342        struct btrfs_device *device;
5343        struct btrfs_fs_devices *cur_devices;
5344
5345        cur_devices = fs_info->fs_devices;
5346        while (cur_devices) {
5347                if (!fsid ||
5348                    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5349                        device = __find_device(&cur_devices->devices,
5350                                               devid, uuid);
5351                        if (device)
5352                                return device;
5353                }
5354                cur_devices = cur_devices->seed;
5355        }
5356        return NULL;
5357}
5358
5359static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
5360                                            u64 devid, u8 *dev_uuid)
5361{
5362        struct btrfs_device *device;
5363        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5364
5365        device = kzalloc(sizeof(*device), GFP_NOFS);
5366        if (!device)
5367                return NULL;
5368        list_add(&device->dev_list,
5369                 &fs_devices->devices);
5370        device->dev_root = root->fs_info->dev_root;
5371        device->devid = devid;
5372        device->work.func = pending_bios_fn;
5373        device->fs_devices = fs_devices;
5374        device->missing = 1;
5375        fs_devices->num_devices++;
5376        fs_devices->missing_devices++;
5377        spin_lock_init(&device->io_lock);
5378        INIT_LIST_HEAD(&device->dev_alloc_list);
5379        memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
5380        return device;
5381}
5382
5383static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5384                          struct extent_buffer *leaf,
5385                          struct btrfs_chunk *chunk)
5386{
5387        struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5388        struct map_lookup *map;
5389        struct extent_map *em;
5390        u64 logical;
5391        u64 length;
5392        u64 devid;
5393        u8 uuid[BTRFS_UUID_SIZE];
5394        int num_stripes;
5395        int ret;
5396        int i;
5397
5398        logical = key->offset;
5399        length = btrfs_chunk_length(leaf, chunk);
5400
5401        read_lock(&map_tree->map_tree.lock);
5402        em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
5403        read_unlock(&map_tree->map_tree.lock);
5404
5405        /* already mapped? */
5406        if (em && em->start <= logical && em->start + em->len > logical) {
5407                free_extent_map(em);
5408                return 0;
5409        } else if (em) {
5410                free_extent_map(em);
5411        }
5412
5413        em = alloc_extent_map();
5414        if (!em)
5415                return -ENOMEM;
5416        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
5417        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
5418        if (!map) {
5419                free_extent_map(em);
5420                return -ENOMEM;
5421        }
5422
5423        em->bdev = (struct block_device *)map;
5424        em->start = logical;
5425        em->len = length;
5426        em->orig_start = 0;
5427        em->block_start = 0;
5428        em->block_len = em->len;
5429
5430        map->num_stripes = num_stripes;
5431        map->io_width = btrfs_chunk_io_width(leaf, chunk);
5432        map->io_align = btrfs_chunk_io_align(leaf, chunk);
5433        map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
5434        map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
5435        map->type = btrfs_chunk_type(leaf, chunk);
5436        map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
5437        for (i = 0; i < num_stripes; i++) {
5438                map->stripes[i].physical =
5439                        btrfs_stripe_offset_nr(leaf, chunk, i);
5440                devid = btrfs_stripe_devid_nr(leaf, chunk, i);
5441                read_extent_buffer(leaf, uuid, (unsigned long)
5442                                   btrfs_stripe_dev_uuid_nr(chunk, i),
5443                                   BTRFS_UUID_SIZE);
5444                map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
5445                                                        uuid, NULL);
5446                if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
5447                        kfree(map);
5448                        free_extent_map(em);
5449                        return -EIO;
5450                }
5451                if (!map->stripes[i].dev) {
5452                        map->stripes[i].dev =
5453                                add_missing_dev(root, devid, uuid);
5454                        if (!map->stripes[i].dev) {
5455                                kfree(map);
5456                                free_extent_map(em);
5457                                return -EIO;
5458                        }
5459                }
5460                map->stripes[i].dev->in_fs_metadata = 1;
5461        }
5462
5463        write_lock(&map_tree->map_tree.lock);
5464        ret = add_extent_mapping(&map_tree->map_tree, em, 0);
5465        write_unlock(&map_tree->map_tree.lock);
5466        BUG_ON(ret); /* Tree corruption */
5467        free_extent_map(em);
5468
5469        return 0;
5470}
5471
5472static void fill_device_from_item(struct extent_buffer *leaf,
5473                                 struct btrfs_dev_item *dev_item,
5474                                 struct btrfs_device *device)
5475{
5476        unsigned long ptr;
5477
5478        device->devid = btrfs_device_id(leaf, dev_item);
5479        device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5480        device->total_bytes = device->disk_total_bytes;
5481        device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5482        device->type = btrfs_device_type(leaf, dev_item);
5483        device->io_align = btrfs_device_io_align(leaf, dev_item);
5484        device->io_width = btrfs_device_io_width(leaf, dev_item);
5485        device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5486        WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5487        device->is_tgtdev_for_dev_replace = 0;
5488
5489        ptr = (unsigned long)btrfs_device_uuid(dev_item);
5490        read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5491}
5492
5493static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5494{
5495        struct btrfs_fs_devices *fs_devices;
5496        int ret;
5497
5498        BUG_ON(!mutex_is_locked(&uuid_mutex));
5499
5500        fs_devices = root->fs_info->fs_devices->seed;
5501        while (fs_devices) {
5502                if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5503                        ret = 0;
5504                        goto out;
5505                }
5506                fs_devices = fs_devices->seed;
5507        }
5508
5509        fs_devices = find_fsid(fsid);
5510        if (!fs_devices) {
5511                ret = -ENOENT;
5512                goto out;
5513        }
5514
5515        fs_devices = clone_fs_devices(fs_devices);
5516        if (IS_ERR(fs_devices)) {
5517                ret = PTR_ERR(fs_devices);
5518                goto out;
5519        }
5520
5521        ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5522                                   root->fs_info->bdev_holder);
5523        if (ret) {
5524                free_fs_devices(fs_devices);
5525                goto out;
5526        }
5527
5528        if (!fs_devices->seeding) {
5529                __btrfs_close_devices(fs_devices);
5530                free_fs_devices(fs_devices);
5531                ret = -EINVAL;
5532                goto out;
5533        }
5534
5535        fs_devices->seed = root->fs_info->fs_devices->seed;
5536        root->fs_info->fs_devices->seed = fs_devices;
5537out:
5538        return ret;
5539}
5540
5541static int read_one_dev(struct btrfs_root *root,
5542                        struct extent_buffer *leaf,
5543                        struct btrfs_dev_item *dev_item)
5544{
5545        struct btrfs_device *device;
5546        u64 devid;
5547        int ret;
5548        u8 fs_uuid[BTRFS_UUID_SIZE];
5549        u8 dev_uuid[BTRFS_UUID_SIZE];
5550
5551        devid = btrfs_device_id(leaf, dev_item);
5552        read_extent_buffer(leaf, dev_uuid,
5553                           (unsigned long)btrfs_device_uuid(dev_item),
5554                           BTRFS_UUID_SIZE);
5555        read_extent_buffer(leaf, fs_uuid,
5556                           (unsigned long)btrfs_device_fsid(dev_item),
5557                           BTRFS_UUID_SIZE);
5558
5559        if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5560                ret = open_seed_devices(root, fs_uuid);
5561                if (ret && !btrfs_test_opt(root, DEGRADED))
5562                        return ret;
5563        }
5564
5565        device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5566        if (!device || !device->bdev) {
5567                if (!btrfs_test_opt(root, DEGRADED))
5568                        return -EIO;
5569
5570                if (!device) {
5571                        btrfs_warn(root->fs_info, "devid %llu missing",
5572                                (unsigned long long)devid);
5573                        device = add_missing_dev(root, devid, dev_uuid);
5574                        if (!device)
5575                                return -ENOMEM;
5576                } else if (!device->missing) {
5577                        /*
5578                         * this happens when a device that was properly set up
5579                         * in the device info lists suddenly goes bad.
5580                         * device->bdev is NULL, and so we have to set
5581                         * device->missing to one here
5582                         */
5583                        root->fs_info->fs_devices->missing_devices++;
5584                        device->missing = 1;
5585                }
5586        }
5587
5588        if (device->fs_devices != root->fs_info->fs_devices) {
5589                BUG_ON(device->writeable);
5590                if (device->generation !=
5591                    btrfs_device_generation(leaf, dev_item))
5592                        return -EINVAL;
5593        }
5594
5595        fill_device_from_item(leaf, dev_item, device);
5596        device->dev_root = root->fs_info->dev_root;
5597        device->in_fs_metadata = 1;
5598        if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5599                device->fs_devices->total_rw_bytes += device->total_bytes;
5600                spin_lock(&root->fs_info->free_chunk_lock);
5601                root->fs_info->free_chunk_space += device->total_bytes -
5602                        device->bytes_used;
5603                spin_unlock(&root->fs_info->free_chunk_lock);
5604        }
5605        ret = 0;
5606        return ret;
5607}
5608
5609int btrfs_read_sys_array(struct btrfs_root *root)
5610{
5611        struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5612        struct extent_buffer *sb;
5613        struct btrfs_disk_key *disk_key;
5614        struct btrfs_chunk *chunk;
5615        u8 *ptr;
5616        unsigned long sb_ptr;
5617        int ret = 0;
5618        u32 num_stripes;
5619        u32 array_size;
5620        u32 len = 0;
5621        u32 cur;
5622        struct btrfs_key key;
5623
5624        sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5625                                          BTRFS_SUPER_INFO_SIZE);
5626        if (!sb)
5627                return -ENOMEM;
5628        btrfs_set_buffer_uptodate(sb);
5629        btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
5630        /*
5631         * The sb extent buffer is artificial and just used to read the system array.
5632         * The btrfs_set_buffer_uptodate() call does not properly mark all its
5633         * pages up-to-date when the page is larger: the extent does not cover the
5634         * whole page and consequently check_page_uptodate does not find all
5635         * the page's extents up-to-date (the hole beyond sb), so
5636         * write_extent_buffer then triggers a WARN_ON.
5637         *
5638         * Regular short extents go through the mark_extent_buffer_dirty/writeback
5639         * cycle, but sb spans only this function. Add an explicit SetPageUptodate
5640         * call to silence the warning e.g. on PowerPC 64.
5641         */
5642        if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5643                SetPageUptodate(sb->pages[0]);
5644
5645        write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5646        array_size = btrfs_super_sys_array_size(super_copy);
5647
5648        ptr = super_copy->sys_chunk_array;
5649        sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5650        cur = 0;
5651
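            /*
             * The array is a packed sequence of (struct btrfs_disk_key,
             * struct btrfs_chunk including its stripes) pairs:
             *   [key][chunk + N stripes][key][chunk + M stripes]...
             * The loop below walks it one key at a time.
             */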
5652        while (cur < array_size) {
5653                disk_key = (struct btrfs_disk_key *)ptr;
5654                btrfs_disk_key_to_cpu(&key, disk_key);
5655
5656                len = sizeof(*disk_key); ptr += len;
5657                sb_ptr += len;
5658                cur += len;
5659
5660                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5661                        chunk = (struct btrfs_chunk *)sb_ptr;
5662                        ret = read_one_chunk(root, &key, sb, chunk);
5663                        if (ret)
5664                                break;
5665                        num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5666                        len = btrfs_chunk_item_size(num_stripes);
5667                } else {
5668                        ret = -EIO;
5669                        break;
5670                }
5671                ptr += len;
5672                sb_ptr += len;
5673                cur += len;
5674        }
5675        free_extent_buffer(sb);
5676        return ret;
5677}
5678
5679int btrfs_read_chunk_tree(struct btrfs_root *root)
5680{
5681        struct btrfs_path *path;
5682        struct extent_buffer *leaf;
5683        struct btrfs_key key;
5684        struct btrfs_key found_key;
5685        int ret;
5686        int slot;
5687
5688        root = root->fs_info->chunk_root;
5689
5690        path = btrfs_alloc_path();
5691        if (!path)
5692                return -ENOMEM;
5693
5694        mutex_lock(&uuid_mutex);
5695        lock_chunks(root);
5696
5697        /* first we search for all of the device items, and then we
5698         * read in all of the chunk items.  This way we can create chunk
5699         * mappings that reference all of the devices that are found.
5700         */
5701        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
5702        key.offset = 0;
5703        key.type = 0;
5704again:
5705        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5706        if (ret < 0)
5707                goto error;
5708        while (1) {
5709                leaf = path->nodes[0];
5710                slot = path->slots[0];
5711                if (slot >= btrfs_header_nritems(leaf)) {
5712                        ret = btrfs_next_leaf(root, path);
5713                        if (ret == 0)
5714                                continue;
5715                        if (ret < 0)
5716                                goto error;
5717                        break;
5718                }
5719                btrfs_item_key_to_cpu(leaf, &found_key, slot);
5720                if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5721                        if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
5722                                break;
5723                        if (found_key.type == BTRFS_DEV_ITEM_KEY) {
5724                                struct btrfs_dev_item *dev_item;
5725                                dev_item = btrfs_item_ptr(leaf, slot,
5726                                                  struct btrfs_dev_item);
5727                                ret = read_one_dev(root, leaf, dev_item);
5728                                if (ret)
5729                                        goto error;
5730                        }
5731                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
5732                        struct btrfs_chunk *chunk;
5733                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
5734                        ret = read_one_chunk(root, &found_key, leaf, chunk);
5735                        if (ret)
5736                                goto error;
5737                }
5738                path->slots[0]++;
5739        }
5740        if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5741                key.objectid = 0;
5742                btrfs_release_path(path);
5743                goto again;
5744        }
5745        ret = 0;
5746error:
5747        unlock_chunks(root);
5748        mutex_unlock(&uuid_mutex);
5749
5750        btrfs_free_path(path);
5751        return ret;
5752}
5753
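/* Clear all BTRFS_DEV_STAT_VALUES_MAX error counters of one device. */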
5754static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
5755{
5756        int i;
5757
5758        for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5759                btrfs_dev_stat_reset(dev, i);
5760}
5761
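/*
 * Load the persistent I/O error statistics of every device from the
 * device tree.  A device without a stats item (e.g. one that was just
 * added) starts out with all counters zeroed.  An item may also be
 * shorter than expected (e.g. when written by an older kernel); any
 * missing trailing counters are reset rather than read.
 */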
5762int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
5763{
5764        struct btrfs_key key;
5765        struct btrfs_key found_key;
5766        struct btrfs_root *dev_root = fs_info->dev_root;
5767        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5768        struct extent_buffer *eb;
5769        int slot;
5770        int ret = 0;
5771        struct btrfs_device *device;
5772        struct btrfs_path *path = NULL;
5773        int i;
5774
5775        path = btrfs_alloc_path();
5776        if (!path) {
5777                ret = -ENOMEM;
5778                goto out;
5779        }
5780
5781        mutex_lock(&fs_devices->device_list_mutex);
5782        list_for_each_entry(device, &fs_devices->devices, dev_list) {
5783                int item_size;
5784                struct btrfs_dev_stats_item *ptr;
5785
5786                key.objectid = 0;
5787                key.type = BTRFS_DEV_STATS_KEY;
5788                key.offset = device->devid;
5789                ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
5790                if (ret) {
5791                        __btrfs_reset_dev_stats(device);
5792                        device->dev_stats_valid = 1;
5793                        btrfs_release_path(path);
5794                        continue;
5795                }
5796                slot = path->slots[0];
5797                eb = path->nodes[0];
5798                btrfs_item_key_to_cpu(eb, &found_key, slot);
5799                item_size = btrfs_item_size_nr(eb, slot);
5800
5801                ptr = btrfs_item_ptr(eb, slot,
5802                                     struct btrfs_dev_stats_item);
5803
5804                for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5805                        if (item_size >= (1 + i) * sizeof(__le64))
5806                                btrfs_dev_stat_set(device, i,
5807                                        btrfs_dev_stats_value(eb, ptr, i));
5808                        else
5809                                btrfs_dev_stat_reset(device, i);
5810                }
5811
5812                device->dev_stats_valid = 1;
5813                btrfs_dev_stat_print_on_load(device);
5814                btrfs_release_path(path);
5815        }
5816        mutex_unlock(&fs_devices->device_list_mutex);
5817
5818out:
5819        btrfs_free_path(path);
5820        return ret < 0 ? ret : 0;
5821}
5822
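/*
 * Write the in-memory error counters of one device back to its
 * dev_stats item (effectively an array of __le64 counters) in the
 * device tree.  An existing item too small to hold all current
 * counters is deleted and replaced by a full-sized one.
 */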
5823static int update_dev_stat_item(struct btrfs_trans_handle *trans,
5824                                struct btrfs_root *dev_root,
5825                                struct btrfs_device *device)
5826{
5827        struct btrfs_path *path;
5828        struct btrfs_key key;
5829        struct extent_buffer *eb;
5830        struct btrfs_dev_stats_item *ptr;
5831        int ret;
5832        int i;
5833
5834        key.objectid = 0;
5835        key.type = BTRFS_DEV_STATS_KEY;
5836        key.offset = device->devid;
5837
5838        path = btrfs_alloc_path();
5839        if (!path)
                return -ENOMEM;
5840        ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
5841        if (ret < 0) {
5842                printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
5843                              ret, rcu_str_deref(device->name));
5844                goto out;
5845        }
5846
5847        if (ret == 0 &&
5848            btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
5849                /* need to delete old one and insert a new one */
5850                ret = btrfs_del_item(trans, dev_root, path);
5851                if (ret != 0) {
5852                        printk_in_rcu(KERN_WARNING "btrfs: delete of too small dev_stats item for device %s failed %d!\n",
5853                                      rcu_str_deref(device->name), ret);
5854                        goto out;
5855                }
5856                ret = 1;
5857        }
5858
5859        if (ret == 1) {
5860                /* need to insert a new item */
5861                btrfs_release_path(path);
5862                ret = btrfs_insert_empty_item(trans, dev_root, path,
5863                                              &key, sizeof(*ptr));
5864                if (ret < 0) {
5865                        printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
5866                                      rcu_str_deref(device->name), ret);
5867                        goto out;
5868                }
5869        }
5870
5871        eb = path->nodes[0];
5872        ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
5873        for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5874                btrfs_set_dev_stats_value(eb, ptr, i,
5875                                          btrfs_dev_stat_read(device, i));
5876        btrfs_mark_buffer_dirty(eb);
5877
5878out:
5879        btrfs_free_path(path);
5880        return ret;
5881}
5882
5883/*
5884 * Called from commit_transaction().  Writes all changed device stats to disk.
5885 */
5886int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
5887                        struct btrfs_fs_info *fs_info)
5888{
5889        struct btrfs_root *dev_root = fs_info->dev_root;
5890        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5891        struct btrfs_device *device;
5892        int ret = 0;
5893
5894        mutex_lock(&fs_devices->device_list_mutex);
5895        list_for_each_entry(device, &fs_devices->devices, dev_list) {
5896                if (!device->dev_stats_valid || !device->dev_stats_dirty)
5897                        continue;
5898
5899                ret = update_dev_stat_item(trans, dev_root, device);
5900                if (!ret)
5901                        device->dev_stats_dirty = 0;
5902        }
5903        mutex_unlock(&fs_devices->device_list_mutex);
5904
5905        return ret;
5906}
5907
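/* Account one more error of the given type and report it (ratelimited). */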
5908void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
5909{
5910        btrfs_dev_stat_inc(dev, index);
5911        btrfs_dev_stat_print_on_error(dev);
5912}
5913
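/*
 * Ratelimited error-level dump of all counters; stays silent until
 * btrfs_init_dev_stats() has marked the counters valid.
 */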
5914static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
5915{
5916        if (!dev->dev_stats_valid)
5917                return;
5918        printk_ratelimited_in_rcu(KERN_ERR
5919                           "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5920                           rcu_str_deref(dev->name),
5921                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5922                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5923                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5924                           btrfs_dev_stat_read(dev,
5925                                               BTRFS_DEV_STAT_CORRUPTION_ERRS),
5926                           btrfs_dev_stat_read(dev,
5927                                               BTRFS_DEV_STAT_GENERATION_ERRS));
5928}
5929
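/*
 * One-time dump of the counters when they are first loaded at mount;
 * devices with a clean record (all counters zero) stay silent.
 */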
5930static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
5931{
5932        int i;
5933
5934        for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5935                if (btrfs_dev_stat_read(dev, i) != 0)
5936                        break;
5937        if (i == BTRFS_DEV_STAT_VALUES_MAX)
5938                return; /* all values == 0, suppress message */
5939
5940        printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5941               rcu_str_deref(dev->name),
5942               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5943               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5944               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5945               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
5946               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
5947}
5948
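/*
 * Back end of the BTRFS_IOC_GET_DEV_STATS ioctl: copy the counters of
 * the device named by stats->devid into stats->values, resetting them
 * on the fly when BTRFS_DEV_STATS_RESET is set in stats->flags.
 */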
5949int btrfs_get_dev_stats(struct btrfs_root *root,
5950                        struct btrfs_ioctl_get_dev_stats *stats)
5951{
5952        struct btrfs_device *dev;
5953        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5954        int i;
5955
5956        mutex_lock(&fs_devices->device_list_mutex);
5957        dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
5958        mutex_unlock(&fs_devices->device_list_mutex);
5959
5960        if (!dev) {
5961                printk(KERN_WARNING
5962                       "btrfs: get dev_stats failed, device not found\n");
5963                return -ENODEV;
5964        } else if (!dev->dev_stats_valid) {
5965                printk(KERN_WARNING
5966                       "btrfs: get dev_stats failed, not yet valid\n");
5967                return -ENODEV;
5968        } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
5969                for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5970                        if (stats->nr_items > i)
5971                                stats->values[i] =
5972                                        btrfs_dev_stat_read_and_reset(dev, i);
5973                        else
5974                                btrfs_dev_stat_reset(dev, i);
5975                }
5976        } else {
5977                for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5978                        if (stats->nr_items > i)
5979                                stats->values[i] = btrfs_dev_stat_read(dev, i);
5980        }
5981        if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
5982                stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
5983        return 0;
5984}
5985
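/*
 * Wipe the magic of the device's on-disk superblock so that subsequent
 * device scans no longer identify it as btrfs; used when a device is
 * taken out of the filesystem (e.g. by dev-replace).
 */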
5986int btrfs_scratch_superblock(struct btrfs_device *device)
5987{
5988        struct buffer_head *bh;
5989        struct btrfs_super_block *disk_super;
5990
5991        bh = btrfs_read_dev_super(device->bdev);
5992        if (!bh)
5993                return -EINVAL;
5994        disk_super = (struct btrfs_super_block *)bh->b_data;
5995
5996        memset(&disk_super->magic, 0, sizeof(disk_super->magic));
5997        set_buffer_dirty(bh);
5998        sync_dirty_buffer(bh);
5999        brelse(bh);
6000
6001        return 0;
6002}
6003