linux/fs/btrfs/file.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/fs.h>
   7#include <linux/pagemap.h>
   8#include <linux/time.h>
   9#include <linux/init.h>
  10#include <linux/string.h>
  11#include <linux/backing-dev.h>
  12#include <linux/falloc.h>
  13#include <linux/writeback.h>
  14#include <linux/compat.h>
  15#include <linux/slab.h>
  16#include <linux/btrfs.h>
  17#include <linux/uio.h>
  18#include <linux/iversion.h>
  19#include "ctree.h"
  20#include "disk-io.h"
  21#include "transaction.h"
  22#include "btrfs_inode.h"
  23#include "print-tree.h"
  24#include "tree-log.h"
  25#include "locking.h"
  26#include "volumes.h"
  27#include "qgroup.h"
  28#include "compression.h"
  29
  30static struct kmem_cache *btrfs_inode_defrag_cachep;
  31/*
  32 * when auto defrag is enabled we
  33 * queue up these defrag structs to remember which
  34 * inodes need defragging passes
  35 */
  36struct inode_defrag {
  37        struct rb_node rb_node;
  38        /* objectid */
  39        u64 ino;
  40        /*
  41         * transid where the defrag was added, we search for
  42         * extents newer than this
  43         */
  44        u64 transid;
  45
  46        /* root objectid */
  47        u64 root;
  48
  49        /* last offset we were able to defrag */
  50        u64 last_offset;
  51
  52        /* if we've wrapped around back to zero once already */
  53        int cycled;
  54};
  55
  56static int __compare_inode_defrag(struct inode_defrag *defrag1,
  57                                  struct inode_defrag *defrag2)
  58{
  59        if (defrag1->root > defrag2->root)
  60                return 1;
  61        else if (defrag1->root < defrag2->root)
  62                return -1;
  63        else if (defrag1->ino > defrag2->ino)
  64                return 1;
  65        else if (defrag1->ino < defrag2->ino)
  66                return -1;
  67        else
  68                return 0;
  69}
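/*
 * Example (illustrative values): records are ordered by root objectid
 * first and by inode number second, so (root=5, ino=260) sorts after
 * (root=5, ino=257) but before (root=7, ino=256).
 */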
  70
   71/* Insert a record for an inode into the defrag tree.  The lock
   72 * must be held already.
   73 *
   74 * If you're inserting a record for an older transid than an
   75 * existing record, the transid already in the tree is lowered.
   76 *
   77 * If an existing record is found, the defrag item you
   78 * pass in is freed.
   79 */
  80static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
  81                                    struct inode_defrag *defrag)
  82{
  83        struct btrfs_fs_info *fs_info = inode->root->fs_info;
  84        struct inode_defrag *entry;
  85        struct rb_node **p;
  86        struct rb_node *parent = NULL;
  87        int ret;
  88
  89        p = &fs_info->defrag_inodes.rb_node;
  90        while (*p) {
  91                parent = *p;
  92                entry = rb_entry(parent, struct inode_defrag, rb_node);
  93
  94                ret = __compare_inode_defrag(defrag, entry);
  95                if (ret < 0)
  96                        p = &parent->rb_left;
  97                else if (ret > 0)
  98                        p = &parent->rb_right;
  99                else {
 100                        /* if we're reinserting an entry for
 101                         * an old defrag run, make sure to
 102                         * lower the transid of our existing record
 103                         */
 104                        if (defrag->transid < entry->transid)
 105                                entry->transid = defrag->transid;
 106                        if (defrag->last_offset > entry->last_offset)
 107                                entry->last_offset = defrag->last_offset;
 108                        return -EEXIST;
 109                }
 110        }
 111        set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
 112        rb_link_node(&defrag->rb_node, parent, p);
 113        rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
 114        return 0;
 115}
 116
 117static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
 118{
 119        if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
 120                return 0;
 121
 122        if (btrfs_fs_closing(fs_info))
 123                return 0;
 124
 125        return 1;
 126}
 127
 128/*
 129 * insert a defrag record for this inode if auto defrag is
 130 * enabled
 131 */
 132int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 133                           struct btrfs_inode *inode)
 134{
 135        struct btrfs_root *root = inode->root;
 136        struct btrfs_fs_info *fs_info = root->fs_info;
 137        struct inode_defrag *defrag;
 138        u64 transid;
 139        int ret;
 140
 141        if (!__need_auto_defrag(fs_info))
 142                return 0;
 143
 144        if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
 145                return 0;
 146
 147        if (trans)
 148                transid = trans->transid;
 149        else
 150                transid = inode->root->last_trans;
 151
 152        defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
 153        if (!defrag)
 154                return -ENOMEM;
 155
 156        defrag->ino = btrfs_ino(inode);
 157        defrag->transid = transid;
 158        defrag->root = root->root_key.objectid;
 159
 160        spin_lock(&fs_info->defrag_inodes_lock);
 161        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
  162                /*
  163                 * If we set the IN_DEFRAG flag and evict the inode from memory,
  164                 * and then re-read this inode, the new inode doesn't have the
  165                 * IN_DEFRAG flag set. In that case, we may find an existing defrag record.
  166                 */
 167                ret = __btrfs_add_inode_defrag(inode, defrag);
 168                if (ret)
 169                        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 170        } else {
 171                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 172        }
 173        spin_unlock(&fs_info->defrag_inodes_lock);
 174        return 0;
 175}
 176
 177/*
 178 * Requeue the defrag object. If there is a defrag object that points to
 179 * the same inode in the tree, we will merge them together (by
 180 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 181 */
 182static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
 183                                       struct inode_defrag *defrag)
 184{
 185        struct btrfs_fs_info *fs_info = inode->root->fs_info;
 186        int ret;
 187
 188        if (!__need_auto_defrag(fs_info))
 189                goto out;
 190
  191        /*
  192         * Here we don't check the IN_DEFRAG flag, because we need to merge
  193         * this record with any existing one.
  194         */
 195        spin_lock(&fs_info->defrag_inodes_lock);
 196        ret = __btrfs_add_inode_defrag(inode, defrag);
 197        spin_unlock(&fs_info->defrag_inodes_lock);
 198        if (ret)
 199                goto out;
 200        return;
 201out:
 202        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 203}
 204
  205/*
  206 * Pick the defraggable inode that we want; if it doesn't exist, get
  207 * the next one.
  208 */
 209static struct inode_defrag *
 210btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
 211{
 212        struct inode_defrag *entry = NULL;
 213        struct inode_defrag tmp;
 214        struct rb_node *p;
 215        struct rb_node *parent = NULL;
 216        int ret;
 217
 218        tmp.ino = ino;
 219        tmp.root = root;
 220
 221        spin_lock(&fs_info->defrag_inodes_lock);
 222        p = fs_info->defrag_inodes.rb_node;
 223        while (p) {
 224                parent = p;
 225                entry = rb_entry(parent, struct inode_defrag, rb_node);
 226
 227                ret = __compare_inode_defrag(&tmp, entry);
 228                if (ret < 0)
 229                        p = parent->rb_left;
 230                else if (ret > 0)
 231                        p = parent->rb_right;
 232                else
 233                        goto out;
 234        }
 235
 236        if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
 237                parent = rb_next(parent);
 238                if (parent)
 239                        entry = rb_entry(parent, struct inode_defrag, rb_node);
 240                else
 241                        entry = NULL;
 242        }
 243out:
 244        if (entry)
 245                rb_erase(parent, &fs_info->defrag_inodes);
 246        spin_unlock(&fs_info->defrag_inodes_lock);
 247        return entry;
 248}
 249
 250void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 251{
 252        struct inode_defrag *defrag;
 253        struct rb_node *node;
 254
 255        spin_lock(&fs_info->defrag_inodes_lock);
 256        node = rb_first(&fs_info->defrag_inodes);
 257        while (node) {
 258                rb_erase(node, &fs_info->defrag_inodes);
 259                defrag = rb_entry(node, struct inode_defrag, rb_node);
 260                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 261
 262                cond_resched_lock(&fs_info->defrag_inodes_lock);
 263
 264                node = rb_first(&fs_info->defrag_inodes);
 265        }
 266        spin_unlock(&fs_info->defrag_inodes_lock);
 267}
 268
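/*
 * BTRFS_DEFRAG_BATCH bounds how many extents __btrfs_run_defrag_inode()
 * defrags in one pass; when a pass uses the whole batch the inode is
 * requeued so the remaining range gets picked up later.
 */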
 269#define BTRFS_DEFRAG_BATCH      1024
 270
 271static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 272                                    struct inode_defrag *defrag)
 273{
 274        struct btrfs_root *inode_root;
 275        struct inode *inode;
 276        struct btrfs_key key;
 277        struct btrfs_ioctl_defrag_range_args range;
 278        int num_defrag;
 279        int index;
 280        int ret;
 281
 282        /* get the inode */
 283        key.objectid = defrag->root;
 284        key.type = BTRFS_ROOT_ITEM_KEY;
 285        key.offset = (u64)-1;
 286
 287        index = srcu_read_lock(&fs_info->subvol_srcu);
 288
 289        inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
 290        if (IS_ERR(inode_root)) {
 291                ret = PTR_ERR(inode_root);
 292                goto cleanup;
 293        }
 294
 295        key.objectid = defrag->ino;
 296        key.type = BTRFS_INODE_ITEM_KEY;
 297        key.offset = 0;
 298        inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
 299        if (IS_ERR(inode)) {
 300                ret = PTR_ERR(inode);
 301                goto cleanup;
 302        }
 303        srcu_read_unlock(&fs_info->subvol_srcu, index);
 304
 305        /* do a chunk of defrag */
 306        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 307        memset(&range, 0, sizeof(range));
 308        range.len = (u64)-1;
 309        range.start = defrag->last_offset;
 310
 311        sb_start_write(fs_info->sb);
 312        num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
 313                                       BTRFS_DEFRAG_BATCH);
 314        sb_end_write(fs_info->sb);
 315        /*
 316         * if we filled the whole defrag batch, there
 317         * must be more work to do.  Queue this defrag
 318         * again
 319         */
 320        if (num_defrag == BTRFS_DEFRAG_BATCH) {
 321                defrag->last_offset = range.start;
 322                btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 323        } else if (defrag->last_offset && !defrag->cycled) {
 324                /*
 325                 * we didn't fill our defrag batch, but
 326                 * we didn't start at zero.  Make sure we loop
 327                 * around to the start of the file.
 328                 */
 329                defrag->last_offset = 0;
 330                defrag->cycled = 1;
 331                btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 332        } else {
 333                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 334        }
 335
 336        iput(inode);
 337        return 0;
 338cleanup:
 339        srcu_read_unlock(&fs_info->subvol_srcu, index);
 340        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 341        return ret;
 342}
 343
 344/*
 345 * run through the list of inodes in the FS that need
 346 * defragging
 347 */
 348int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 349{
 350        struct inode_defrag *defrag;
 351        u64 first_ino = 0;
 352        u64 root_objectid = 0;
 353
 354        atomic_inc(&fs_info->defrag_running);
 355        while (1) {
 356                /* Pause the auto defragger. */
 357                if (test_bit(BTRFS_FS_STATE_REMOUNTING,
 358                             &fs_info->fs_state))
 359                        break;
 360
 361                if (!__need_auto_defrag(fs_info))
 362                        break;
 363
 364                /* find an inode to defrag */
 365                defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
 366                                                 first_ino);
 367                if (!defrag) {
 368                        if (root_objectid || first_ino) {
 369                                root_objectid = 0;
 370                                first_ino = 0;
 371                                continue;
 372                        } else {
 373                                break;
 374                        }
 375                }
 376
 377                first_ino = defrag->ino + 1;
 378                root_objectid = defrag->root;
 379
 380                __btrfs_run_defrag_inode(fs_info, defrag);
 381        }
 382        atomic_dec(&fs_info->defrag_running);
 383
 384        /*
 385         * during unmount, we use the transaction_wait queue to
 386         * wait for the defragger to stop
 387         */
 388        wake_up(&fs_info->transaction_wait);
 389        return 0;
 390}
 391
 392/* simple helper to fault in pages and copy.  This should go away
 393 * and be replaced with calls into generic code.
 394 */
 395static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
 396                                         struct page **prepared_pages,
 397                                         struct iov_iter *i)
 398{
 399        size_t copied = 0;
 400        size_t total_copied = 0;
 401        int pg = 0;
 402        int offset = pos & (PAGE_SIZE - 1);
 403
 404        while (write_bytes > 0) {
 405                size_t count = min_t(size_t,
 406                                     PAGE_SIZE - offset, write_bytes);
 407                struct page *page = prepared_pages[pg];
 408                /*
 409                 * Copy data from userspace to the current page
 410                 */
 411                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
 412
 413                /* Flush processor's dcache for this page */
 414                flush_dcache_page(page);
 415
 416                /*
 417                 * if we get a partial write, we can end up with
 418                 * partially up to date pages.  These add
 419                 * a lot of complexity, so make sure they don't
 420                 * happen by forcing this copy to be retried.
 421                 *
 422                 * The rest of the btrfs_file_write code will fall
 423                 * back to page at a time copies after we return 0.
 424                 */
 425                if (!PageUptodate(page) && copied < count)
 426                        copied = 0;
 427
 428                iov_iter_advance(i, copied);
 429                write_bytes -= copied;
 430                total_copied += copied;
 431
 432                /* Return to btrfs_file_write_iter to fault page */
 433                if (unlikely(copied == 0))
 434                        break;
 435
 436                if (copied < PAGE_SIZE - offset) {
 437                        offset += copied;
 438                } else {
 439                        pg++;
 440                        offset = 0;
 441                }
 442        }
 443        return total_copied;
 444}
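/*
 * Example (illustrative numbers, assuming 4K pages): with pos = 4196 and
 * write_bytes = 5000, offset starts at 100, the first copy takes
 * PAGE_SIZE - 100 bytes from prepared_pages[0], and later copies continue
 * at offset 0 of the following pages until write_bytes is exhausted or a
 * copy returns 0.
 */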
 445
 446/*
 447 * unlocks pages after btrfs_file_write is done with them
 448 */
 449static void btrfs_drop_pages(struct page **pages, size_t num_pages)
 450{
 451        size_t i;
 452        for (i = 0; i < num_pages; i++) {
  453                /* The page checked bit is some magic around finding pages
  454                 * that have been modified without going through
  455                 * btrfs_set_page_dirty; clear it here. There should be no need
  456                 * to mark the pages accessed, as prepare_pages should have
  457                 * already marked them accessed via find_or_create_page().
  458                 */
 459                ClearPageChecked(pages[i]);
 460                unlock_page(pages[i]);
 461                put_page(pages[i]);
 462        }
 463}
 464
 465static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
 466                                         const u64 start,
 467                                         const u64 len,
 468                                         struct extent_state **cached_state)
 469{
 470        u64 search_start = start;
 471        const u64 end = start + len - 1;
 472
 473        while (search_start < end) {
 474                const u64 search_len = end - search_start + 1;
 475                struct extent_map *em;
 476                u64 em_len;
 477                int ret = 0;
 478
 479                em = btrfs_get_extent(inode, NULL, 0, search_start,
 480                                      search_len, 0);
 481                if (IS_ERR(em))
 482                        return PTR_ERR(em);
 483
 484                if (em->block_start != EXTENT_MAP_HOLE)
 485                        goto next;
 486
 487                em_len = em->len;
 488                if (em->start < search_start)
 489                        em_len -= search_start - em->start;
 490                if (em_len > search_len)
 491                        em_len = search_len;
 492
 493                ret = set_extent_bit(&inode->io_tree, search_start,
 494                                     search_start + em_len - 1,
 495                                     EXTENT_DELALLOC_NEW,
 496                                     NULL, cached_state, GFP_NOFS);
 497next:
 498                search_start = extent_map_end(em);
 499                free_extent_map(em);
 500                if (ret)
 501                        return ret;
 502        }
 503        return 0;
 504}
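/*
 * Example: if [start, start + len) covers an existing extent followed by
 * a hole, only the hole portion gets the EXTENT_DELALLOC_NEW bit; ranges
 * already backed by an extent are skipped via the 'next' label.
 */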
 505
 506/*
 507 * after copy_from_user, pages need to be dirtied and we need to make
 508 * sure holes are created between the current EOF and the start of
 509 * any next extents (if required).
 510 *
 511 * this also makes the decision about creating an inline extent vs
 512 * doing real data extents, marking pages dirty and delalloc as required.
 513 */
 514int btrfs_dirty_pages(struct inode *inode, struct page **pages,
 515                      size_t num_pages, loff_t pos, size_t write_bytes,
 516                      struct extent_state **cached)
 517{
 518        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 519        int err = 0;
 520        int i;
 521        u64 num_bytes;
 522        u64 start_pos;
 523        u64 end_of_last_block;
 524        u64 end_pos = pos + write_bytes;
 525        loff_t isize = i_size_read(inode);
 526        unsigned int extra_bits = 0;
 527
 528        start_pos = pos & ~((u64) fs_info->sectorsize - 1);
 529        num_bytes = round_up(write_bytes + pos - start_pos,
 530                             fs_info->sectorsize);
 531
 532        end_of_last_block = start_pos + num_bytes - 1;
 533
 534        if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
 535                if (start_pos >= isize &&
 536                    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
 537                        /*
 538                         * There can't be any extents following eof in this case
 539                         * so just set the delalloc new bit for the range
 540                         * directly.
 541                         */
 542                        extra_bits |= EXTENT_DELALLOC_NEW;
 543                } else {
 544                        err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
 545                                                            start_pos,
 546                                                            num_bytes, cached);
 547                        if (err)
 548                                return err;
 549                }
 550        }
 551
 552        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
 553                                        extra_bits, cached, 0);
 554        if (err)
 555                return err;
 556
 557        for (i = 0; i < num_pages; i++) {
 558                struct page *p = pages[i];
 559                SetPageUptodate(p);
 560                ClearPageChecked(p);
 561                set_page_dirty(p);
 562        }
 563
 564        /*
 565         * we've only changed i_size in ram, and we haven't updated
 566         * the disk i_size.  There is no need to log the inode
 567         * at this time.
 568         */
 569        if (end_pos > isize)
 570                i_size_write(inode, end_pos);
 571        return 0;
 572}
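/*
 * Example (illustrative numbers, 4K sectorsize): pos = 6000 and
 * write_bytes = 100 give start_pos = 4096 and num_bytes = 4096, so
 * delalloc is set on the whole sector containing the write, not just the
 * written bytes.
 */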
 573
 574/*
 575 * this drops all the extents in the cache that intersect the range
 576 * [start, end].  Existing extents are split as required.
 577 */
 578void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
 579                             int skip_pinned)
 580{
 581        struct extent_map *em;
 582        struct extent_map *split = NULL;
 583        struct extent_map *split2 = NULL;
 584        struct extent_map_tree *em_tree = &inode->extent_tree;
 585        u64 len = end - start + 1;
 586        u64 gen;
 587        int ret;
 588        int testend = 1;
 589        unsigned long flags;
 590        int compressed = 0;
 591        bool modified;
 592
 593        WARN_ON(end < start);
 594        if (end == (u64)-1) {
 595                len = (u64)-1;
 596                testend = 0;
 597        }
 598        while (1) {
 599                int no_splits = 0;
 600
 601                modified = false;
 602                if (!split)
 603                        split = alloc_extent_map();
 604                if (!split2)
 605                        split2 = alloc_extent_map();
 606                if (!split || !split2)
 607                        no_splits = 1;
 608
 609                write_lock(&em_tree->lock);
 610                em = lookup_extent_mapping(em_tree, start, len);
 611                if (!em) {
 612                        write_unlock(&em_tree->lock);
 613                        break;
 614                }
 615                flags = em->flags;
 616                gen = em->generation;
 617                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
 618                        if (testend && em->start + em->len >= start + len) {
 619                                free_extent_map(em);
 620                                write_unlock(&em_tree->lock);
 621                                break;
 622                        }
 623                        start = em->start + em->len;
 624                        if (testend)
 625                                len = start + len - (em->start + em->len);
 626                        free_extent_map(em);
 627                        write_unlock(&em_tree->lock);
 628                        continue;
 629                }
 630                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 631                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
 632                clear_bit(EXTENT_FLAG_LOGGING, &flags);
 633                modified = !list_empty(&em->list);
 634                if (no_splits)
 635                        goto next;
 636
 637                if (em->start < start) {
 638                        split->start = em->start;
 639                        split->len = start - em->start;
 640
 641                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 642                                split->orig_start = em->orig_start;
 643                                split->block_start = em->block_start;
 644
 645                                if (compressed)
 646                                        split->block_len = em->block_len;
 647                                else
 648                                        split->block_len = split->len;
 649                                split->orig_block_len = max(split->block_len,
 650                                                em->orig_block_len);
 651                                split->ram_bytes = em->ram_bytes;
 652                        } else {
 653                                split->orig_start = split->start;
 654                                split->block_len = 0;
 655                                split->block_start = em->block_start;
 656                                split->orig_block_len = 0;
 657                                split->ram_bytes = split->len;
 658                        }
 659
 660                        split->generation = gen;
 661                        split->bdev = em->bdev;
 662                        split->flags = flags;
 663                        split->compress_type = em->compress_type;
 664                        replace_extent_mapping(em_tree, em, split, modified);
 665                        free_extent_map(split);
 666                        split = split2;
 667                        split2 = NULL;
 668                }
 669                if (testend && em->start + em->len > start + len) {
 670                        u64 diff = start + len - em->start;
 671
 672                        split->start = start + len;
 673                        split->len = em->start + em->len - (start + len);
 674                        split->bdev = em->bdev;
 675                        split->flags = flags;
 676                        split->compress_type = em->compress_type;
 677                        split->generation = gen;
 678
 679                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 680                                split->orig_block_len = max(em->block_len,
 681                                                    em->orig_block_len);
 682
 683                                split->ram_bytes = em->ram_bytes;
 684                                if (compressed) {
 685                                        split->block_len = em->block_len;
 686                                        split->block_start = em->block_start;
 687                                        split->orig_start = em->orig_start;
 688                                } else {
 689                                        split->block_len = split->len;
 690                                        split->block_start = em->block_start
 691                                                + diff;
 692                                        split->orig_start = em->orig_start;
 693                                }
 694                        } else {
 695                                split->ram_bytes = split->len;
 696                                split->orig_start = split->start;
 697                                split->block_len = 0;
 698                                split->block_start = em->block_start;
 699                                split->orig_block_len = 0;
 700                        }
 701
 702                        if (extent_map_in_tree(em)) {
 703                                replace_extent_mapping(em_tree, em, split,
 704                                                       modified);
 705                        } else {
 706                                ret = add_extent_mapping(em_tree, split,
 707                                                         modified);
 708                                ASSERT(ret == 0); /* Logic error */
 709                        }
 710                        free_extent_map(split);
 711                        split = NULL;
 712                }
 713next:
 714                if (extent_map_in_tree(em))
 715                        remove_extent_mapping(em_tree, em);
 716                write_unlock(&em_tree->lock);
 717
 718                /* once for us */
 719                free_extent_map(em);
 720                /* once for the tree*/
 721                free_extent_map(em);
 722        }
 723        if (split)
 724                free_extent_map(split);
 725        if (split2)
 726                free_extent_map(split2);
 727}
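/*
 * Example: dropping [32K, 64K) from a cached mapping covering [0, 128K)
 * leaves two split mappings, [0, 32K) and [64K, 128K); the original
 * extent_map is removed from the tree and freed.
 */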
 728
  729/*
  730 * This is very complex, but the basic idea is to drop all extents
  731 * in the range start - end.  If drop_cache is set, the cached extent
  732 * mappings for that range are dropped as well.
  733 *
  734 * If an extent intersects the range but is not entirely inside the range
  735 * it is either truncated or split.  Anything entirely inside the range
  736 * is deleted from the tree.
  737 */
 738int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 739                         struct btrfs_root *root, struct inode *inode,
 740                         struct btrfs_path *path, u64 start, u64 end,
 741                         u64 *drop_end, int drop_cache,
 742                         int replace_extent,
 743                         u32 extent_item_size,
 744                         int *key_inserted)
 745{
 746        struct btrfs_fs_info *fs_info = root->fs_info;
 747        struct extent_buffer *leaf;
 748        struct btrfs_file_extent_item *fi;
 749        struct btrfs_key key;
 750        struct btrfs_key new_key;
 751        u64 ino = btrfs_ino(BTRFS_I(inode));
 752        u64 search_start = start;
 753        u64 disk_bytenr = 0;
 754        u64 num_bytes = 0;
 755        u64 extent_offset = 0;
 756        u64 extent_end = 0;
 757        u64 last_end = start;
 758        int del_nr = 0;
 759        int del_slot = 0;
 760        int extent_type;
 761        int recow;
 762        int ret;
 763        int modify_tree = -1;
 764        int update_refs;
 765        int found = 0;
 766        int leafs_visited = 0;
 767
 768        if (drop_cache)
 769                btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);
 770
 771        if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
 772                modify_tree = 0;
 773
 774        update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
 775                       root == fs_info->tree_root);
 776        while (1) {
 777                recow = 0;
 778                ret = btrfs_lookup_file_extent(trans, root, path, ino,
 779                                               search_start, modify_tree);
 780                if (ret < 0)
 781                        break;
 782                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
 783                        leaf = path->nodes[0];
 784                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
 785                        if (key.objectid == ino &&
 786                            key.type == BTRFS_EXTENT_DATA_KEY)
 787                                path->slots[0]--;
 788                }
 789                ret = 0;
 790                leafs_visited++;
 791next_slot:
 792                leaf = path->nodes[0];
 793                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 794                        BUG_ON(del_nr > 0);
 795                        ret = btrfs_next_leaf(root, path);
 796                        if (ret < 0)
 797                                break;
 798                        if (ret > 0) {
 799                                ret = 0;
 800                                break;
 801                        }
 802                        leafs_visited++;
 803                        leaf = path->nodes[0];
 804                        recow = 1;
 805                }
 806
 807                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 808
 809                if (key.objectid > ino)
 810                        break;
 811                if (WARN_ON_ONCE(key.objectid < ino) ||
 812                    key.type < BTRFS_EXTENT_DATA_KEY) {
 813                        ASSERT(del_nr == 0);
 814                        path->slots[0]++;
 815                        goto next_slot;
 816                }
 817                if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
 818                        break;
 819
 820                fi = btrfs_item_ptr(leaf, path->slots[0],
 821                                    struct btrfs_file_extent_item);
 822                extent_type = btrfs_file_extent_type(leaf, fi);
 823
 824                if (extent_type == BTRFS_FILE_EXTENT_REG ||
 825                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
 826                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
 827                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
 828                        extent_offset = btrfs_file_extent_offset(leaf, fi);
 829                        extent_end = key.offset +
 830                                btrfs_file_extent_num_bytes(leaf, fi);
 831                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 832                        extent_end = key.offset +
 833                                btrfs_file_extent_ram_bytes(leaf, fi);
 834                } else {
 835                        /* can't happen */
 836                        BUG();
 837                }
 838
  839                /*
  840                 * Don't skip extent items representing 0 byte lengths. They
  841                 * used to be created (bug) when, while punching holes, we hit
  842                 * an -ENOSPC condition. So if we find one here, just ensure we
  843                 * delete it, otherwise we would insert a new file extent item
  844                 * with the same key (offset) as that 0 byte length file
  845                 * extent item in the call to setup_items_for_insert() later
  846                 * in this function.
  847                 */
 848                if (extent_end == key.offset && extent_end >= search_start) {
 849                        last_end = extent_end;
 850                        goto delete_extent_item;
 851                }
 852
 853                if (extent_end <= search_start) {
 854                        path->slots[0]++;
 855                        goto next_slot;
 856                }
 857
 858                found = 1;
 859                search_start = max(key.offset, start);
 860                if (recow || !modify_tree) {
 861                        modify_tree = -1;
 862                        btrfs_release_path(path);
 863                        continue;
 864                }
 865
 866                /*
 867                 *     | - range to drop - |
 868                 *  | -------- extent -------- |
 869                 */
 870                if (start > key.offset && end < extent_end) {
 871                        BUG_ON(del_nr > 0);
 872                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 873                                ret = -EOPNOTSUPP;
 874                                break;
 875                        }
 876
 877                        memcpy(&new_key, &key, sizeof(new_key));
 878                        new_key.offset = start;
 879                        ret = btrfs_duplicate_item(trans, root, path,
 880                                                   &new_key);
 881                        if (ret == -EAGAIN) {
 882                                btrfs_release_path(path);
 883                                continue;
 884                        }
 885                        if (ret < 0)
 886                                break;
 887
 888                        leaf = path->nodes[0];
 889                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
 890                                            struct btrfs_file_extent_item);
 891                        btrfs_set_file_extent_num_bytes(leaf, fi,
 892                                                        start - key.offset);
 893
 894                        fi = btrfs_item_ptr(leaf, path->slots[0],
 895                                            struct btrfs_file_extent_item);
 896
 897                        extent_offset += start - key.offset;
 898                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 899                        btrfs_set_file_extent_num_bytes(leaf, fi,
 900                                                        extent_end - start);
 901                        btrfs_mark_buffer_dirty(leaf);
 902
 903                        if (update_refs && disk_bytenr > 0) {
 904                                ret = btrfs_inc_extent_ref(trans, root,
 905                                                disk_bytenr, num_bytes, 0,
 906                                                root->root_key.objectid,
 907                                                new_key.objectid,
 908                                                start - extent_offset);
 909                                BUG_ON(ret); /* -ENOMEM */
 910                        }
 911                        key.offset = start;
 912                }
 913                /*
 914                 * From here on out we will have actually dropped something, so
 915                 * last_end can be updated.
 916                 */
 917                last_end = extent_end;
 918
 919                /*
 920                 *  | ---- range to drop ----- |
 921                 *      | -------- extent -------- |
 922                 */
 923                if (start <= key.offset && end < extent_end) {
 924                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 925                                ret = -EOPNOTSUPP;
 926                                break;
 927                        }
 928
 929                        memcpy(&new_key, &key, sizeof(new_key));
 930                        new_key.offset = end;
 931                        btrfs_set_item_key_safe(fs_info, path, &new_key);
 932
 933                        extent_offset += end - key.offset;
 934                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 935                        btrfs_set_file_extent_num_bytes(leaf, fi,
 936                                                        extent_end - end);
 937                        btrfs_mark_buffer_dirty(leaf);
 938                        if (update_refs && disk_bytenr > 0)
 939                                inode_sub_bytes(inode, end - key.offset);
 940                        break;
 941                }
 942
 943                search_start = extent_end;
 944                /*
 945                 *       | ---- range to drop ----- |
 946                 *  | -------- extent -------- |
 947                 */
 948                if (start > key.offset && end >= extent_end) {
 949                        BUG_ON(del_nr > 0);
 950                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 951                                ret = -EOPNOTSUPP;
 952                                break;
 953                        }
 954
 955                        btrfs_set_file_extent_num_bytes(leaf, fi,
 956                                                        start - key.offset);
 957                        btrfs_mark_buffer_dirty(leaf);
 958                        if (update_refs && disk_bytenr > 0)
 959                                inode_sub_bytes(inode, extent_end - start);
 960                        if (end == extent_end)
 961                                break;
 962
 963                        path->slots[0]++;
 964                        goto next_slot;
 965                }
 966
 967                /*
 968                 *  | ---- range to drop ----- |
 969                 *    | ------ extent ------ |
 970                 */
 971                if (start <= key.offset && end >= extent_end) {
 972delete_extent_item:
 973                        if (del_nr == 0) {
 974                                del_slot = path->slots[0];
 975                                del_nr = 1;
 976                        } else {
 977                                BUG_ON(del_slot + del_nr != path->slots[0]);
 978                                del_nr++;
 979                        }
 980
 981                        if (update_refs &&
 982                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
 983                                inode_sub_bytes(inode,
 984                                                extent_end - key.offset);
 985                                extent_end = ALIGN(extent_end,
 986                                                   fs_info->sectorsize);
 987                        } else if (update_refs && disk_bytenr > 0) {
 988                                ret = btrfs_free_extent(trans, root,
 989                                                disk_bytenr, num_bytes, 0,
 990                                                root->root_key.objectid,
 991                                                key.objectid, key.offset -
 992                                                extent_offset);
 993                                BUG_ON(ret); /* -ENOMEM */
 994                                inode_sub_bytes(inode,
 995                                                extent_end - key.offset);
 996                        }
 997
 998                        if (end == extent_end)
 999                                break;
1000
1001                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
1002                                path->slots[0]++;
1003                                goto next_slot;
1004                        }
1005
1006                        ret = btrfs_del_items(trans, root, path, del_slot,
1007                                              del_nr);
1008                        if (ret) {
1009                                btrfs_abort_transaction(trans, ret);
1010                                break;
1011                        }
1012
1013                        del_nr = 0;
1014                        del_slot = 0;
1015
1016                        btrfs_release_path(path);
1017                        continue;
1018                }
1019
1020                BUG_ON(1);
1021        }
1022
1023        if (!ret && del_nr > 0) {
 1024                /*
 1025                 * Set path->slots[0] to the first slot, so that after the delete,
 1026                 * if items are moved off from our leaf to its immediate left or
 1027                 * right neighbor leaves, we end up with a correct and adjusted
 1028                 * path->slots[0] for our insertion (if replace_extent != 0).
 1029                 */
1030                path->slots[0] = del_slot;
1031                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1032                if (ret)
1033                        btrfs_abort_transaction(trans, ret);
1034        }
1035
1036        leaf = path->nodes[0];
1037        /*
1038         * If btrfs_del_items() was called, it might have deleted a leaf, in
1039         * which case it unlocked our path, so check path->locks[0] matches a
1040         * write lock.
1041         */
1042        if (!ret && replace_extent && leafs_visited == 1 &&
1043            (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
1044             path->locks[0] == BTRFS_WRITE_LOCK) &&
1045            btrfs_leaf_free_space(fs_info, leaf) >=
1046            sizeof(struct btrfs_item) + extent_item_size) {
1047
1048                key.objectid = ino;
1049                key.type = BTRFS_EXTENT_DATA_KEY;
1050                key.offset = start;
1051                if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
1052                        struct btrfs_key slot_key;
1053
1054                        btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
1055                        if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
1056                                path->slots[0]++;
1057                }
1058                setup_items_for_insert(root, path, &key,
1059                                       &extent_item_size,
1060                                       extent_item_size,
1061                                       sizeof(struct btrfs_item) +
1062                                       extent_item_size, 1);
1063                *key_inserted = 1;
1064        }
1065
1066        if (!replace_extent || !(*key_inserted))
1067                btrfs_release_path(path);
1068        if (drop_end)
1069                *drop_end = found ? min(end, last_end) : end;
1070        return ret;
1071}
1072
1073int btrfs_drop_extents(struct btrfs_trans_handle *trans,
1074                       struct btrfs_root *root, struct inode *inode, u64 start,
1075                       u64 end, int drop_cache)
1076{
1077        struct btrfs_path *path;
1078        int ret;
1079
1080        path = btrfs_alloc_path();
1081        if (!path)
1082                return -ENOMEM;
1083        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
1084                                   drop_cache, 0, 0, NULL);
1085        btrfs_free_path(path);
1086        return ret;
1087}
1088
1089static int extent_mergeable(struct extent_buffer *leaf, int slot,
1090                            u64 objectid, u64 bytenr, u64 orig_offset,
1091                            u64 *start, u64 *end)
1092{
1093        struct btrfs_file_extent_item *fi;
1094        struct btrfs_key key;
1095        u64 extent_end;
1096
1097        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1098                return 0;
1099
1100        btrfs_item_key_to_cpu(leaf, &key, slot);
1101        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
1102                return 0;
1103
1104        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1105        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
1106            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
1107            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
1108            btrfs_file_extent_compression(leaf, fi) ||
1109            btrfs_file_extent_encryption(leaf, fi) ||
1110            btrfs_file_extent_other_encoding(leaf, fi))
1111                return 0;
1112
1113        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1114        if ((*start && *start != key.offset) || (*end && *end != extent_end))
1115                return 0;
1116
1117        *start = key.offset;
1118        *end = extent_end;
1119        return 1;
1120}
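/*
 * Example: a neighboring BTRFS_FILE_EXTENT_REG item is considered
 * mergeable only when it references the same disk bytenr with a matching
 * orig_offset, uses no compression/encryption/other encoding, and its
 * range agrees with the caller-supplied *start/*end when those are
 * non-zero.
 */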
1121
1122/*
1123 * Mark extent in the range start - end as written.
1124 *
1125 * This changes extent type from 'pre-allocated' to 'regular'. If only
1126 * part of extent is marked as written, the extent will be split into
1127 * two or three.
1128 */
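/*
 * Example: writing [16K, 32K) into a prealloc extent covering [0, 64K)
 * splits it into three items: [0, 16K) and [32K, 64K) stay preallocated
 * while [16K, 32K) becomes a regular extent.
 */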
1129int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
1130                              struct btrfs_inode *inode, u64 start, u64 end)
1131{
1132        struct btrfs_fs_info *fs_info = trans->fs_info;
1133        struct btrfs_root *root = inode->root;
1134        struct extent_buffer *leaf;
1135        struct btrfs_path *path;
1136        struct btrfs_file_extent_item *fi;
1137        struct btrfs_key key;
1138        struct btrfs_key new_key;
1139        u64 bytenr;
1140        u64 num_bytes;
1141        u64 extent_end;
1142        u64 orig_offset;
1143        u64 other_start;
1144        u64 other_end;
1145        u64 split;
1146        int del_nr = 0;
1147        int del_slot = 0;
1148        int recow;
1149        int ret;
1150        u64 ino = btrfs_ino(inode);
1151
1152        path = btrfs_alloc_path();
1153        if (!path)
1154                return -ENOMEM;
1155again:
1156        recow = 0;
1157        split = start;
1158        key.objectid = ino;
1159        key.type = BTRFS_EXTENT_DATA_KEY;
1160        key.offset = split;
1161
1162        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1163        if (ret < 0)
1164                goto out;
1165        if (ret > 0 && path->slots[0] > 0)
1166                path->slots[0]--;
1167
1168        leaf = path->nodes[0];
1169        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1170        if (key.objectid != ino ||
1171            key.type != BTRFS_EXTENT_DATA_KEY) {
1172                ret = -EINVAL;
1173                btrfs_abort_transaction(trans, ret);
1174                goto out;
1175        }
1176        fi = btrfs_item_ptr(leaf, path->slots[0],
1177                            struct btrfs_file_extent_item);
1178        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
1179                ret = -EINVAL;
1180                btrfs_abort_transaction(trans, ret);
1181                goto out;
1182        }
1183        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1184        if (key.offset > start || extent_end < end) {
1185                ret = -EINVAL;
1186                btrfs_abort_transaction(trans, ret);
1187                goto out;
1188        }
1189
1190        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1191        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1192        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
1193        memcpy(&new_key, &key, sizeof(new_key));
1194
1195        if (start == key.offset && end < extent_end) {
1196                other_start = 0;
1197                other_end = start;
1198                if (extent_mergeable(leaf, path->slots[0] - 1,
1199                                     ino, bytenr, orig_offset,
1200                                     &other_start, &other_end)) {
1201                        new_key.offset = end;
1202                        btrfs_set_item_key_safe(fs_info, path, &new_key);
1203                        fi = btrfs_item_ptr(leaf, path->slots[0],
1204                                            struct btrfs_file_extent_item);
1205                        btrfs_set_file_extent_generation(leaf, fi,
1206                                                         trans->transid);
1207                        btrfs_set_file_extent_num_bytes(leaf, fi,
1208                                                        extent_end - end);
1209                        btrfs_set_file_extent_offset(leaf, fi,
1210                                                     end - orig_offset);
1211                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1212                                            struct btrfs_file_extent_item);
1213                        btrfs_set_file_extent_generation(leaf, fi,
1214                                                         trans->transid);
1215                        btrfs_set_file_extent_num_bytes(leaf, fi,
1216                                                        end - other_start);
1217                        btrfs_mark_buffer_dirty(leaf);
1218                        goto out;
1219                }
1220        }
1221
1222        if (start > key.offset && end == extent_end) {
1223                other_start = end;
1224                other_end = 0;
1225                if (extent_mergeable(leaf, path->slots[0] + 1,
1226                                     ino, bytenr, orig_offset,
1227                                     &other_start, &other_end)) {
1228                        fi = btrfs_item_ptr(leaf, path->slots[0],
1229                                            struct btrfs_file_extent_item);
1230                        btrfs_set_file_extent_num_bytes(leaf, fi,
1231                                                        start - key.offset);
1232                        btrfs_set_file_extent_generation(leaf, fi,
1233                                                         trans->transid);
1234                        path->slots[0]++;
1235                        new_key.offset = start;
1236                        btrfs_set_item_key_safe(fs_info, path, &new_key);
1237
1238                        fi = btrfs_item_ptr(leaf, path->slots[0],
1239                                            struct btrfs_file_extent_item);
1240                        btrfs_set_file_extent_generation(leaf, fi,
1241                                                         trans->transid);
1242                        btrfs_set_file_extent_num_bytes(leaf, fi,
1243                                                        other_end - start);
1244                        btrfs_set_file_extent_offset(leaf, fi,
1245                                                     start - orig_offset);
1246                        btrfs_mark_buffer_dirty(leaf);
1247                        goto out;
1248                }
1249        }
1250
1251        while (start > key.offset || end < extent_end) {
1252                if (key.offset == start)
1253                        split = end;
1254
1255                new_key.offset = split;
1256                ret = btrfs_duplicate_item(trans, root, path, &new_key);
1257                if (ret == -EAGAIN) {
1258                        btrfs_release_path(path);
1259                        goto again;
1260                }
1261                if (ret < 0) {
1262                        btrfs_abort_transaction(trans, ret);
1263                        goto out;
1264                }
1265
1266                leaf = path->nodes[0];
1267                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1268                                    struct btrfs_file_extent_item);
1269                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1270                btrfs_set_file_extent_num_bytes(leaf, fi,
1271                                                split - key.offset);
1272
1273                fi = btrfs_item_ptr(leaf, path->slots[0],
1274                                    struct btrfs_file_extent_item);
1275
1276                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1277                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1278                btrfs_set_file_extent_num_bytes(leaf, fi,
1279                                                extent_end - split);
1280                btrfs_mark_buffer_dirty(leaf);
1281
1282                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
1283                                           0, root->root_key.objectid,
1284                                           ino, orig_offset);
1285                if (ret) {
1286                        btrfs_abort_transaction(trans, ret);
1287                        goto out;
1288                }
1289
1290                if (split == start) {
1291                        key.offset = start;
1292                } else {
1293                        if (start != key.offset) {
1294                                ret = -EINVAL;
1295                                btrfs_abort_transaction(trans, ret);
1296                                goto out;
1297                        }
1298                        path->slots[0]--;
1299                        extent_end = end;
1300                }
1301                recow = 1;
1302        }
1303
1304        other_start = end;
1305        other_end = 0;
1306        if (extent_mergeable(leaf, path->slots[0] + 1,
1307                             ino, bytenr, orig_offset,
1308                             &other_start, &other_end)) {
1309                if (recow) {
1310                        btrfs_release_path(path);
1311                        goto again;
1312                }
1313                extent_end = other_end;
1314                del_slot = path->slots[0] + 1;
1315                del_nr++;
1316                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1317                                        0, root->root_key.objectid,
1318                                        ino, orig_offset);
1319                if (ret) {
1320                        btrfs_abort_transaction(trans, ret);
1321                        goto out;
1322                }
1323        }
1324        other_start = 0;
1325        other_end = start;
1326        if (extent_mergeable(leaf, path->slots[0] - 1,
1327                             ino, bytenr, orig_offset,
1328                             &other_start, &other_end)) {
1329                if (recow) {
1330                        btrfs_release_path(path);
1331                        goto again;
1332                }
1333                key.offset = other_start;
1334                del_slot = path->slots[0];
1335                del_nr++;
1336                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1337                                        0, root->root_key.objectid,
1338                                        ino, orig_offset);
1339                if (ret) {
1340                        btrfs_abort_transaction(trans, ret);
1341                        goto out;
1342                }
1343        }
1344        if (del_nr == 0) {
1345                fi = btrfs_item_ptr(leaf, path->slots[0],
1346                           struct btrfs_file_extent_item);
1347                btrfs_set_file_extent_type(leaf, fi,
1348                                           BTRFS_FILE_EXTENT_REG);
1349                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1350                btrfs_mark_buffer_dirty(leaf);
1351        } else {
1352                fi = btrfs_item_ptr(leaf, del_slot - 1,
1353                           struct btrfs_file_extent_item);
1354                btrfs_set_file_extent_type(leaf, fi,
1355                                           BTRFS_FILE_EXTENT_REG);
1356                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1357                btrfs_set_file_extent_num_bytes(leaf, fi,
1358                                                extent_end - key.offset);
1359                btrfs_mark_buffer_dirty(leaf);
1360
1361                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1362                if (ret < 0) {
1363                        btrfs_abort_transaction(trans, ret);
1364                        goto out;
1365                }
1366        }
1367out:
1368        btrfs_free_path(path);
1369        return 0;
1370}
1371
1372/*
1373 * On error we return an unlocked page and the error value;
1374 * on success we return a locked page and 0.
1375 */
1376static int prepare_uptodate_page(struct inode *inode,
1377                                 struct page *page, u64 pos,
1378                                 bool force_uptodate)
1379{
1380        int ret = 0;
1381
1382        if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1383            !PageUptodate(page)) {
1384                ret = btrfs_readpage(NULL, page);
1385                if (ret)
1386                        return ret;
1387                lock_page(page);
1388                if (!PageUptodate(page)) {
1389                        unlock_page(page);
1390                        return -EIO;
1391                }
1392                if (page->mapping != inode->i_mapping) {
1393                        unlock_page(page);
1394                        return -EAGAIN;
1395                }
1396        }
1397        return 0;
1398}
1399
1400/*
1401 * this just gets pages into the page cache and locks them down.
1402 */
1403static noinline int prepare_pages(struct inode *inode, struct page **pages,
1404                                  size_t num_pages, loff_t pos,
1405                                  size_t write_bytes, bool force_uptodate)
1406{
1407        int i;
1408        unsigned long index = pos >> PAGE_SHIFT;
1409        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1410        int err = 0;
1411        int faili;
1412
1413        for (i = 0; i < num_pages; i++) {
1414again:
1415                pages[i] = find_or_create_page(inode->i_mapping, index + i,
1416                                               mask | __GFP_WRITE);
1417                if (!pages[i]) {
1418                        faili = i - 1;
1419                        err = -ENOMEM;
1420                        goto fail;
1421                }
1422
1423                if (i == 0)
1424                        err = prepare_uptodate_page(inode, pages[i], pos,
1425                                                    force_uptodate);
1426                if (!err && i == num_pages - 1)
1427                        err = prepare_uptodate_page(inode, pages[i],
1428                                                    pos + write_bytes, false);
1429                if (err) {
1430                        put_page(pages[i]);
1431                        if (err == -EAGAIN) {
1432                                err = 0;
1433                                goto again;
1434                        }
1435                        faili = i - 1;
1436                        goto fail;
1437                }
1438                wait_on_page_writeback(pages[i]);
1439        }
1440
1441        return 0;
1442fail:
1443        while (faili >= 0) {
1444                unlock_page(pages[faili]);
1445                put_page(pages[faili]);
1446                faili--;
1447        }
1448        return err;
1449
1450}
1451
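/*
 * Illustration (added in this edit, not part of the kernel sources): the two
 * helpers above pull the pages backing a buffered write into the page cache,
 * and only the first and last page of a range that does not start or end on a
 * page boundary may need to be read (made uptodate) before being partially
 * overwritten.  A minimal worked example of the arithmetic, assuming a 4KiB
 * page size; all byte values below are hypothetical:
 *
 *	enum { EX_PAGE_SIZE = 4096 };
 *	unsigned long pos = 6000, write_bytes = 10000;
 *	unsigned long offset = pos & (EX_PAGE_SIZE - 1);	// 1904
 *	unsigned long index = pos / EX_PAGE_SIZE;		// first page: 1
 *	unsigned long num_pages =
 *		(write_bytes + offset + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE; // 3
 *	// Page 1 is partially overwritten at its head and page 3 at its
 *	// tail, so only those two may need a read first; page 2 is fully
 *	// overwritten and can stay !uptodate until the copy.
 */
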
1452/*
1453 * This function locks the extent and properly waits for data=ordered extents
1454 * to finish before allowing the pages to be modified if needed.
1455 *
1456 * The return value:
1457 * 1 - the extent is locked
1458 * 0 - the extent is not locked, and everything is OK
1459 * -EAGAIN - the pages need to be re-prepared
1460 * any other negative value - something went wrong
1461 */
1462static noinline int
1463lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1464                                size_t num_pages, loff_t pos,
1465                                size_t write_bytes,
1466                                u64 *lockstart, u64 *lockend,
1467                                struct extent_state **cached_state)
1468{
1469        struct btrfs_fs_info *fs_info = inode->root->fs_info;
1470        u64 start_pos;
1471        u64 last_pos;
1472        int i;
1473        int ret = 0;
1474
1475        start_pos = round_down(pos, fs_info->sectorsize);
1476        last_pos = start_pos
1477                + round_up(pos + write_bytes - start_pos,
1478                           fs_info->sectorsize) - 1;
1479
1480        if (start_pos < inode->vfs_inode.i_size) {
1481                struct btrfs_ordered_extent *ordered;
1482
1483                lock_extent_bits(&inode->io_tree, start_pos, last_pos,
1484                                cached_state);
1485                ordered = btrfs_lookup_ordered_range(inode, start_pos,
1486                                                     last_pos - start_pos + 1);
1487                if (ordered &&
1488                    ordered->file_offset + ordered->len > start_pos &&
1489                    ordered->file_offset <= last_pos) {
1490                        unlock_extent_cached(&inode->io_tree, start_pos,
1491                                        last_pos, cached_state);
1492                        for (i = 0; i < num_pages; i++) {
1493                                unlock_page(pages[i]);
1494                                put_page(pages[i]);
1495                        }
1496                        btrfs_start_ordered_extent(&inode->vfs_inode,
1497                                        ordered, 1);
1498                        btrfs_put_ordered_extent(ordered);
1499                        return -EAGAIN;
1500                }
1501                if (ordered)
1502                        btrfs_put_ordered_extent(ordered);
1503                clear_extent_bit(&inode->io_tree, start_pos, last_pos,
1504                                 EXTENT_DIRTY | EXTENT_DELALLOC |
1505                                 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
1506                                 0, 0, cached_state);
1507                *lockstart = start_pos;
1508                *lockend = last_pos;
1509                ret = 1;
1510        }
1511
1512        for (i = 0; i < num_pages; i++) {
1513                if (clear_page_dirty_for_io(pages[i]))
1514                        account_page_redirty(pages[i]);
1515                set_page_extent_mapped(pages[i]);
1516                WARN_ON(!PageLocked(pages[i]));
1517        }
1518
1519        return ret;
1520}
1521
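/*
 * Illustration (added in this edit, not part of the kernel sources): a worked
 * example of the sector rounding done above, assuming a 4KiB sectorsize; the
 * byte values are hypothetical:
 *
 *	u64 sectorsize = 4096, pos = 6000, write_bytes = 3000;
 *	u64 start_pos = round_down(pos, sectorsize);		      // 4096
 *	u64 last_pos = start_pos +
 *		round_up(pos + write_bytes - start_pos, sectorsize) - 1; // 12287
 *	// The write covers bytes 6000..8999, so the extent lock spans the
 *	// two full sectors it touches: [4096, 12287].
 */
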
1522static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
1523                                    size_t *write_bytes)
1524{
1525        struct btrfs_fs_info *fs_info = inode->root->fs_info;
1526        struct btrfs_root *root = inode->root;
1527        struct btrfs_ordered_extent *ordered;
1528        u64 lockstart, lockend;
1529        u64 num_bytes;
1530        int ret;
1531
1532        ret = btrfs_start_write_no_snapshotting(root);
1533        if (!ret)
1534                return -ENOSPC;
1535
1536        lockstart = round_down(pos, fs_info->sectorsize);
1537        lockend = round_up(pos + *write_bytes,
1538                           fs_info->sectorsize) - 1;
1539
1540        while (1) {
1541                lock_extent(&inode->io_tree, lockstart, lockend);
1542                ordered = btrfs_lookup_ordered_range(inode, lockstart,
1543                                                     lockend - lockstart + 1);
1544                if (!ordered) {
1545                        break;
1546                }
1547                unlock_extent(&inode->io_tree, lockstart, lockend);
1548                btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
1549                btrfs_put_ordered_extent(ordered);
1550        }
1551
1552        num_bytes = lockend - lockstart + 1;
1553        ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1554                        NULL, NULL, NULL);
1555        if (ret <= 0) {
1556                ret = 0;
1557                btrfs_end_write_no_snapshotting(root);
1558        } else {
1559                *write_bytes = min_t(size_t, *write_bytes,
1560                                     num_bytes - pos + lockstart);
1561        }
1562
1563        unlock_extent(&inode->io_tree, lockstart, lockend);
1564
1565        return ret;
1566}
1567
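/*
 * Illustration (added in this edit, not part of the kernel sources):
 * check_can_nocow() above only applies to inodes flagged NODATACOW or to
 * preallocated extents, and decides how much of a write may go into existing
 * extents without a data space reservation.  A hedged userspace sketch of how
 * a file usually gets the NOCOW flag in the first place, assuming <linux/fs.h>
 * provides FS_IOC_GETFLAGS/FS_IOC_SETFLAGS and FS_NOCOW_FL; on btrfs the flag
 * only takes effect while the file is still empty, and the path below is
 * hypothetical (error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/btrfs/db.img", O_RDWR | O_CREAT, 0644);
 *		int attr = 0;
 *
 *		ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *		attr |= FS_NOCOW_FL;			// same effect as chattr +C
 *		return ioctl(fd, FS_IOC_SETFLAGS, &attr); // only sticks while empty
 *	}
 */
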
1568static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1569                                               struct iov_iter *i)
1570{
1571        struct file *file = iocb->ki_filp;
1572        loff_t pos = iocb->ki_pos;
1573        struct inode *inode = file_inode(file);
1574        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1575        struct btrfs_root *root = BTRFS_I(inode)->root;
1576        struct page **pages = NULL;
1577        struct extent_state *cached_state = NULL;
1578        struct extent_changeset *data_reserved = NULL;
1579        u64 release_bytes = 0;
1580        u64 lockstart;
1581        u64 lockend;
1582        size_t num_written = 0;
1583        int nrptrs;
1584        int ret = 0;
1585        bool only_release_metadata = false;
1586        bool force_page_uptodate = false;
1587
1588        nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1589                        PAGE_SIZE / (sizeof(struct page *)));
1590        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1591        nrptrs = max(nrptrs, 8);
1592        pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1593        if (!pages)
1594                return -ENOMEM;
1595
1596        while (iov_iter_count(i) > 0) {
1597                size_t offset = pos & (PAGE_SIZE - 1);
1598                size_t sector_offset;
1599                size_t write_bytes = min(iov_iter_count(i),
1600                                         nrptrs * (size_t)PAGE_SIZE -
1601                                         offset);
1602                size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1603                                                PAGE_SIZE);
1604                size_t reserve_bytes;
1605                size_t dirty_pages;
1606                size_t copied;
1607                size_t dirty_sectors;
1608                size_t num_sectors;
1609                int extents_locked;
1610
1611                WARN_ON(num_pages > nrptrs);
1612
1613                /*
1614                 * Fault in the user pages before locking them in
1615                 * prepare_pages to avoid a recursive lock.
1616                 */
1617                if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1618                        ret = -EFAULT;
1619                        break;
1620                }
1621
1622                sector_offset = pos & (fs_info->sectorsize - 1);
1623                reserve_bytes = round_up(write_bytes + sector_offset,
1624                                fs_info->sectorsize);
1625
1626                extent_changeset_release(data_reserved);
1627                ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
1628                                                  write_bytes);
1629                if (ret < 0) {
1630                        if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1631                                                      BTRFS_INODE_PREALLOC)) &&
1632                            check_can_nocow(BTRFS_I(inode), pos,
1633                                        &write_bytes) > 0) {
1634                                /*
1635                                 * For the nodatacow case there is no need
1636                                 * to reserve data space.
1637                                 */
1638                                only_release_metadata = true;
1639                                /*
1640                                 * our prealloc extent may be smaller than
1641                                 * write_bytes, so scale down.
1642                                 */
1643                                num_pages = DIV_ROUND_UP(write_bytes + offset,
1644                                                         PAGE_SIZE);
1645                                reserve_bytes = round_up(write_bytes +
1646                                                         sector_offset,
1647                                                         fs_info->sectorsize);
1648                        } else {
1649                                break;
1650                        }
1651                }
1652
1653                WARN_ON(reserve_bytes == 0);
1654                ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1655                                reserve_bytes);
1656                if (ret) {
1657                        if (!only_release_metadata)
1658                                btrfs_free_reserved_data_space(inode,
1659                                                data_reserved, pos,
1660                                                write_bytes);
1661                        else
1662                                btrfs_end_write_no_snapshotting(root);
1663                        break;
1664                }
1665
1666                release_bytes = reserve_bytes;
1667again:
1668                /*
1669                 * This is going to set up the pages array with the number of
1670                 * pages we want, so we don't really need to worry about the
1671                 * contents of pages from loop to loop.
1672                 */
1673                ret = prepare_pages(inode, pages, num_pages,
1674                                    pos, write_bytes,
1675                                    force_page_uptodate);
1676                if (ret) {
1677                        btrfs_delalloc_release_extents(BTRFS_I(inode),
1678                                                       reserve_bytes, true);
1679                        break;
1680                }
1681
1682                extents_locked = lock_and_cleanup_extent_if_need(
1683                                BTRFS_I(inode), pages,
1684                                num_pages, pos, write_bytes, &lockstart,
1685                                &lockend, &cached_state);
1686                if (extents_locked < 0) {
1687                        if (extents_locked == -EAGAIN)
1688                                goto again;
1689                        btrfs_delalloc_release_extents(BTRFS_I(inode),
1690                                                       reserve_bytes, true);
1691                        ret = extents_locked;
1692                        break;
1693                }
1694
1695                copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1696
1697                num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1698                dirty_sectors = round_up(copied + sector_offset,
1699                                        fs_info->sectorsize);
1700                dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1701
1702                /*
1703                 * if we have trouble faulting in the pages, fall
1704                 * back to one page at a time
1705                 */
1706                if (copied < write_bytes)
1707                        nrptrs = 1;
1708
1709                if (copied == 0) {
1710                        force_page_uptodate = true;
1711                        dirty_sectors = 0;
1712                        dirty_pages = 0;
1713                } else {
1714                        force_page_uptodate = false;
1715                        dirty_pages = DIV_ROUND_UP(copied + offset,
1716                                                   PAGE_SIZE);
1717                }
1718
1719                if (num_sectors > dirty_sectors) {
1720                        /* release everything except the sectors we dirtied */
1721                        release_bytes -= dirty_sectors <<
1722                                                fs_info->sb->s_blocksize_bits;
1723                        if (only_release_metadata) {
1724                                btrfs_delalloc_release_metadata(BTRFS_I(inode),
1725                                                        release_bytes, true);
1726                        } else {
1727                                u64 __pos;
1728
1729                                __pos = round_down(pos,
1730                                                   fs_info->sectorsize) +
1731                                        (dirty_pages << PAGE_SHIFT);
1732                                btrfs_delalloc_release_space(inode,
1733                                                data_reserved, __pos,
1734                                                release_bytes, true);
1735                        }
1736                }
1737
1738                release_bytes = round_up(copied + sector_offset,
1739                                        fs_info->sectorsize);
1740
1741                if (copied > 0)
1742                        ret = btrfs_dirty_pages(inode, pages, dirty_pages,
1743                                                pos, copied, &cached_state);
1744                if (extents_locked)
1745                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1746                                             lockstart, lockend, &cached_state);
1747                btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
1748                                               true);
1749                if (ret) {
1750                        btrfs_drop_pages(pages, num_pages);
1751                        break;
1752                }
1753
1754                release_bytes = 0;
1755                if (only_release_metadata)
1756                        btrfs_end_write_no_snapshotting(root);
1757
1758                if (only_release_metadata && copied > 0) {
1759                        lockstart = round_down(pos,
1760                                               fs_info->sectorsize);
1761                        lockend = round_up(pos + copied,
1762                                           fs_info->sectorsize) - 1;
1763
1764                        set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1765                                       lockend, EXTENT_NORESERVE, NULL,
1766                                       NULL, GFP_NOFS);
1767                        only_release_metadata = false;
1768                }
1769
1770                btrfs_drop_pages(pages, num_pages);
1771
1772                cond_resched();
1773
1774                balance_dirty_pages_ratelimited(inode->i_mapping);
1775                if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
1776                        btrfs_btree_balance_dirty(fs_info);
1777
1778                pos += copied;
1779                num_written += copied;
1780        }
1781
1782        kfree(pages);
1783
1784        if (release_bytes) {
1785                if (only_release_metadata) {
1786                        btrfs_end_write_no_snapshotting(root);
1787                        btrfs_delalloc_release_metadata(BTRFS_I(inode),
1788                                        release_bytes, true);
1789                } else {
1790                        btrfs_delalloc_release_space(inode, data_reserved,
1791                                        round_down(pos, fs_info->sectorsize),
1792                                        release_bytes, true);
1793                }
1794        }
1795
1796        extent_changeset_free(data_reserved);
1797        return num_written ? num_written : ret;
1798}
1799
1800static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1801{
1802        struct file *file = iocb->ki_filp;
1803        struct inode *inode = file_inode(file);
1804        loff_t pos;
1805        ssize_t written;
1806        ssize_t written_buffered;
1807        loff_t endbyte;
1808        int err;
1809
1810        written = generic_file_direct_write(iocb, from);
1811
1812        if (written < 0 || !iov_iter_count(from))
1813                return written;
1814
1815        pos = iocb->ki_pos;
1816        written_buffered = btrfs_buffered_write(iocb, from);
1817        if (written_buffered < 0) {
1818                err = written_buffered;
1819                goto out;
1820        }
1821        /*
1822         * Ensure all data is persisted. We want the next direct IO read to be
1823         * able to read what was just written.
1824         */
1825        endbyte = pos + written_buffered - 1;
1826        err = btrfs_fdatawrite_range(inode, pos, endbyte);
1827        if (err)
1828                goto out;
1829        err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1830        if (err)
1831                goto out;
1832        written += written_buffered;
1833        iocb->ki_pos = pos + written_buffered;
1834        invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1835                                 endbyte >> PAGE_SHIFT);
1836out:
1837        return written ? written : err;
1838}
1839
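/*
 * Illustration (added in this edit, not part of the kernel sources):
 * __btrfs_direct_write() above falls back to buffered writing for whatever
 * part of a direct write could not be completed, then flushes and invalidates
 * that range so a later O_DIRECT read sees the data.  A minimal userspace
 * sketch of a fully aligned direct write, assuming 4096 bytes is a multiple of
 * both the device logical block size and the page size; the path is
 * hypothetical (error handling omitted):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("/mnt/btrfs/raw.dat",
 *			      O_WRONLY | O_CREAT | O_DIRECT, 0644);
 *
 *		posix_memalign(&buf, 4096, 4096); // O_DIRECT wants aligned memory
 *		memset(buf, 0xab, 4096);
 *		return pwrite(fd, buf, 4096, 0) == 4096 ? 0 : 1;
 *	}
 */
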
1840static void update_time_for_write(struct inode *inode)
1841{
1842        struct timespec64 now;
1843
1844        if (IS_NOCMTIME(inode))
1845                return;
1846
1847        now = current_time(inode);
1848        if (!timespec64_equal(&inode->i_mtime, &now))
1849                inode->i_mtime = now;
1850
1851        if (!timespec64_equal(&inode->i_ctime, &now))
1852                inode->i_ctime = now;
1853
1854        if (IS_I_VERSION(inode))
1855                inode_inc_iversion(inode);
1856}
1857
1858static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1859                                    struct iov_iter *from)
1860{
1861        struct file *file = iocb->ki_filp;
1862        struct inode *inode = file_inode(file);
1863        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1864        struct btrfs_root *root = BTRFS_I(inode)->root;
1865        u64 start_pos;
1866        u64 end_pos;
1867        ssize_t num_written = 0;
1868        bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
1869        ssize_t err;
1870        loff_t pos;
1871        size_t count = iov_iter_count(from);
1872        loff_t oldsize;
1873        int clean_page = 0;
1874
1875        if (!(iocb->ki_flags & IOCB_DIRECT) &&
1876            (iocb->ki_flags & IOCB_NOWAIT))
1877                return -EOPNOTSUPP;
1878
1879        if (!inode_trylock(inode)) {
1880                if (iocb->ki_flags & IOCB_NOWAIT)
1881                        return -EAGAIN;
1882                inode_lock(inode);
1883        }
1884
1885        err = generic_write_checks(iocb, from);
1886        if (err <= 0) {
1887                inode_unlock(inode);
1888                return err;
1889        }
1890
1891        pos = iocb->ki_pos;
1892        if (iocb->ki_flags & IOCB_NOWAIT) {
1893                /*
1894                 * If nodatacow is not set we would have to allocate space,
1895                 * so bail out.
1896                 */
1897                if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1898                                              BTRFS_INODE_PREALLOC)) ||
1899                    check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
1900                        inode_unlock(inode);
1901                        return -EAGAIN;
1902                }
1903        }
1904
1905        current->backing_dev_info = inode_to_bdi(inode);
1906        err = file_remove_privs(file);
1907        if (err) {
1908                inode_unlock(inode);
1909                goto out;
1910        }
1911
1912        /*
1913         * If BTRFS flips read-only due to an unexpected error
1914         * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1915         * then even though we have the file open for writing we
1916         * have to stop this write operation to ensure consistency.
1917         */
1918        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1919                inode_unlock(inode);
1920                err = -EROFS;
1921                goto out;
1922        }
1923
1924        /*
1925         * We reserve space for updating the inode when we reserve space for the
1926         * extent we are going to write, so any ENOSPC is returned there.  We
1927         * don't need to start yet another transaction to update the inode as we
1928         * will update the inode when we finish writing whatever data we write.
1929         */
1930        update_time_for_write(inode);
1931
1932        start_pos = round_down(pos, fs_info->sectorsize);
1933        oldsize = i_size_read(inode);
1934        if (start_pos > oldsize) {
1935                /* Expand hole size to cover write data, preventing empty gap */
1936                end_pos = round_up(pos + count,
1937                                   fs_info->sectorsize);
1938                err = btrfs_cont_expand(inode, oldsize, end_pos);
1939                if (err) {
1940                        inode_unlock(inode);
1941                        goto out;
1942                }
1943                if (start_pos > round_up(oldsize, fs_info->sectorsize))
1944                        clean_page = 1;
1945        }
1946
1947        if (sync)
1948                atomic_inc(&BTRFS_I(inode)->sync_writers);
1949
1950        if (iocb->ki_flags & IOCB_DIRECT) {
1951                num_written = __btrfs_direct_write(iocb, from);
1952        } else {
1953                num_written = btrfs_buffered_write(iocb, from);
1954                if (num_written > 0)
1955                        iocb->ki_pos = pos + num_written;
1956                if (clean_page)
1957                        pagecache_isize_extended(inode, oldsize,
1958                                                i_size_read(inode));
1959        }
1960
1961        inode_unlock(inode);
1962
1963        /*
1964         * We also have to set last_sub_trans to the current log transid,
1965         * otherwise subsequent syncs to a file that's been synced in this
1966         * transaction will appear to have already occurred.
1967         */
1968        spin_lock(&BTRFS_I(inode)->lock);
1969        BTRFS_I(inode)->last_sub_trans = root->log_transid;
1970        spin_unlock(&BTRFS_I(inode)->lock);
1971        if (num_written > 0)
1972                num_written = generic_write_sync(iocb, num_written);
1973
1974        if (sync)
1975                atomic_dec(&BTRFS_I(inode)->sync_writers);
1976out:
1977        current->backing_dev_info = NULL;
1978        return num_written ? num_written : err;
1979}
1980
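/*
 * Illustration (added in this edit, not part of the kernel sources): the
 * IOCB_NOWAIT handling above is reached from userspace via pwritev2() with
 * RWF_NOWAIT, which btrfs only honours together with O_DIRECT on NODATACOW or
 * preallocated extents; anything else gets -EOPNOTSUPP or -EAGAIN.  A hedged
 * sketch, assuming glibc exposes pwritev2() and RWF_NOWAIT; the file name is
 * hypothetical (error handling omitted):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/uio.h>
 *
 *	int main(void)
 *	{
 *		static char data[4096] __attribute__((aligned(4096)));
 *		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *		int fd = open("/mnt/btrfs/nocow.img", O_WRONLY | O_DIRECT);
 *		ssize_t n = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
 *
 *		// n == -1 with errno == EAGAIN: the write would have blocked or
 *		// needed an allocation; retry from a context that can sleep.
 *		return n == (ssize_t)sizeof(data) ? 0 : 1;
 *	}
 */
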
1981int btrfs_release_file(struct inode *inode, struct file *filp)
1982{
1983        struct btrfs_file_private *private = filp->private_data;
1984
1985        if (private && private->filldir_buf)
1986                kfree(private->filldir_buf);
1987        kfree(private);
1988        filp->private_data = NULL;
1989
1990        /*
1991         * ordered_data_close is set by setattr when we are about to truncate
1992         * a file from a non-zero size to a zero size.  This tries to
1993         * flush down new bytes that may have been written if the
1994         * application was using truncate to replace a file in place.
1995         */
1996        if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1997                               &BTRFS_I(inode)->runtime_flags))
1998                        filemap_flush(inode->i_mapping);
1999        return 0;
2000}
2001
2002static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2003{
2004        int ret;
2005        struct blk_plug plug;
2006
2007        /*
2008         * This is only called in fsync, which would do synchronous writes, so
2009         * a plug can merge adjacent IOs as much as possible.  Esp. in case of
2010         * multiple disks using raid profile, a large IO can be split to
2011         * several segments of stripe length (currently 64K).
2012         */
2013        blk_start_plug(&plug);
2014        atomic_inc(&BTRFS_I(inode)->sync_writers);
2015        ret = btrfs_fdatawrite_range(inode, start, end);
2016        atomic_dec(&BTRFS_I(inode)->sync_writers);
2017        blk_finish_plug(&plug);
2018
2019        return ret;
2020}
2021
2022/*
2023 * fsync call for both files and directories.  This logs the inode into
2024 * the tree log instead of forcing full commits whenever possible.
2025 *
2026 * It needs to call filemap_fdatawait so that all ordered extent updates in
2027 * the metadata btree are up to date for copying to the log.
2028 *
2029 * It drops the inode mutex before doing the tree log commit.  This is an
2030 * important optimization for directories because holding the mutex prevents
2031 * new operations on the dir while we write to disk.
2032 */
2033int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2034{
2035        struct dentry *dentry = file_dentry(file);
2036        struct inode *inode = d_inode(dentry);
2037        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2038        struct btrfs_root *root = BTRFS_I(inode)->root;
2039        struct btrfs_trans_handle *trans;
2040        struct btrfs_log_ctx ctx;
2041        int ret = 0, err;
2042        u64 len;
2043
2044        /*
2045         * The range length can be represented by u64; we have to do the typecasts
2046         * to avoid signed overflow if the range is [0, LLONG_MAX], e.g. from fsync().
2047         */
2048        len = (u64)end - (u64)start + 1;
2049        trace_btrfs_sync_file(file, datasync);
2050
2051        btrfs_init_log_ctx(&ctx, inode);
2052
2053        /*
2054         * We write the dirty pages in the range and wait until they complete
2055         * outside of the ->i_mutex, so that the flushing can be done by
2056         * multiple tasks and performance improves.  See
2057         * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2058         */
2059        ret = start_ordered_ops(inode, start, end);
2060        if (ret)
2061                goto out;
2062
2063        inode_lock(inode);
2064        atomic_inc(&root->log_batch);
2065
2066        /*
2067         * We have to do this here to avoid the priority inversion of waiting on
2068         * IO of a lower priority task while holding a transaction open.
2069         */
2070        ret = btrfs_wait_ordered_range(inode, start, len);
2071        if (ret) {
2072                inode_unlock(inode);
2073                goto out;
2074        }
2075        atomic_inc(&root->log_batch);
2076
2077        smp_mb();
2078        if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
2079            BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed) {
2080                /*
2081                 * We've had everything committed since the last time we were
2082                 * modified, so clear this flag in case it was set for whatever
2083                 * reason; it's no longer relevant.
2084                 */
2085                clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2086                          &BTRFS_I(inode)->runtime_flags);
2087                /*
2088                 * An ordered extent might have started before and completed
2089                 * already with io errors, in which case the inode was not
2090                 * updated and we end up here. So check the inode's mapping
2091                 * for any errors that might have happened since the last
2092                 * time we checked, i.e. since the last call to fsync.
2093                 */
2094                ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2095                inode_unlock(inode);
2096                goto out;
2097        }
2098
2099        /*
2100         * We use start here because we will need to wait on the IO to complete
2101         * in btrfs_sync_log, which could require joining a transaction (for
2102         * example checking cross references in the nocow path).  If we use join
2103         * here we could get into a situation where we're waiting on IO to
2104         * happen that is blocked on a transaction trying to commit.  With start
2105         * we inc the extwriter counter, so we wait for all extwriters to exit
2106         * before we start blocking join'ers.  This comment is to keep somebody
2107         * from thinking they are super smart and changing this to
2108         * btrfs_join_transaction *cough*Josef*cough*.
2109         */
2110        trans = btrfs_start_transaction(root, 0);
2111        if (IS_ERR(trans)) {
2112                ret = PTR_ERR(trans);
2113                inode_unlock(inode);
2114                goto out;
2115        }
2116        trans->sync = true;
2117
2118        ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
2119        if (ret < 0) {
2120                /* Fallthrough and commit/free transaction. */
2121                ret = 1;
2122        }
2123
2124        /* we've logged all the items and now have a consistent
2125         * version of the file in the log.  It is possible that
2126         * someone will come in and modify the file, but that's
2127         * fine because the log is consistent on disk, and we
2128         * have references to all of the file's extents
2129         *
2130         * It is possible that someone will come in and log the
2131         * file again, but that will end up using the synchronization
2132         * inside btrfs_sync_log to keep things safe.
2133         */
2134        inode_unlock(inode);
2135
2136        /*
2137         * If any of the ordered extents had an error, just return it to user
2138         * space, so that the application knows some writes didn't succeed and
2139         * can take proper action (e.g. retry). Blindly committing the
2140         * transaction in this case would fool userspace into thinking that
2141         * everything succeeded. And we also want to make sure our log doesn't contain
2142         * file extent items pointing to extents that weren't fully written to -
2143         * just like in the non fast fsync path, where we check for the ordered
2144         * operation's error flag before writing to the log tree and return -EIO
2145         * if any of them had this flag set (btrfs_wait_ordered_range) -
2146         * therefore we need to check for errors in the ordered operations,
2147         * which are indicated by ctx.io_err.
2148         */
2149        if (ctx.io_err) {
2150                btrfs_end_transaction(trans);
2151                ret = ctx.io_err;
2152                goto out;
2153        }
2154
2155        if (ret != BTRFS_NO_LOG_SYNC) {
2156                if (!ret) {
2157                        ret = btrfs_sync_log(trans, root, &ctx);
2158                        if (!ret) {
2159                                ret = btrfs_end_transaction(trans);
2160                                goto out;
2161                        }
2162                }
2163                ret = btrfs_commit_transaction(trans);
2164        } else {
2165                ret = btrfs_end_transaction(trans);
2166        }
2167out:
2168        ASSERT(list_empty(&ctx.list));
2169        err = file_check_and_advance_wb_err(file);
2170        if (!ret)
2171                ret = err;
2172        return ret > 0 ? -EIO : ret;
2173}
2174
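/*
 * Illustration (added in this edit, not part of the kernel sources):
 * btrfs_sync_file() above backs both fsync() and fdatasync() (the datasync
 * argument); on success the data and the metadata needed to find it are on
 * disk, either via the tree log or via a full transaction commit.  A minimal
 * userspace sketch; the file name and record are hypothetical:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		static const char rec[] = "commit 42\n";
 *		int fd = open("/mnt/btrfs/journal.log",
 *			      O_WRONLY | O_CREAT | O_APPEND, 0600);
 *
 *		if (write(fd, rec, sizeof(rec) - 1) != (ssize_t)(sizeof(rec) - 1))
 *			return 1;
 *		// Only after fdatasync() returns 0 may the record be
 *		// considered durable.
 *		return fdatasync(fd) ? 1 : 0;
 *	}
 */
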
2175static const struct vm_operations_struct btrfs_file_vm_ops = {
2176        .fault          = filemap_fault,
2177        .map_pages      = filemap_map_pages,
2178        .page_mkwrite   = btrfs_page_mkwrite,
2179};
2180
2181static int btrfs_file_mmap(struct file  *filp, struct vm_area_struct *vma)
2182{
2183        struct address_space *mapping = filp->f_mapping;
2184
2185        if (!mapping->a_ops->readpage)
2186                return -ENOEXEC;
2187
2188        file_accessed(filp);
2189        vma->vm_ops = &btrfs_file_vm_ops;
2190
2191        return 0;
2192}
2193
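/*
 * Illustration (added in this edit, not part of the kernel sources):
 * btrfs_file_mmap() above wires shared writable mappings to
 * btrfs_page_mkwrite(), which is what reserves space and dirties the page when
 * a mapped page is first written.  A minimal userspace sketch; the file name
 * and length are hypothetical (error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/btrfs/state.bin", O_RDWR);
 *		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, 0);
 *
 *		p[0] = 1;		 // first store faults into page_mkwrite
 *		msync(p, 4096, MS_SYNC); // write the dirtied page back
 *		return munmap(p, 4096);
 *	}
 */
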
2194static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2195                          int slot, u64 start, u64 end)
2196{
2197        struct btrfs_file_extent_item *fi;
2198        struct btrfs_key key;
2199
2200        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2201                return 0;
2202
2203        btrfs_item_key_to_cpu(leaf, &key, slot);
2204        if (key.objectid != btrfs_ino(inode) ||
2205            key.type != BTRFS_EXTENT_DATA_KEY)
2206                return 0;
2207
2208        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2209
2210        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2211                return 0;
2212
2213        if (btrfs_file_extent_disk_bytenr(leaf, fi))
2214                return 0;
2215
2216        if (key.offset == end)
2217                return 1;
2218        if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2219                return 1;
2220        return 0;
2221}
2222
2223static int fill_holes(struct btrfs_trans_handle *trans,
2224                struct btrfs_inode *inode,
2225                struct btrfs_path *path, u64 offset, u64 end)
2226{
2227        struct btrfs_fs_info *fs_info = trans->fs_info;
2228        struct btrfs_root *root = inode->root;
2229        struct extent_buffer *leaf;
2230        struct btrfs_file_extent_item *fi;
2231        struct extent_map *hole_em;
2232        struct extent_map_tree *em_tree = &inode->extent_tree;
2233        struct btrfs_key key;
2234        int ret;
2235
2236        if (btrfs_fs_incompat(fs_info, NO_HOLES))
2237                goto out;
2238
2239        key.objectid = btrfs_ino(inode);
2240        key.type = BTRFS_EXTENT_DATA_KEY;
2241        key.offset = offset;
2242
2243        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2244        if (ret <= 0) {
2245                /*
2246                 * We should have dropped this offset, so if we find it then
2247                 * something has gone horribly wrong.
2248                 */
2249                if (ret == 0)
2250                        ret = -EINVAL;
2251                return ret;
2252        }
2253
2254        leaf = path->nodes[0];
2255        if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2256                u64 num_bytes;
2257
2258                path->slots[0]--;
2259                fi = btrfs_item_ptr(leaf, path->slots[0],
2260                                    struct btrfs_file_extent_item);
2261                num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2262                        end - offset;
2263                btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2264                btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2265                btrfs_set_file_extent_offset(leaf, fi, 0);
2266                btrfs_mark_buffer_dirty(leaf);
2267                goto out;
2268        }
2269
2270        if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2271                u64 num_bytes;
2272
2273                key.offset = offset;
2274                btrfs_set_item_key_safe(fs_info, path, &key);
2275                fi = btrfs_item_ptr(leaf, path->slots[0],
2276                                    struct btrfs_file_extent_item);
2277                num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2278                        offset;
2279                btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2280                btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2281                btrfs_set_file_extent_offset(leaf, fi, 0);
2282                btrfs_mark_buffer_dirty(leaf);
2283                goto out;
2284        }
2285        btrfs_release_path(path);
2286
2287        ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2288                        offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2289        if (ret)
2290                return ret;
2291
2292out:
2293        btrfs_release_path(path);
2294
2295        hole_em = alloc_extent_map();
2296        if (!hole_em) {
2297                btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2298                set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2299        } else {
2300                hole_em->start = offset;
2301                hole_em->len = end - offset;
2302                hole_em->ram_bytes = hole_em->len;
2303                hole_em->orig_start = offset;
2304
2305                hole_em->block_start = EXTENT_MAP_HOLE;
2306                hole_em->block_len = 0;
2307                hole_em->orig_block_len = 0;
2308                hole_em->bdev = fs_info->fs_devices->latest_bdev;
2309                hole_em->compress_type = BTRFS_COMPRESS_NONE;
2310                hole_em->generation = trans->transid;
2311
2312                do {
2313                        btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2314                        write_lock(&em_tree->lock);
2315                        ret = add_extent_mapping(em_tree, hole_em, 1);
2316                        write_unlock(&em_tree->lock);
2317                } while (ret == -EEXIST);
2318                free_extent_map(hole_em);
2319                if (ret)
2320                        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2321                                        &inode->runtime_flags);
2322        }
2323
2324        return 0;
2325}
2326
2327/*
2328 * Find a hole extent on the given inode and change start/len to the end of
2329 * the hole extent (a hole/vacuum extent whose em->start <= start &&
2330 * em->start + em->len > start).
2331 * When a hole extent is found, return 1 and modify start/len.
2332 */
2333static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2334{
2335        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2336        struct extent_map *em;
2337        int ret = 0;
2338
2339        em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2340                              round_down(*start, fs_info->sectorsize),
2341                              round_up(*len, fs_info->sectorsize), 0);
2342        if (IS_ERR(em))
2343                return PTR_ERR(em);
2344
2345        /* Hole or vacuum extent (only exists in no-holes mode) */
2346        if (em->block_start == EXTENT_MAP_HOLE) {
2347                ret = 1;
2348                *len = em->start + em->len > *start + *len ?
2349                       0 : *start + *len - em->start - em->len;
2350                *start = em->start + em->len;
2351        }
2352        free_extent_map(em);
2353        return ret;
2354}
2355
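/*
 * Illustration (added in this edit, not part of the kernel sources):
 * find_first_non_hole() above is an internal helper used by hole punching; the
 * user-visible way to discover holes in a file is lseek() with
 * SEEK_DATA/SEEK_HOLE.  A hedged sketch, assuming the libc headers define both
 * constants; the file name is hypothetical:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/btrfs/sparse.img", O_RDONLY);
 *		off_t data = lseek(fd, 0, SEEK_DATA);	 // start of first data run
 *		off_t hole = lseek(fd, data, SEEK_HOLE); // end of that data run
 *
 *		printf("data from %lld to %lld\n", (long long)data, (long long)hole);
 *		return 0;
 *	}
 */
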
2356static int btrfs_punch_hole_lock_range(struct inode *inode,
2357                                       const u64 lockstart,
2358                                       const u64 lockend,
2359                                       struct extent_state **cached_state)
2360{
2361        while (1) {
2362                struct btrfs_ordered_extent *ordered;
2363                int ret;
2364
2365                truncate_pagecache_range(inode, lockstart, lockend);
2366
2367                lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2368                                 cached_state);
2369                ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2370
2371                /*
2372                 * We need to make sure we have no ordered extents in this range
2373                 * and that nobody raced in and read a page in this range; if
2374                 * either happened, we need to try again.
2375                 */
2376                if ((!ordered ||
2377                    (ordered->file_offset + ordered->len <= lockstart ||
2378                     ordered->file_offset > lockend)) &&
2379                     !filemap_range_has_page(inode->i_mapping,
2380                                             lockstart, lockend)) {
2381                        if (ordered)
2382                                btrfs_put_ordered_extent(ordered);
2383                        break;
2384                }
2385                if (ordered)
2386                        btrfs_put_ordered_extent(ordered);
2387                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2388                                     lockend, cached_state);
2389                ret = btrfs_wait_ordered_range(inode, lockstart,
2390                                               lockend - lockstart + 1);
2391                if (ret)
2392                        return ret;
2393        }
2394        return 0;
2395}
2396
2397static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2398{
2399        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2400        struct btrfs_root *root = BTRFS_I(inode)->root;
2401        struct extent_state *cached_state = NULL;
2402        struct btrfs_path *path;
2403        struct btrfs_block_rsv *rsv;
2404        struct btrfs_trans_handle *trans;
2405        u64 lockstart;
2406        u64 lockend;
2407        u64 tail_start;
2408        u64 tail_len;
2409        u64 orig_start = offset;
2410        u64 cur_offset;
2411        u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1);
2412        u64 drop_end;
2413        int ret = 0;
2414        int err = 0;
2415        unsigned int rsv_count;
2416        bool same_block;
2417        bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES);
2418        u64 ino_size;
2419        bool truncated_block = false;
2420        bool updated_inode = false;
2421
2422        ret = btrfs_wait_ordered_range(inode, offset, len);
2423        if (ret)
2424                return ret;
2425
2426        inode_lock(inode);
2427        ino_size = round_up(inode->i_size, fs_info->sectorsize);
2428        ret = find_first_non_hole(inode, &offset, &len);
2429        if (ret < 0)
2430                goto out_only_mutex;
2431        if (ret && !len) {
2432                /* Already in a large hole */
2433                ret = 0;
2434                goto out_only_mutex;
2435        }
2436
2437        lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
2438        lockend = round_down(offset + len,
2439                             btrfs_inode_sectorsize(inode)) - 1;
2440        same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2441                == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2442        /*
2443         * We needn't truncate any block which is beyond the end of the file
2444         * because we are sure there is no data there.
2445         */
2446        /*
2447         * Only do this if we are in the same block and we aren't doing the
2448         * entire block.
2449         */
2450        if (same_block && len < fs_info->sectorsize) {
2451                if (offset < ino_size) {
2452                        truncated_block = true;
2453                        ret = btrfs_truncate_block(inode, offset, len, 0);
2454                } else {
2455                        ret = 0;
2456                }
2457                goto out_only_mutex;
2458        }
2459
2460        /* zero back part of the first block */
2461        if (offset < ino_size) {
2462                truncated_block = true;
2463                ret = btrfs_truncate_block(inode, offset, 0, 0);
2464                if (ret) {
2465                        inode_unlock(inode);
2466                        return ret;
2467                }
2468        }
2469
2470        /* Check the aligned pages after the first unaligned page. If
2471         * offset != orig_start, the first unaligned page and several
2472         * following pages are already inside holes, so the extra check
2473         * can be skipped. */
2474        if (offset == orig_start) {
2475                /* after truncate page, check hole again */
2476                len = offset + len - lockstart;
2477                offset = lockstart;
2478                ret = find_first_non_hole(inode, &offset, &len);
2479                if (ret < 0)
2480                        goto out_only_mutex;
2481                if (ret && !len) {
2482                        ret = 0;
2483                        goto out_only_mutex;
2484                }
2485                lockstart = offset;
2486        }
2487
2488        /* Check the tail unaligned part is in a hole */
2489        tail_start = lockend + 1;
2490        tail_len = offset + len - tail_start;
2491        if (tail_len) {
2492                ret = find_first_non_hole(inode, &tail_start, &tail_len);
2493                if (unlikely(ret < 0))
2494                        goto out_only_mutex;
2495                if (!ret) {
2496                        /* zero the front end of the last page */
2497                        if (tail_start + tail_len < ino_size) {
2498                                truncated_block = true;
2499                                ret = btrfs_truncate_block(inode,
2500                                                        tail_start + tail_len,
2501                                                        0, 1);
2502                                if (ret)
2503                                        goto out_only_mutex;
2504                        }
2505                }
2506        }
2507
2508        if (lockend < lockstart) {
2509                ret = 0;
2510                goto out_only_mutex;
2511        }
2512
2513        ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2514                                          &cached_state);
2515        if (ret) {
2516                inode_unlock(inode);
2517                goto out_only_mutex;
2518        }
2519
2520        path = btrfs_alloc_path();
2521        if (!path) {
2522                ret = -ENOMEM;
2523                goto out;
2524        }
2525
2526        rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2527        if (!rsv) {
2528                ret = -ENOMEM;
2529                goto out_free;
2530        }
2531        rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1);
2532        rsv->failfast = 1;
2533
2534        /*
2535         * 1 - update the inode
2536         * 1 - removing the extents in the range
2537         * 1 - adding the hole extent if no_holes isn't set
2538         */
2539        rsv_count = no_holes ? 2 : 3;
2540        trans = btrfs_start_transaction(root, rsv_count);
2541        if (IS_ERR(trans)) {
2542                err = PTR_ERR(trans);
2543                goto out_free;
2544        }
2545
2546        ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2547                                      min_size, 0);
2548        BUG_ON(ret);
2549        trans->block_rsv = rsv;
2550
2551        cur_offset = lockstart;
2552        len = lockend - cur_offset;
2553        while (cur_offset < lockend) {
2554                ret = __btrfs_drop_extents(trans, root, inode, path,
2555                                           cur_offset, lockend + 1,
2556                                           &drop_end, 1, 0, 0, NULL);
2557                if (ret != -ENOSPC)
2558                        break;
2559
2560                trans->block_rsv = &fs_info->trans_block_rsv;
2561
2562                if (cur_offset < drop_end && cur_offset < ino_size) {
2563                        ret = fill_holes(trans, BTRFS_I(inode), path,
2564                                        cur_offset, drop_end);
2565                        if (ret) {
2566                                /*
2567                                 * If we failed then we didn't insert our hole
2568                                 * entries for the area we dropped, so now the
2569                                 * fs is corrupted and we must abort the
2570                                 * transaction.
2571                                 */
2572                                btrfs_abort_transaction(trans, ret);
2573                                err = ret;
2574                                break;
2575                        }
2576                }
2577
2578                cur_offset = drop_end;
2579
2580                ret = btrfs_update_inode(trans, root, inode);
2581                if (ret) {
2582                        err = ret;
2583                        break;
2584                }
2585
2586                btrfs_end_transaction(trans);
2587                btrfs_btree_balance_dirty(fs_info);
2588
2589                trans = btrfs_start_transaction(root, rsv_count);
2590                if (IS_ERR(trans)) {
2591                        ret = PTR_ERR(trans);
2592                        trans = NULL;
2593                        break;
2594                }
2595
2596                ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2597                                              rsv, min_size, 0);
2598                BUG_ON(ret);    /* shouldn't happen */
2599                trans->block_rsv = rsv;
2600
2601                ret = find_first_non_hole(inode, &cur_offset, &len);
2602                if (unlikely(ret < 0))
2603                        break;
2604                if (ret && !len) {
2605                        ret = 0;
2606                        break;
2607                }
2608        }
2609
2610        if (ret) {
2611                err = ret;
2612                goto out_trans;
2613        }
2614
2615        trans->block_rsv = &fs_info->trans_block_rsv;
2616        /*
2617         * If we are using the NO_HOLES feature we might already have had a
2618         * hole that overlaps a part of the region [lockstart, lockend] and
2619         * ends at (or beyond) lockend. Since we have no file extent items to
2620         * represent holes, drop_end can be less than lockend and so we must
2621         * make sure we have an extent map representing the existing hole (the
2622         * call to __btrfs_drop_extents() might have dropped the existing extent
2623         * map representing the existing hole), otherwise the fast fsync path
2624         * will not record the existence of the hole region
2625         * [existing_hole_start, lockend].
2626         */
2627        if (drop_end <= lockend)
2628                drop_end = lockend + 1;
2629        /*
2630         * Don't insert file hole extent item if it's for a range beyond eof
2631         * (because it's useless) or if it represents a 0 bytes range (when
2632         * cur_offset == drop_end).
2633         */
2634        if (cur_offset < ino_size && cur_offset < drop_end) {
2635                ret = fill_holes(trans, BTRFS_I(inode), path,
2636                                cur_offset, drop_end);
2637                if (ret) {
2638                        /* Same comment as above. */
2639                        btrfs_abort_transaction(trans, ret);
2640                        err = ret;
2641                        goto out_trans;
2642                }
2643        }
2644
2645out_trans:
2646        if (!trans)
2647                goto out_free;
2648
2649        inode_inc_iversion(inode);
2650        inode->i_mtime = inode->i_ctime = current_time(inode);
2651
2652        trans->block_rsv = &fs_info->trans_block_rsv;
2653        ret = btrfs_update_inode(trans, root, inode);
2654        updated_inode = true;
2655        btrfs_end_transaction(trans);
2656        btrfs_btree_balance_dirty(fs_info);
2657out_free:
2658        btrfs_free_path(path);
2659        btrfs_free_block_rsv(fs_info, rsv);
2660out:
2661        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2662                             &cached_state);
2663out_only_mutex:
2664        if (!updated_inode && truncated_block && !ret && !err) {
2665                /*
2666                 * If we only end up zeroing part of a page, we still need to
2667                 * update the inode item, so that all the time fields are
2668                 * updated as well as the necessary in-memory btrfs inode fields
2669                 * used for detecting, at fsync time, whether the inode isn't yet
2670                 * in the log tree or is there but not up to date.
2671                 */
2672                trans = btrfs_start_transaction(root, 1);
2673                if (IS_ERR(trans)) {
2674                        err = PTR_ERR(trans);
2675                } else {
2676                        err = btrfs_update_inode(trans, root, inode);
2677                        ret = btrfs_end_transaction(trans);
2678                }
2679        }
2680        inode_unlock(inode);
2681        if (ret && !err)
2682                err = ret;
2683        return err;
2684}
2685
2686/* Helper structure to record which range is already reserved */
2687struct falloc_range {
2688        struct list_head list;
2689        u64 start;
2690        u64 len;
2691};
2692
2693/*
2694 * Helper function to add a falloc range
2695 *
2696 * The caller should have locked the larger extent range containing
2697 * [start, len).
2698 */
2699static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2700{
2701        struct falloc_range *prev = NULL;
2702        struct falloc_range *range = NULL;
2703
2704        if (list_empty(head))
2705                goto insert;
2706
2707        /*
2708         * As fallocate iterates in bytenr order, we only need to check
2709         * the last range.
2710         */
2711        prev = list_entry(head->prev, struct falloc_range, list);
2712        if (prev->start + prev->len == start) {
2713                prev->len += len;
2714                return 0;
2715        }
2716insert:
2717        range = kmalloc(sizeof(*range), GFP_KERNEL);
2718        if (!range)
2719                return -ENOMEM;
2720        range->start = start;
2721        range->len = len;
2722        list_add_tail(&range->list, head);
2723        return 0;
2724}
2725
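    /*
     * After a successful fallocate, bring the inode's i_size up to @end.
     * There is nothing to do if FALLOC_FL_KEEP_SIZE was requested or if @end
     * does not go past the current i_size; otherwise the new size and ctime
     * are persisted through a small (one item) transaction.
     */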
2726static int btrfs_fallocate_update_isize(struct inode *inode,
2727                                        const u64 end,
2728                                        const int mode)
2729{
2730        struct btrfs_trans_handle *trans;
2731        struct btrfs_root *root = BTRFS_I(inode)->root;
2732        int ret;
2733        int ret2;
2734
2735        if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2736                return 0;
2737
2738        trans = btrfs_start_transaction(root, 1);
2739        if (IS_ERR(trans))
2740                return PTR_ERR(trans);
2741
2742        inode->i_ctime = current_time(inode);
2743        i_size_write(inode, end);
2744        btrfs_ordered_update_i_size(inode, end, NULL);
2745        ret = btrfs_update_inode(trans, root, inode);
2746        ret2 = btrfs_end_transaction(trans);
2747
2748        return ret ? ret : ret2;
2749}
2750
2751enum {
2752        RANGE_BOUNDARY_WRITTEN_EXTENT = 0,
2753        RANGE_BOUNDARY_PREALLOC_EXTENT = 1,
2754        RANGE_BOUNDARY_HOLE = 2,
2755};
2756
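    /*
     * Classify the block that contains @offset: report whether it maps to a
     * hole, to a prealloc (unwritten) extent or to a written extent, using
     * one of the RANGE_BOUNDARY_* values above.  A negative value is
     * returned if looking up the extent map fails.
     */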
2757static int btrfs_zero_range_check_range_boundary(struct inode *inode,
2758                                                 u64 offset)
2759{
2760        const u64 sectorsize = btrfs_inode_sectorsize(inode);
2761        struct extent_map *em;
2762        int ret;
2763
2764        offset = round_down(offset, sectorsize);
2765        em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
2766        if (IS_ERR(em))
2767                return PTR_ERR(em);
2768
2769        if (em->block_start == EXTENT_MAP_HOLE)
2770                ret = RANGE_BOUNDARY_HOLE;
2771        else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2772                ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2773        else
2774                ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2775
2776        free_extent_map(em);
2777        return ret;
2778}
2779
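    /*
     * Implements FALLOC_FL_ZERO_RANGE: make the range [offset, offset + len)
     * read back as zeroes.  Block-aligned parts are covered with preallocated
     * (unwritten) extents, while unaligned boundary blocks that already map
     * to written extents are zeroed in place via btrfs_truncate_block().
     */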
2780static int btrfs_zero_range(struct inode *inode,
2781                            loff_t offset,
2782                            loff_t len,
2783                            const int mode)
2784{
2785        struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2786        struct extent_map *em;
2787        struct extent_changeset *data_reserved = NULL;
2788        int ret;
2789        u64 alloc_hint = 0;
2790        const u64 sectorsize = btrfs_inode_sectorsize(inode);
2791        u64 alloc_start = round_down(offset, sectorsize);
2792        u64 alloc_end = round_up(offset + len, sectorsize);
2793        u64 bytes_to_reserve = 0;
2794        bool space_reserved = false;
2795
2796        inode_dio_wait(inode);
2797
2798        em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2799                              alloc_start, alloc_end - alloc_start, 0);
2800        if (IS_ERR(em)) {
2801                ret = PTR_ERR(em);
2802                goto out;
2803        }
2804
2805        /*
2806         * Avoid hole punching and extent allocation for some cases. More cases
2807         * could be considered, but they are unlikely to be common and we keep things
2808         * as simple as possible for now. Also, intentionally, if the target
2809         * range contains one or more prealloc extents together with regular
2810         * extents and holes, we drop all the existing extents and allocate a
2811         * new prealloc extent, so that we get a larger contiguous disk extent.
2812         */
2813        if (em->start <= alloc_start &&
2814            test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2815                const u64 em_end = em->start + em->len;
2816
2817                if (em_end >= offset + len) {
2818                        /*
2819                         * The whole range is already a prealloc extent,
2820                         * do nothing except updating the inode's i_size if
2821                         * needed.
2822                         */
2823                        free_extent_map(em);
2824                        ret = btrfs_fallocate_update_isize(inode, offset + len,
2825                                                           mode);
2826                        goto out;
2827                }
2828                /*
2829                 * Part of the range is already a prealloc extent, so operate
2830                 * only on the remaining part of the range.
2831                 */
2832                alloc_start = em_end;
2833                ASSERT(IS_ALIGNED(alloc_start, sectorsize));
2834                len = offset + len - alloc_start;
2835                offset = alloc_start;
2836                alloc_hint = em->block_start + em->len;
2837        }
2838        free_extent_map(em);
2839
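            /*
             * The whole range falls within a single block.  If it is already
             * a prealloc block only i_size may need updating; if it is a
             * partially covered written block it is zeroed in place; otherwise
             * fall through and handle just this one block below.
             */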
2840        if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
2841            BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
2842                em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2843                                      alloc_start, sectorsize, 0);
2844                if (IS_ERR(em)) {
2845                        ret = PTR_ERR(em);
2846                        goto out;
2847                }
2848
2849                if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2850                        free_extent_map(em);
2851                        ret = btrfs_fallocate_update_isize(inode, offset + len,
2852                                                           mode);
2853                        goto out;
2854                }
2855                if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
2856                        free_extent_map(em);
2857                        ret = btrfs_truncate_block(inode, offset, len, 0);
2858                        if (!ret)
2859                                ret = btrfs_fallocate_update_isize(inode,
2860                                                                   offset + len,
2861                                                                   mode);
2862                        return ret;
2863                }
2864                free_extent_map(em);
2865                alloc_start = round_down(offset, sectorsize);
2866                alloc_end = alloc_start + sectorsize;
2867                goto reserve_space;
2868        }
2869
2870        alloc_start = round_up(offset, sectorsize);
2871        alloc_end = round_down(offset + len, sectorsize);
2872
2873        /*
2874         * For unaligned ranges, check the pages at the boundaries: they might
2875         * map to an extent, in which case we need to partially zero them, or
2876         * they might map to a hole, in which case we need our allocation range
2877         * to cover them.
2878         */
2879        if (!IS_ALIGNED(offset, sectorsize)) {
2880                ret = btrfs_zero_range_check_range_boundary(inode, offset);
2881                if (ret < 0)
2882                        goto out;
2883                if (ret == RANGE_BOUNDARY_HOLE) {
2884                        alloc_start = round_down(offset, sectorsize);
2885                        ret = 0;
2886                } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2887                        ret = btrfs_truncate_block(inode, offset, 0, 0);
2888                        if (ret)
2889                                goto out;
2890                } else {
2891                        ret = 0;
2892                }
2893        }
2894
2895        if (!IS_ALIGNED(offset + len, sectorsize)) {
2896                ret = btrfs_zero_range_check_range_boundary(inode,
2897                                                            offset + len);
2898                if (ret < 0)
2899                        goto out;
2900                if (ret == RANGE_BOUNDARY_HOLE) {
2901                        alloc_end = round_up(offset + len, sectorsize);
2902                        ret = 0;
2903                } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2904                        ret = btrfs_truncate_block(inode, offset + len, 0, 1);
2905                        if (ret)
2906                                goto out;
2907                } else {
2908                        ret = 0;
2909                }
2910        }
2911
2912reserve_space:
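            /*
             * If there is a block-aligned sub-range left to cover, reserve
             * data and qgroup space for it, lock it and preallocate an
             * unwritten extent over it so that reads return zeroes.
             */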
2913        if (alloc_start < alloc_end) {
2914                struct extent_state *cached_state = NULL;
2915                const u64 lockstart = alloc_start;
2916                const u64 lockend = alloc_end - 1;
2917
2918                bytes_to_reserve = alloc_end - alloc_start;
2919                ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
2920                                                      bytes_to_reserve);
2921                if (ret < 0)
2922                        goto out;
2923                space_reserved = true;
2924                ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
2925                                                alloc_start, bytes_to_reserve);
2926                if (ret)
2927                        goto out;
2928                ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2929                                                  &cached_state);
2930                if (ret)
2931                        goto out;
2932                ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
2933                                                alloc_end - alloc_start,
2934                                                i_blocksize(inode),
2935                                                offset + len, &alloc_hint);
2936                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2937                                     lockend, &cached_state);
2938                /* btrfs_prealloc_file_range releases reserved space on error */
2939                if (ret) {
2940                        space_reserved = false;
2941                        goto out;
2942                }
2943        }
2944        ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
2945 out:
2946        if (ret && space_reserved)
2947                btrfs_free_reserved_data_space(inode, data_reserved,
2948                                               alloc_start, bytes_to_reserve);
2949        extent_changeset_free(data_reserved);
2950
2951        return ret;
2952}
2953
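    /*
     * The fallocate(2) entry point.  FALLOC_FL_PUNCH_HOLE is handled by
     * btrfs_punch_hole() and FALLOC_FL_ZERO_RANGE by btrfs_zero_range();
     * the default mode (optionally with FALLOC_FL_KEEP_SIZE) preallocates
     * unwritten extents for the holes in the range and for the part of the
     * range that lies beyond the current i_size.
     */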
2954static long btrfs_fallocate(struct file *file, int mode,
2955                            loff_t offset, loff_t len)
2956{
2957        struct inode *inode = file_inode(file);
2958        struct extent_state *cached_state = NULL;
2959        struct extent_changeset *data_reserved = NULL;
2960        struct falloc_range *range;
2961        struct falloc_range *tmp;
2962        struct list_head reserve_list;
2963        u64 cur_offset;
2964        u64 last_byte;
2965        u64 alloc_start;
2966        u64 alloc_end;
2967        u64 alloc_hint = 0;
2968        u64 locked_end;
2969        u64 actual_end = 0;
2970        struct extent_map *em;
2971        int blocksize = btrfs_inode_sectorsize(inode);
2972        int ret;
2973
2974        alloc_start = round_down(offset, blocksize);
2975        alloc_end = round_up(offset + len, blocksize);
2976        cur_offset = alloc_start;
2977
2978        /* Make sure we aren't being given some crap mode */
2979        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
2980                     FALLOC_FL_ZERO_RANGE))
2981                return -EOPNOTSUPP;
2982
2983        if (mode & FALLOC_FL_PUNCH_HOLE)
2984                return btrfs_punch_hole(inode, offset, len);
2985
2986        /*
2987         * Only trigger disk allocation here, don't trigger qgroup reserve.
2988         *
2989         * Qgroup space will be checked and reserved later, per range.
2990         */
2991        if (!(mode & FALLOC_FL_ZERO_RANGE)) {
2992                ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
2993                                                      alloc_end - alloc_start);
2994                if (ret < 0)
2995                        return ret;
2996        }
2997
2998        inode_lock(inode);
2999
3000        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3001                ret = inode_newsize_ok(inode, offset + len);
3002                if (ret)
3003                        goto out;
3004        }
3005
3006        /*
3007         * TODO: Move these two operations after we have checked the
3008         * accurate reserved space, otherwise fallocate can still fail
3009         * but leave the page truncated or the size expanded.
3010         *
3011         * That's a minor problem though, and won't do much harm.
3012         */
3013        if (alloc_start > inode->i_size) {
3014                ret = btrfs_cont_expand(inode, i_size_read(inode),
3015                                        alloc_start);
3016                if (ret)
3017                        goto out;
3018        } else if (offset + len > inode->i_size) {
3019                /*
3020                 * If we are fallocating from the end of the file onward we
3021                 * need to zero out the end of the block if i_size lands in the
3022                 * middle of a block.
3023                 */
3024                ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
3025                if (ret)
3026                        goto out;
3027        }
3028
3029        /*
3030         * Wait for ordered IO before we take any locks.  We'll loop again
3031         * below with the locks held.
3032         */
3033        ret = btrfs_wait_ordered_range(inode, alloc_start,
3034                                       alloc_end - alloc_start);
3035        if (ret)
3036                goto out;
3037
3038        if (mode & FALLOC_FL_ZERO_RANGE) {
3039                ret = btrfs_zero_range(inode, offset, len, mode);
3040                inode_unlock(inode);
3041                return ret;
3042        }
3043
3044        locked_end = alloc_end - 1;
3045        while (1) {
3046                struct btrfs_ordered_extent *ordered;
3047
3048                /* The extent lock is ordered inside the running
3049                 * transaction.
3050                 */
3051                lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3052                                 locked_end, &cached_state);
3053                ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);
3054
3055                if (ordered &&
3056                    ordered->file_offset + ordered->len > alloc_start &&
3057                    ordered->file_offset < alloc_end) {
3058                        btrfs_put_ordered_extent(ordered);
3059                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3060                                             alloc_start, locked_end,
3061                                             &cached_state);
3062                        /*
3063                         * We can't wait on the range with the transaction
3064                         * running or with the extent lock held.
3065                         */
3066                        ret = btrfs_wait_ordered_range(inode, alloc_start,
3067                                                       alloc_end - alloc_start);
3068                        if (ret)
3069                                goto out;
3070                } else {
3071                        if (ordered)
3072                                btrfs_put_ordered_extent(ordered);
3073                        break;
3074                }
3075        }
3076
3077        /* First, check if we exceed the qgroup limit */
3078        INIT_LIST_HEAD(&reserve_list);
3079        while (cur_offset < alloc_end) {
3080                em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3081                                      alloc_end - cur_offset, 0);
3082                if (IS_ERR(em)) {
3083                        ret = PTR_ERR(em);
3084                        break;
3085                }
3086                last_byte = min(extent_map_end(em), alloc_end);
3087                actual_end = min_t(u64, extent_map_end(em), offset + len);
3088                last_byte = ALIGN(last_byte, blocksize);
3089                if (em->block_start == EXTENT_MAP_HOLE ||
3090                    (cur_offset >= inode->i_size &&
3091                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3092                        ret = add_falloc_range(&reserve_list, cur_offset,
3093                                               last_byte - cur_offset);
3094                        if (ret < 0) {
3095                                free_extent_map(em);
3096                                break;
3097                        }
3098                        ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3099                                        cur_offset, last_byte - cur_offset);
3100                        if (ret < 0) {
3101                                free_extent_map(em);
3102                                break;
3103                        }
3104                } else {
3105                        /*
3106                         * We do not need to reserve an unwritten extent
3107                         * for this range, so free the reserved data space
3108                         * first, otherwise it'll result in a false ENOSPC error.
3109                         */
3110                        btrfs_free_reserved_data_space(inode, data_reserved,
3111                                        cur_offset, last_byte - cur_offset);
3112                }
3113                free_extent_map(em);
3114                cur_offset = last_byte;
3115        }
3116
3117        /*
3118         * If ret is still 0, it means we're OK to fallocate.
3119         * Otherwise, just clean up the list and exit.
3120         */
3121        list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3122                if (!ret)
3123                        ret = btrfs_prealloc_file_range(inode, mode,
3124                                        range->start,
3125                                        range->len, i_blocksize(inode),
3126                                        offset + len, &alloc_hint);
3127                else
3128                        btrfs_free_reserved_data_space(inode,
3129                                        data_reserved, range->start,
3130                                        range->len);
3131                list_del(&range->list);
3132                kfree(range);
3133        }
3134        if (ret < 0)
3135                goto out_unlock;
3136
3137        /*
3138         * We didn't need to allocate any more space, but we still extended the
3139         * size of the file so we need to update i_size and the inode item.
3140         */
3141        ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3142out_unlock:
3143        unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3144                             &cached_state);
3145out:
3146        inode_unlock(inode);
3147        /* Let go of our reservation. */
3148        if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3149                btrfs_free_reserved_data_space(inode, data_reserved,
3150                                alloc_start, alloc_end - cur_offset);
3151        extent_changeset_free(data_reserved);
3152        return ret;
3153}
3154
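    /*
     * Helper for SEEK_DATA and SEEK_HOLE.  Starting at *offset, walk the
     * extent maps (delalloc included, via btrfs_get_extent_fiemap()) until a
     * region of the requested kind is found and store its start back in
     * *offset.  For SEEK_DATA, -ENXIO is returned if there is no data from
     * *offset up to i_size.
     */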
3155static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
3156{
3157        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3158        struct extent_map *em = NULL;
3159        struct extent_state *cached_state = NULL;
3160        u64 lockstart;
3161        u64 lockend;
3162        u64 start;
3163        u64 len;
3164        int ret = 0;
3165
3166        if (inode->i_size == 0)
3167                return -ENXIO;
3168
3169        /*
3170         * *offset can be negative; in that case we start finding DATA/HOLE from
3171         * the very start of the file.
3172         */
3173        start = max_t(loff_t, 0, *offset);
3174
3175        lockstart = round_down(start, fs_info->sectorsize);
3176        lockend = round_up(i_size_read(inode),
3177                           fs_info->sectorsize);
3178        if (lockend <= lockstart)
3179                lockend = lockstart + fs_info->sectorsize;
3180        lockend--;
3181        len = lockend - lockstart + 1;
3182
3183        lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3184                         &cached_state);
3185
3186        while (start < inode->i_size) {
3187                em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0,
3188                                start, len, 0);
3189                if (IS_ERR(em)) {
3190                        ret = PTR_ERR(em);
3191                        em = NULL;
3192                        break;
3193                }
3194
3195                if (whence == SEEK_HOLE &&
3196                    (em->block_start == EXTENT_MAP_HOLE ||
3197                     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3198                        break;
3199                else if (whence == SEEK_DATA &&
3200                           (em->block_start != EXTENT_MAP_HOLE &&
3201                            !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3202                        break;
3203
3204                start = em->start + em->len;
3205                free_extent_map(em);
3206                em = NULL;
3207                cond_resched();
3208        }
3209        free_extent_map(em);
3210        if (!ret) {
3211                if (whence == SEEK_DATA && start >= inode->i_size)
3212                        ret = -ENXIO;
3213                else
3214                        *offset = min_t(loff_t, start, inode->i_size);
3215        }
3216        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3217                             &cached_state);
3218        return ret;
3219}
3220
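    /*
     * llseek: SEEK_END and SEEK_CUR are handled by generic_file_llseek(),
     * while SEEK_DATA and SEEK_HOLE are resolved through
     * find_desired_extent(), all under the inode lock.
     */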
3221static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3222{
3223        struct inode *inode = file->f_mapping->host;
3224        int ret;
3225
3226        inode_lock(inode);
3227        switch (whence) {
3228        case SEEK_END:
3229        case SEEK_CUR:
3230                offset = generic_file_llseek(file, offset, whence);
3231                goto out;
3232        case SEEK_DATA:
3233        case SEEK_HOLE:
3234                if (offset >= i_size_read(inode)) {
3235                        inode_unlock(inode);
3236                        return -ENXIO;
3237                }
3238
3239                ret = find_desired_extent(inode, &offset, whence);
3240                if (ret) {
3241                        inode_unlock(inode);
3242                        return ret;
3243                }
3244        }
3245
3246        offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3247out:
3248        inode_unlock(inode);
3249        return offset;
3250}
3251
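    /*
     * Advertise support for non-blocking I/O (RWF_NOWAIT) before running the
     * generic open checks.
     */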
3252static int btrfs_file_open(struct inode *inode, struct file *filp)
3253{
3254        filp->f_mode |= FMODE_NOWAIT;
3255        return generic_file_open(inode, filp);
3256}
3257
3258const struct file_operations btrfs_file_operations = {
3259        .llseek         = btrfs_file_llseek,
3260        .read_iter      = generic_file_read_iter,
3261        .splice_read    = generic_file_splice_read,
3262        .write_iter     = btrfs_file_write_iter,
3263        .mmap           = btrfs_file_mmap,
3264        .open           = btrfs_file_open,
3265        .release        = btrfs_release_file,
3266        .fsync          = btrfs_sync_file,
3267        .fallocate      = btrfs_fallocate,
3268        .unlocked_ioctl = btrfs_ioctl,
3269#ifdef CONFIG_COMPAT
3270        .compat_ioctl   = btrfs_compat_ioctl,
3271#endif
3272        .clone_file_range = btrfs_clone_file_range,
3273        .dedupe_file_range = btrfs_dedupe_file_range,
3274};
3275
3276void __cold btrfs_auto_defrag_exit(void)
3277{
3278        kmem_cache_destroy(btrfs_inode_defrag_cachep);
3279}
3280
3281int __init btrfs_auto_defrag_init(void)
3282{
3283        btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3284                                        sizeof(struct inode_defrag), 0,
3285                                        SLAB_MEM_SPREAD,
3286                                        NULL);
3287        if (!btrfs_inode_defrag_cachep)
3288                return -ENOMEM;
3289
3290        return 0;
3291}
3292
3293int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3294{
3295        int ret;
3296
3297        /*
3298         * So with compression we will find and lock a dirty page and clear the
3299         * first one as dirty, set up an async extent, and immediately return
3300         * with the entire range locked but with no pages actually marked for
3301         * writeback.  So we can't just filemap_write_and_wait_range() and
3302         * expect it to work since it will just kick off a thread to do the
3303         * actual work.  So we need to call filemap_fdatawrite_range _again_
3304         * since it will wait on the page lock, which won't be unlocked until
3305         * after the pages have been marked as writeback and so we're good to go
3306         * from there.  We have to do this otherwise we'll miss the ordered
3307         * extents and that results in badness.  Please Josef, do not think you
3308         * know better and pull this out at some point in the future, it is
3309         * right and you are wrong.
3310         */
3311        ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3312        if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3313                             &BTRFS_I(inode)->runtime_flags))
3314                ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3315
3316        return ret;
3317}
3318