linux/fs/btrfs/file.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/fs.h>
   7#include <linux/pagemap.h>
   8#include <linux/time.h>
   9#include <linux/init.h>
  10#include <linux/string.h>
  11#include <linux/backing-dev.h>
  12#include <linux/falloc.h>
  13#include <linux/writeback.h>
  14#include <linux/compat.h>
  15#include <linux/slab.h>
  16#include <linux/btrfs.h>
  17#include <linux/uio.h>
  18#include <linux/iversion.h>
  19#include "ctree.h"
  20#include "disk-io.h"
  21#include "transaction.h"
  22#include "btrfs_inode.h"
  23#include "print-tree.h"
  24#include "tree-log.h"
  25#include "locking.h"
  26#include "volumes.h"
  27#include "qgroup.h"
  28#include "compression.h"
  29#include "delalloc-space.h"
  30#include "reflink.h"
  31
  32static struct kmem_cache *btrfs_inode_defrag_cachep;
  33/*
  34 * when auto defrag is enabled we
  35 * queue up these defrag structs to remember which
  36 * inodes need defragging passes
  37 */
  38struct inode_defrag {
  39        struct rb_node rb_node;
  40        /* objectid */
  41        u64 ino;
  42        /*
  43         * transid where the defrag was added, we search for
  44         * extents newer than this
  45         */
  46        u64 transid;
  47
  48        /* root objectid */
  49        u64 root;
  50
  51        /* last offset we were able to defrag */
  52        u64 last_offset;
  53
  54        /* if we've wrapped around back to zero once already */
  55        int cycled;
  56};
  57
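     /*
      * Compare two defrag records: order by root objectid first, then by
      * inode number, so the rbtree is keyed on (root, ino).
      */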
  58static int __compare_inode_defrag(struct inode_defrag *defrag1,
  59                                  struct inode_defrag *defrag2)
  60{
  61        if (defrag1->root > defrag2->root)
  62                return 1;
  63        else if (defrag1->root < defrag2->root)
  64                return -1;
  65        else if (defrag1->ino > defrag2->ino)
  66                return 1;
  67        else if (defrag1->ino < defrag2->ino)
  68                return -1;
  69        else
  70                return 0;
  71}
  72
   73/* Insert a record for an inode into the defrag tree.  The
   74 * fs_info->defrag_inodes_lock must already be held.
   75 *
   76 * If you're inserting a record for an older transid than an
   77 * existing record, the transid already in the tree is lowered.
   78 *
   79 * If an existing record is found, -EEXIST is returned and the
   80 * caller is expected to free the defrag item passed in.
   81 */
  82static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
  83                                    struct inode_defrag *defrag)
  84{
  85        struct btrfs_fs_info *fs_info = inode->root->fs_info;
  86        struct inode_defrag *entry;
  87        struct rb_node **p;
  88        struct rb_node *parent = NULL;
  89        int ret;
  90
  91        p = &fs_info->defrag_inodes.rb_node;
  92        while (*p) {
  93                parent = *p;
  94                entry = rb_entry(parent, struct inode_defrag, rb_node);
  95
  96                ret = __compare_inode_defrag(defrag, entry);
  97                if (ret < 0)
  98                        p = &parent->rb_left;
  99                else if (ret > 0)
 100                        p = &parent->rb_right;
 101                else {
 102                        /* if we're reinserting an entry for
 103                         * an old defrag run, make sure to
 104                         * lower the transid of our existing record
 105                         */
 106                        if (defrag->transid < entry->transid)
 107                                entry->transid = defrag->transid;
 108                        if (defrag->last_offset > entry->last_offset)
 109                                entry->last_offset = defrag->last_offset;
 110                        return -EEXIST;
 111                }
 112        }
 113        set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
 114        rb_link_node(&defrag->rb_node, parent, p);
 115        rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
 116        return 0;
 117}
 118
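     /*
      * Auto defrag should only run when the autodefrag mount option is set
      * and the filesystem is not in the process of closing.
      */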
 119static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
 120{
 121        if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
 122                return 0;
 123
 124        if (btrfs_fs_closing(fs_info))
 125                return 0;
 126
 127        return 1;
 128}
 129
 130/*
 131 * insert a defrag record for this inode if auto defrag is
 132 * enabled
 133 */
 134int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 135                           struct btrfs_inode *inode)
 136{
 137        struct btrfs_root *root = inode->root;
 138        struct btrfs_fs_info *fs_info = root->fs_info;
 139        struct inode_defrag *defrag;
 140        u64 transid;
 141        int ret;
 142
 143        if (!__need_auto_defrag(fs_info))
 144                return 0;
 145
 146        if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
 147                return 0;
 148
 149        if (trans)
 150                transid = trans->transid;
 151        else
 152                transid = inode->root->last_trans;
 153
 154        defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
 155        if (!defrag)
 156                return -ENOMEM;
 157
 158        defrag->ino = btrfs_ino(inode);
 159        defrag->transid = transid;
 160        defrag->root = root->root_key.objectid;
 161
 162        spin_lock(&fs_info->defrag_inodes_lock);
 163        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
  164                /*
  165                 * If we set the IN_DEFRAG flag and the inode is evicted from
  166                 * memory and then re-read, the new in-memory inode won't have
  167                 * the flag set, so we may find an existing defrag record here.
  168                 */
 169                ret = __btrfs_add_inode_defrag(inode, defrag);
 170                if (ret)
 171                        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 172        } else {
 173                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 174        }
 175        spin_unlock(&fs_info->defrag_inodes_lock);
 176        return 0;
 177}
 178
 179/*
 180 * Requeue the defrag object. If there is a defrag object that points to
 181 * the same inode in the tree, we will merge them together (by
 182 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 183 */
 184static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
 185                                       struct inode_defrag *defrag)
 186{
 187        struct btrfs_fs_info *fs_info = inode->root->fs_info;
 188        int ret;
 189
 190        if (!__need_auto_defrag(fs_info))
 191                goto out;
 192
 193        /*
  194         * Here we don't check the IN_DEFRAG flag, because we need to merge
  195         * the new record with any existing one.
 196         */
 197        spin_lock(&fs_info->defrag_inodes_lock);
 198        ret = __btrfs_add_inode_defrag(inode, defrag);
 199        spin_unlock(&fs_info->defrag_inodes_lock);
 200        if (ret)
 201                goto out;
 202        return;
 203out:
 204        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 205}
 206
 207/*
  208 * Pick the defraggable inode that we want; if it doesn't exist, we will
  209 * get the next one.
 210 */
 211static struct inode_defrag *
 212btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
 213{
 214        struct inode_defrag *entry = NULL;
 215        struct inode_defrag tmp;
 216        struct rb_node *p;
 217        struct rb_node *parent = NULL;
 218        int ret;
 219
 220        tmp.ino = ino;
 221        tmp.root = root;
 222
 223        spin_lock(&fs_info->defrag_inodes_lock);
 224        p = fs_info->defrag_inodes.rb_node;
 225        while (p) {
 226                parent = p;
 227                entry = rb_entry(parent, struct inode_defrag, rb_node);
 228
 229                ret = __compare_inode_defrag(&tmp, entry);
 230                if (ret < 0)
 231                        p = parent->rb_left;
 232                else if (ret > 0)
 233                        p = parent->rb_right;
 234                else
 235                        goto out;
 236        }
 237
 238        if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
 239                parent = rb_next(parent);
 240                if (parent)
 241                        entry = rb_entry(parent, struct inode_defrag, rb_node);
 242                else
 243                        entry = NULL;
 244        }
 245out:
 246        if (entry)
 247                rb_erase(parent, &fs_info->defrag_inodes);
 248        spin_unlock(&fs_info->defrag_inodes_lock);
 249        return entry;
 250}
 251
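     /* Remove and free every inode defrag record still queued in the tree. */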
 252void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 253{
 254        struct inode_defrag *defrag;
 255        struct rb_node *node;
 256
 257        spin_lock(&fs_info->defrag_inodes_lock);
 258        node = rb_first(&fs_info->defrag_inodes);
 259        while (node) {
 260                rb_erase(node, &fs_info->defrag_inodes);
 261                defrag = rb_entry(node, struct inode_defrag, rb_node);
 262                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 263
 264                cond_resched_lock(&fs_info->defrag_inodes_lock);
 265
 266                node = rb_first(&fs_info->defrag_inodes);
 267        }
 268        spin_unlock(&fs_info->defrag_inodes_lock);
 269}
 270
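     /* Amount of defrag work done in one pass before an inode is requeued. */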
 271#define BTRFS_DEFRAG_BATCH      1024
 272
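     /*
      * Look up the root and inode for @defrag and run one batch of defrag on
      * it, starting at defrag->last_offset.  If the whole batch was used, or
      * the file hasn't been scanned from offset zero yet, the record is
      * requeued; otherwise it is freed.
      */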
 273static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 274                                    struct inode_defrag *defrag)
 275{
 276        struct btrfs_root *inode_root;
 277        struct inode *inode;
 278        struct btrfs_key key;
 279        struct btrfs_ioctl_defrag_range_args range;
 280        int num_defrag;
 281        int ret;
 282
 283        /* get the inode */
 284        key.objectid = defrag->root;
 285        key.type = BTRFS_ROOT_ITEM_KEY;
 286        key.offset = (u64)-1;
 287
 288        inode_root = btrfs_get_fs_root(fs_info, &key, true);
 289        if (IS_ERR(inode_root)) {
 290                ret = PTR_ERR(inode_root);
 291                goto cleanup;
 292        }
 293
 294        key.objectid = defrag->ino;
 295        key.type = BTRFS_INODE_ITEM_KEY;
 296        key.offset = 0;
 297        inode = btrfs_iget(fs_info->sb, &key, inode_root);
 298        btrfs_put_root(inode_root);
 299        if (IS_ERR(inode)) {
 300                ret = PTR_ERR(inode);
 301                goto cleanup;
 302        }
 303
 304        /* do a chunk of defrag */
 305        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 306        memset(&range, 0, sizeof(range));
 307        range.len = (u64)-1;
 308        range.start = defrag->last_offset;
 309
 310        sb_start_write(fs_info->sb);
 311        num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
 312                                       BTRFS_DEFRAG_BATCH);
 313        sb_end_write(fs_info->sb);
 314        /*
 315         * if we filled the whole defrag batch, there
 316         * must be more work to do.  Queue this defrag
 317         * again
 318         */
 319        if (num_defrag == BTRFS_DEFRAG_BATCH) {
 320                defrag->last_offset = range.start;
 321                btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 322        } else if (defrag->last_offset && !defrag->cycled) {
 323                /*
 324                 * we didn't fill our defrag batch, but
 325                 * we didn't start at zero.  Make sure we loop
 326                 * around to the start of the file.
 327                 */
 328                defrag->last_offset = 0;
 329                defrag->cycled = 1;
 330                btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 331        } else {
 332                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 333        }
 334
 335        iput(inode);
 336        return 0;
 337cleanup:
 338        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 339        return ret;
 340}
 341
 342/*
 343 * run through the list of inodes in the FS that need
 344 * defragging
 345 */
 346int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 347{
 348        struct inode_defrag *defrag;
 349        u64 first_ino = 0;
 350        u64 root_objectid = 0;
 351
 352        atomic_inc(&fs_info->defrag_running);
 353        while (1) {
 354                /* Pause the auto defragger. */
 355                if (test_bit(BTRFS_FS_STATE_REMOUNTING,
 356                             &fs_info->fs_state))
 357                        break;
 358
 359                if (!__need_auto_defrag(fs_info))
 360                        break;
 361
 362                /* find an inode to defrag */
 363                defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
 364                                                 first_ino);
 365                if (!defrag) {
 366                        if (root_objectid || first_ino) {
 367                                root_objectid = 0;
 368                                first_ino = 0;
 369                                continue;
 370                        } else {
 371                                break;
 372                        }
 373                }
 374
 375                first_ino = defrag->ino + 1;
 376                root_objectid = defrag->root;
 377
 378                __btrfs_run_defrag_inode(fs_info, defrag);
 379        }
 380        atomic_dec(&fs_info->defrag_running);
 381
 382        /*
 383         * during unmount, we use the transaction_wait queue to
 384         * wait for the defragger to stop
 385         */
 386        wake_up(&fs_info->transaction_wait);
 387        return 0;
 388}
 389
  390/* Simple helper to copy from an iov_iter into the prepared pages.  This
  391 * should go away and be replaced with calls into generic code.
 392 */
 393static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
 394                                         struct page **prepared_pages,
 395                                         struct iov_iter *i)
 396{
 397        size_t copied = 0;
 398        size_t total_copied = 0;
 399        int pg = 0;
 400        int offset = offset_in_page(pos);
 401
 402        while (write_bytes > 0) {
 403                size_t count = min_t(size_t,
 404                                     PAGE_SIZE - offset, write_bytes);
 405                struct page *page = prepared_pages[pg];
 406                /*
 407                 * Copy data from userspace to the current page
 408                 */
 409                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
 410
 411                /* Flush processor's dcache for this page */
 412                flush_dcache_page(page);
 413
 414                /*
 415                 * if we get a partial write, we can end up with
 416                 * partially up to date pages.  These add
 417                 * a lot of complexity, so make sure they don't
 418                 * happen by forcing this copy to be retried.
 419                 *
 420                 * The rest of the btrfs_file_write code will fall
 421                 * back to page at a time copies after we return 0.
 422                 */
 423                if (!PageUptodate(page) && copied < count)
 424                        copied = 0;
 425
 426                iov_iter_advance(i, copied);
 427                write_bytes -= copied;
 428                total_copied += copied;
 429
 430                /* Return to btrfs_file_write_iter to fault page */
 431                if (unlikely(copied == 0))
 432                        break;
 433
 434                if (copied < PAGE_SIZE - offset) {
 435                        offset += copied;
 436                } else {
 437                        pg++;
 438                        offset = 0;
 439                }
 440        }
 441        return total_copied;
 442}
 443
 444/*
 445 * unlocks pages after btrfs_file_write is done with them
 446 */
 447static void btrfs_drop_pages(struct page **pages, size_t num_pages)
 448{
 449        size_t i;
 450        for (i = 0; i < num_pages; i++) {
  451                /* PageChecked is some magic around finding pages that
  452                 * have been modified without going through
  453                 * btrfs_set_page_dirty(); clear it here. There should be
  454                 * no need to mark the pages accessed, as prepare_pages()
  455                 * should have already done that via find_or_create_page().
  456                 */
 457                ClearPageChecked(pages[i]);
 458                unlock_page(pages[i]);
 459                put_page(pages[i]);
 460        }
 461}
 462
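     /*
      * Walk the extent maps covering [start, start + len) and set the
      * EXTENT_DELALLOC_NEW bit on every part of the range that is currently
      * a hole.
      */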
 463static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
 464                                         const u64 start,
 465                                         const u64 len,
 466                                         struct extent_state **cached_state)
 467{
 468        u64 search_start = start;
 469        const u64 end = start + len - 1;
 470
 471        while (search_start < end) {
 472                const u64 search_len = end - search_start + 1;
 473                struct extent_map *em;
 474                u64 em_len;
 475                int ret = 0;
 476
 477                em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
 478                if (IS_ERR(em))
 479                        return PTR_ERR(em);
 480
 481                if (em->block_start != EXTENT_MAP_HOLE)
 482                        goto next;
 483
 484                em_len = em->len;
 485                if (em->start < search_start)
 486                        em_len -= search_start - em->start;
 487                if (em_len > search_len)
 488                        em_len = search_len;
 489
 490                ret = set_extent_bit(&inode->io_tree, search_start,
 491                                     search_start + em_len - 1,
 492                                     EXTENT_DELALLOC_NEW,
 493                                     NULL, cached_state, GFP_NOFS);
 494next:
 495                search_start = extent_map_end(em);
 496                free_extent_map(em);
 497                if (ret)
 498                        return ret;
 499        }
 500        return 0;
 501}
 502
 503/*
 504 * after copy_from_user, pages need to be dirtied and we need to make
 505 * sure holes are created between the current EOF and the start of
 506 * any next extents (if required).
 507 *
 508 * this also makes the decision about creating an inline extent vs
 509 * doing real data extents, marking pages dirty and delalloc as required.
 510 */
 511int btrfs_dirty_pages(struct inode *inode, struct page **pages,
 512                      size_t num_pages, loff_t pos, size_t write_bytes,
 513                      struct extent_state **cached)
 514{
 515        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 516        int err = 0;
 517        int i;
 518        u64 num_bytes;
 519        u64 start_pos;
 520        u64 end_of_last_block;
 521        u64 end_pos = pos + write_bytes;
 522        loff_t isize = i_size_read(inode);
 523        unsigned int extra_bits = 0;
 524
 525        start_pos = pos & ~((u64) fs_info->sectorsize - 1);
 526        num_bytes = round_up(write_bytes + pos - start_pos,
 527                             fs_info->sectorsize);
 528
 529        end_of_last_block = start_pos + num_bytes - 1;
 530
 531        /*
 532         * The pages may have already been dirty, clear out old accounting so
 533         * we can set things up properly
 534         */
 535        clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block,
 536                         EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
 537                         0, 0, cached);
 538
 539        if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
 540                if (start_pos >= isize &&
 541                    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
 542                        /*
 543                         * There can't be any extents following eof in this case
 544                         * so just set the delalloc new bit for the range
 545                         * directly.
 546                         */
 547                        extra_bits |= EXTENT_DELALLOC_NEW;
 548                } else {
 549                        err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
 550                                                            start_pos,
 551                                                            num_bytes, cached);
 552                        if (err)
 553                                return err;
 554                }
 555        }
 556
 557        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
 558                                        extra_bits, cached);
 559        if (err)
 560                return err;
 561
 562        for (i = 0; i < num_pages; i++) {
 563                struct page *p = pages[i];
 564                SetPageUptodate(p);
 565                ClearPageChecked(p);
 566                set_page_dirty(p);
 567        }
 568
 569        /*
 570         * we've only changed i_size in ram, and we haven't updated
 571         * the disk i_size.  There is no need to log the inode
 572         * at this time.
 573         */
 574        if (end_pos > isize)
 575                i_size_write(inode, end_pos);
 576        return 0;
 577}
 578
 579/*
 580 * this drops all the extents in the cache that intersect the range
 581 * [start, end].  Existing extents are split as required.
 582 */
 583void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
 584                             int skip_pinned)
 585{
 586        struct extent_map *em;
 587        struct extent_map *split = NULL;
 588        struct extent_map *split2 = NULL;
 589        struct extent_map_tree *em_tree = &inode->extent_tree;
 590        u64 len = end - start + 1;
 591        u64 gen;
 592        int ret;
 593        int testend = 1;
 594        unsigned long flags;
 595        int compressed = 0;
 596        bool modified;
 597
 598        WARN_ON(end < start);
 599        if (end == (u64)-1) {
 600                len = (u64)-1;
 601                testend = 0;
 602        }
 603        while (1) {
 604                int no_splits = 0;
 605
 606                modified = false;
 607                if (!split)
 608                        split = alloc_extent_map();
 609                if (!split2)
 610                        split2 = alloc_extent_map();
 611                if (!split || !split2)
 612                        no_splits = 1;
 613
 614                write_lock(&em_tree->lock);
 615                em = lookup_extent_mapping(em_tree, start, len);
 616                if (!em) {
 617                        write_unlock(&em_tree->lock);
 618                        break;
 619                }
 620                flags = em->flags;
 621                gen = em->generation;
 622                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
 623                        if (testend && em->start + em->len >= start + len) {
 624                                free_extent_map(em);
 625                                write_unlock(&em_tree->lock);
 626                                break;
 627                        }
 628                        start = em->start + em->len;
 629                        if (testend)
 630                                len = start + len - (em->start + em->len);
 631                        free_extent_map(em);
 632                        write_unlock(&em_tree->lock);
 633                        continue;
 634                }
 635                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 636                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
 637                clear_bit(EXTENT_FLAG_LOGGING, &flags);
 638                modified = !list_empty(&em->list);
 639                if (no_splits)
 640                        goto next;
 641
 642                if (em->start < start) {
 643                        split->start = em->start;
 644                        split->len = start - em->start;
 645
 646                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 647                                split->orig_start = em->orig_start;
 648                                split->block_start = em->block_start;
 649
 650                                if (compressed)
 651                                        split->block_len = em->block_len;
 652                                else
 653                                        split->block_len = split->len;
 654                                split->orig_block_len = max(split->block_len,
 655                                                em->orig_block_len);
 656                                split->ram_bytes = em->ram_bytes;
 657                        } else {
 658                                split->orig_start = split->start;
 659                                split->block_len = 0;
 660                                split->block_start = em->block_start;
 661                                split->orig_block_len = 0;
 662                                split->ram_bytes = split->len;
 663                        }
 664
 665                        split->generation = gen;
 666                        split->flags = flags;
 667                        split->compress_type = em->compress_type;
 668                        replace_extent_mapping(em_tree, em, split, modified);
 669                        free_extent_map(split);
 670                        split = split2;
 671                        split2 = NULL;
 672                }
 673                if (testend && em->start + em->len > start + len) {
 674                        u64 diff = start + len - em->start;
 675
 676                        split->start = start + len;
 677                        split->len = em->start + em->len - (start + len);
 678                        split->flags = flags;
 679                        split->compress_type = em->compress_type;
 680                        split->generation = gen;
 681
 682                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 683                                split->orig_block_len = max(em->block_len,
 684                                                    em->orig_block_len);
 685
 686                                split->ram_bytes = em->ram_bytes;
 687                                if (compressed) {
 688                                        split->block_len = em->block_len;
 689                                        split->block_start = em->block_start;
 690                                        split->orig_start = em->orig_start;
 691                                } else {
 692                                        split->block_len = split->len;
 693                                        split->block_start = em->block_start
 694                                                + diff;
 695                                        split->orig_start = em->orig_start;
 696                                }
 697                        } else {
 698                                split->ram_bytes = split->len;
 699                                split->orig_start = split->start;
 700                                split->block_len = 0;
 701                                split->block_start = em->block_start;
 702                                split->orig_block_len = 0;
 703                        }
 704
 705                        if (extent_map_in_tree(em)) {
 706                                replace_extent_mapping(em_tree, em, split,
 707                                                       modified);
 708                        } else {
 709                                ret = add_extent_mapping(em_tree, split,
 710                                                         modified);
 711                                ASSERT(ret == 0); /* Logic error */
 712                        }
 713                        free_extent_map(split);
 714                        split = NULL;
 715                }
 716next:
 717                if (extent_map_in_tree(em))
 718                        remove_extent_mapping(em_tree, em);
 719                write_unlock(&em_tree->lock);
 720
 721                /* once for us */
 722                free_extent_map(em);
  723                /* once for the tree */
 724                free_extent_map(em);
 725        }
 726        if (split)
 727                free_extent_map(split);
 728        if (split2)
 729                free_extent_map(split2);
 730}
 731
 732/*
  733 * This is very complex, but the basic idea is to drop all extents
  734 * in the range start - end.  If drop_end is not NULL it is set to
  735 * min(end, end of the last dropped extent), or to end if nothing was dropped.
 736 *
 737 * If an extent intersects the range but is not entirely inside the range
 738 * it is either truncated or split.  Anything entirely inside the range
 739 * is deleted from the tree.
 740 */
 741int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 742                         struct btrfs_root *root, struct inode *inode,
 743                         struct btrfs_path *path, u64 start, u64 end,
 744                         u64 *drop_end, int drop_cache,
 745                         int replace_extent,
 746                         u32 extent_item_size,
 747                         int *key_inserted)
 748{
 749        struct btrfs_fs_info *fs_info = root->fs_info;
 750        struct extent_buffer *leaf;
 751        struct btrfs_file_extent_item *fi;
 752        struct btrfs_ref ref = { 0 };
 753        struct btrfs_key key;
 754        struct btrfs_key new_key;
 755        u64 ino = btrfs_ino(BTRFS_I(inode));
 756        u64 search_start = start;
 757        u64 disk_bytenr = 0;
 758        u64 num_bytes = 0;
 759        u64 extent_offset = 0;
 760        u64 extent_end = 0;
 761        u64 last_end = start;
 762        int del_nr = 0;
 763        int del_slot = 0;
 764        int extent_type;
 765        int recow;
 766        int ret;
 767        int modify_tree = -1;
 768        int update_refs;
 769        int found = 0;
 770        int leafs_visited = 0;
 771
 772        if (drop_cache)
 773                btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);
 774
 775        if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
 776                modify_tree = 0;
 777
 778        update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
 779                       root == fs_info->tree_root);
 780        while (1) {
 781                recow = 0;
 782                ret = btrfs_lookup_file_extent(trans, root, path, ino,
 783                                               search_start, modify_tree);
 784                if (ret < 0)
 785                        break;
 786                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
 787                        leaf = path->nodes[0];
 788                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
 789                        if (key.objectid == ino &&
 790                            key.type == BTRFS_EXTENT_DATA_KEY)
 791                                path->slots[0]--;
 792                }
 793                ret = 0;
 794                leafs_visited++;
 795next_slot:
 796                leaf = path->nodes[0];
 797                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 798                        BUG_ON(del_nr > 0);
 799                        ret = btrfs_next_leaf(root, path);
 800                        if (ret < 0)
 801                                break;
 802                        if (ret > 0) {
 803                                ret = 0;
 804                                break;
 805                        }
 806                        leafs_visited++;
 807                        leaf = path->nodes[0];
 808                        recow = 1;
 809                }
 810
 811                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 812
 813                if (key.objectid > ino)
 814                        break;
 815                if (WARN_ON_ONCE(key.objectid < ino) ||
 816                    key.type < BTRFS_EXTENT_DATA_KEY) {
 817                        ASSERT(del_nr == 0);
 818                        path->slots[0]++;
 819                        goto next_slot;
 820                }
 821                if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
 822                        break;
 823
 824                fi = btrfs_item_ptr(leaf, path->slots[0],
 825                                    struct btrfs_file_extent_item);
 826                extent_type = btrfs_file_extent_type(leaf, fi);
 827
 828                if (extent_type == BTRFS_FILE_EXTENT_REG ||
 829                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
 830                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
 831                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
 832                        extent_offset = btrfs_file_extent_offset(leaf, fi);
 833                        extent_end = key.offset +
 834                                btrfs_file_extent_num_bytes(leaf, fi);
 835                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 836                        extent_end = key.offset +
 837                                btrfs_file_extent_ram_bytes(leaf, fi);
 838                } else {
 839                        /* can't happen */
 840                        BUG();
 841                }
 842
 843                /*
 844                 * Don't skip extent items representing 0 byte lengths. They
 845                 * used to be created (bug) if while punching holes we hit
 846                 * -ENOSPC condition. So if we find one here, just ensure we
 847                 * delete it, otherwise we would insert a new file extent item
 848                 * with the same key (offset) as that 0 bytes length file
 849                 * extent item in the call to setup_items_for_insert() later
 850                 * in this function.
 851                 */
 852                if (extent_end == key.offset && extent_end >= search_start) {
 853                        last_end = extent_end;
 854                        goto delete_extent_item;
 855                }
 856
 857                if (extent_end <= search_start) {
 858                        path->slots[0]++;
 859                        goto next_slot;
 860                }
 861
 862                found = 1;
 863                search_start = max(key.offset, start);
 864                if (recow || !modify_tree) {
 865                        modify_tree = -1;
 866                        btrfs_release_path(path);
 867                        continue;
 868                }
 869
 870                /*
 871                 *     | - range to drop - |
 872                 *  | -------- extent -------- |
 873                 */
 874                if (start > key.offset && end < extent_end) {
 875                        BUG_ON(del_nr > 0);
 876                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 877                                ret = -EOPNOTSUPP;
 878                                break;
 879                        }
 880
 881                        memcpy(&new_key, &key, sizeof(new_key));
 882                        new_key.offset = start;
 883                        ret = btrfs_duplicate_item(trans, root, path,
 884                                                   &new_key);
 885                        if (ret == -EAGAIN) {
 886                                btrfs_release_path(path);
 887                                continue;
 888                        }
 889                        if (ret < 0)
 890                                break;
 891
 892                        leaf = path->nodes[0];
 893                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
 894                                            struct btrfs_file_extent_item);
 895                        btrfs_set_file_extent_num_bytes(leaf, fi,
 896                                                        start - key.offset);
 897
 898                        fi = btrfs_item_ptr(leaf, path->slots[0],
 899                                            struct btrfs_file_extent_item);
 900
 901                        extent_offset += start - key.offset;
 902                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 903                        btrfs_set_file_extent_num_bytes(leaf, fi,
 904                                                        extent_end - start);
 905                        btrfs_mark_buffer_dirty(leaf);
 906
 907                        if (update_refs && disk_bytenr > 0) {
 908                                btrfs_init_generic_ref(&ref,
 909                                                BTRFS_ADD_DELAYED_REF,
 910                                                disk_bytenr, num_bytes, 0);
 911                                btrfs_init_data_ref(&ref,
 912                                                root->root_key.objectid,
 913                                                new_key.objectid,
 914                                                start - extent_offset);
 915                                ret = btrfs_inc_extent_ref(trans, &ref);
 916                                BUG_ON(ret); /* -ENOMEM */
 917                        }
 918                        key.offset = start;
 919                }
 920                /*
 921                 * From here on out we will have actually dropped something, so
 922                 * last_end can be updated.
 923                 */
 924                last_end = extent_end;
 925
 926                /*
 927                 *  | ---- range to drop ----- |
 928                 *      | -------- extent -------- |
 929                 */
 930                if (start <= key.offset && end < extent_end) {
 931                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 932                                ret = -EOPNOTSUPP;
 933                                break;
 934                        }
 935
 936                        memcpy(&new_key, &key, sizeof(new_key));
 937                        new_key.offset = end;
 938                        btrfs_set_item_key_safe(fs_info, path, &new_key);
 939
 940                        extent_offset += end - key.offset;
 941                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
 942                        btrfs_set_file_extent_num_bytes(leaf, fi,
 943                                                        extent_end - end);
 944                        btrfs_mark_buffer_dirty(leaf);
 945                        if (update_refs && disk_bytenr > 0)
 946                                inode_sub_bytes(inode, end - key.offset);
 947                        break;
 948                }
 949
 950                search_start = extent_end;
 951                /*
 952                 *       | ---- range to drop ----- |
 953                 *  | -------- extent -------- |
 954                 */
 955                if (start > key.offset && end >= extent_end) {
 956                        BUG_ON(del_nr > 0);
 957                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 958                                ret = -EOPNOTSUPP;
 959                                break;
 960                        }
 961
 962                        btrfs_set_file_extent_num_bytes(leaf, fi,
 963                                                        start - key.offset);
 964                        btrfs_mark_buffer_dirty(leaf);
 965                        if (update_refs && disk_bytenr > 0)
 966                                inode_sub_bytes(inode, extent_end - start);
 967                        if (end == extent_end)
 968                                break;
 969
 970                        path->slots[0]++;
 971                        goto next_slot;
 972                }
 973
 974                /*
 975                 *  | ---- range to drop ----- |
 976                 *    | ------ extent ------ |
 977                 */
 978                if (start <= key.offset && end >= extent_end) {
 979delete_extent_item:
 980                        if (del_nr == 0) {
 981                                del_slot = path->slots[0];
 982                                del_nr = 1;
 983                        } else {
 984                                BUG_ON(del_slot + del_nr != path->slots[0]);
 985                                del_nr++;
 986                        }
 987
 988                        if (update_refs &&
 989                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
 990                                inode_sub_bytes(inode,
 991                                                extent_end - key.offset);
 992                                extent_end = ALIGN(extent_end,
 993                                                   fs_info->sectorsize);
 994                        } else if (update_refs && disk_bytenr > 0) {
 995                                btrfs_init_generic_ref(&ref,
 996                                                BTRFS_DROP_DELAYED_REF,
 997                                                disk_bytenr, num_bytes, 0);
 998                                btrfs_init_data_ref(&ref,
 999                                                root->root_key.objectid,
1000                                                key.objectid,
1001                                                key.offset - extent_offset);
1002                                ret = btrfs_free_extent(trans, &ref);
1003                                BUG_ON(ret); /* -ENOMEM */
1004                                inode_sub_bytes(inode,
1005                                                extent_end - key.offset);
1006                        }
1007
1008                        if (end == extent_end)
1009                                break;
1010
1011                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
1012                                path->slots[0]++;
1013                                goto next_slot;
1014                        }
1015
1016                        ret = btrfs_del_items(trans, root, path, del_slot,
1017                                              del_nr);
1018                        if (ret) {
1019                                btrfs_abort_transaction(trans, ret);
1020                                break;
1021                        }
1022
1023                        del_nr = 0;
1024                        del_slot = 0;
1025
1026                        btrfs_release_path(path);
1027                        continue;
1028                }
1029
1030                BUG();
1031        }
1032
1033        if (!ret && del_nr > 0) {
1034                /*
1035                 * Set path->slots[0] to first slot, so that after the delete
 1036                 * if items are moved off from our leaf to its immediate left or
 1037                 * right neighbor leaves, we end up with a correct and adjusted
1038                 * path->slots[0] for our insertion (if replace_extent != 0).
1039                 */
1040                path->slots[0] = del_slot;
1041                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1042                if (ret)
1043                        btrfs_abort_transaction(trans, ret);
1044        }
1045
1046        leaf = path->nodes[0];
1047        /*
1048         * If btrfs_del_items() was called, it might have deleted a leaf, in
1049         * which case it unlocked our path, so check path->locks[0] matches a
1050         * write lock.
1051         */
1052        if (!ret && replace_extent && leafs_visited == 1 &&
1053            (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
1054             path->locks[0] == BTRFS_WRITE_LOCK) &&
1055            btrfs_leaf_free_space(leaf) >=
1056            sizeof(struct btrfs_item) + extent_item_size) {
1057
1058                key.objectid = ino;
1059                key.type = BTRFS_EXTENT_DATA_KEY;
1060                key.offset = start;
1061                if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
1062                        struct btrfs_key slot_key;
1063
1064                        btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
1065                        if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
1066                                path->slots[0]++;
1067                }
1068                setup_items_for_insert(root, path, &key,
1069                                       &extent_item_size,
1070                                       extent_item_size,
1071                                       sizeof(struct btrfs_item) +
1072                                       extent_item_size, 1);
1073                *key_inserted = 1;
1074        }
1075
1076        if (!replace_extent || !(*key_inserted))
1077                btrfs_release_path(path);
1078        if (drop_end)
1079                *drop_end = found ? min(end, last_end) : end;
1080        return ret;
1081}
1082
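     /*
      * Convenience wrapper around __btrfs_drop_extents() that allocates and
      * frees the btrfs_path for the caller.
      */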
1083int btrfs_drop_extents(struct btrfs_trans_handle *trans,
1084                       struct btrfs_root *root, struct inode *inode, u64 start,
1085                       u64 end, int drop_cache)
1086{
1087        struct btrfs_path *path;
1088        int ret;
1089
1090        path = btrfs_alloc_path();
1091        if (!path)
1092                return -ENOMEM;
1093        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
1094                                   drop_cache, 0, 0, NULL);
1095        btrfs_free_path(path);
1096        return ret;
1097}
1098
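     /*
      * Check whether the file extent item at @slot refers to the same data
      * extent (same disk bytenr and orig_offset, regular type, no compression
      * or encryption).  If *start or *end are non-zero they must match the
      * item's range.  On success return 1 and store the item's file range in
      * *start and *end.
      */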
1099static int extent_mergeable(struct extent_buffer *leaf, int slot,
1100                            u64 objectid, u64 bytenr, u64 orig_offset,
1101                            u64 *start, u64 *end)
1102{
1103        struct btrfs_file_extent_item *fi;
1104        struct btrfs_key key;
1105        u64 extent_end;
1106
1107        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1108                return 0;
1109
1110        btrfs_item_key_to_cpu(leaf, &key, slot);
1111        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
1112                return 0;
1113
1114        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1115        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
1116            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
1117            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
1118            btrfs_file_extent_compression(leaf, fi) ||
1119            btrfs_file_extent_encryption(leaf, fi) ||
1120            btrfs_file_extent_other_encoding(leaf, fi))
1121                return 0;
1122
1123        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1124        if ((*start && *start != key.offset) || (*end && *end != extent_end))
1125                return 0;
1126
1127        *start = key.offset;
1128        *end = extent_end;
1129        return 1;
1130}
1131
1132/*
 1133 * Mark the extent in the range start - end as written.
 1134 *
 1135 * This changes the extent type from 'pre-allocated' to 'regular'. If only
 1136 * part of the extent is marked as written, the extent will be split into
 1137 * two or three.
1138 */
1139int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
1140                              struct btrfs_inode *inode, u64 start, u64 end)
1141{
1142        struct btrfs_fs_info *fs_info = trans->fs_info;
1143        struct btrfs_root *root = inode->root;
1144        struct extent_buffer *leaf;
1145        struct btrfs_path *path;
1146        struct btrfs_file_extent_item *fi;
1147        struct btrfs_ref ref = { 0 };
1148        struct btrfs_key key;
1149        struct btrfs_key new_key;
1150        u64 bytenr;
1151        u64 num_bytes;
1152        u64 extent_end;
1153        u64 orig_offset;
1154        u64 other_start;
1155        u64 other_end;
1156        u64 split;
1157        int del_nr = 0;
1158        int del_slot = 0;
1159        int recow;
1160        int ret;
1161        u64 ino = btrfs_ino(inode);
1162
1163        path = btrfs_alloc_path();
1164        if (!path)
1165                return -ENOMEM;
1166again:
1167        recow = 0;
1168        split = start;
1169        key.objectid = ino;
1170        key.type = BTRFS_EXTENT_DATA_KEY;
1171        key.offset = split;
1172
1173        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1174        if (ret < 0)
1175                goto out;
1176        if (ret > 0 && path->slots[0] > 0)
1177                path->slots[0]--;
1178
1179        leaf = path->nodes[0];
1180        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1181        if (key.objectid != ino ||
1182            key.type != BTRFS_EXTENT_DATA_KEY) {
1183                ret = -EINVAL;
1184                btrfs_abort_transaction(trans, ret);
1185                goto out;
1186        }
1187        fi = btrfs_item_ptr(leaf, path->slots[0],
1188                            struct btrfs_file_extent_item);
1189        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
1190                ret = -EINVAL;
1191                btrfs_abort_transaction(trans, ret);
1192                goto out;
1193        }
1194        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1195        if (key.offset > start || extent_end < end) {
1196                ret = -EINVAL;
1197                btrfs_abort_transaction(trans, ret);
1198                goto out;
1199        }
1200
1201        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1202        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1203        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
1204        memcpy(&new_key, &key, sizeof(new_key));
1205
1206        if (start == key.offset && end < extent_end) {
1207                other_start = 0;
1208                other_end = start;
1209                if (extent_mergeable(leaf, path->slots[0] - 1,
1210                                     ino, bytenr, orig_offset,
1211                                     &other_start, &other_end)) {
1212                        new_key.offset = end;
1213                        btrfs_set_item_key_safe(fs_info, path, &new_key);
1214                        fi = btrfs_item_ptr(leaf, path->slots[0],
1215                                            struct btrfs_file_extent_item);
1216                        btrfs_set_file_extent_generation(leaf, fi,
1217                                                         trans->transid);
1218                        btrfs_set_file_extent_num_bytes(leaf, fi,
1219                                                        extent_end - end);
1220                        btrfs_set_file_extent_offset(leaf, fi,
1221                                                     end - orig_offset);
1222                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1223                                            struct btrfs_file_extent_item);
1224                        btrfs_set_file_extent_generation(leaf, fi,
1225                                                         trans->transid);
1226                        btrfs_set_file_extent_num_bytes(leaf, fi,
1227                                                        end - other_start);
1228                        btrfs_mark_buffer_dirty(leaf);
1229                        goto out;
1230                }
1231        }
1232
1233        if (start > key.offset && end == extent_end) {
1234                other_start = end;
1235                other_end = 0;
1236                if (extent_mergeable(leaf, path->slots[0] + 1,
1237                                     ino, bytenr, orig_offset,
1238                                     &other_start, &other_end)) {
1239                        fi = btrfs_item_ptr(leaf, path->slots[0],
1240                                            struct btrfs_file_extent_item);
1241                        btrfs_set_file_extent_num_bytes(leaf, fi,
1242                                                        start - key.offset);
1243                        btrfs_set_file_extent_generation(leaf, fi,
1244                                                         trans->transid);
1245                        path->slots[0]++;
1246                        new_key.offset = start;
1247                        btrfs_set_item_key_safe(fs_info, path, &new_key);
1248
1249                        fi = btrfs_item_ptr(leaf, path->slots[0],
1250                                            struct btrfs_file_extent_item);
1251                        btrfs_set_file_extent_generation(leaf, fi,
1252                                                         trans->transid);
1253                        btrfs_set_file_extent_num_bytes(leaf, fi,
1254                                                        other_end - start);
1255                        btrfs_set_file_extent_offset(leaf, fi,
1256                                                     start - orig_offset);
1257                        btrfs_mark_buffer_dirty(leaf);
1258                        goto out;
1259                }
1260        }
1261
1262        while (start > key.offset || end < extent_end) {
1263                if (key.offset == start)
1264                        split = end;
1265
1266                new_key.offset = split;
1267                ret = btrfs_duplicate_item(trans, root, path, &new_key);
1268                if (ret == -EAGAIN) {
1269                        btrfs_release_path(path);
1270                        goto again;
1271                }
1272                if (ret < 0) {
1273                        btrfs_abort_transaction(trans, ret);
1274                        goto out;
1275                }
1276
1277                leaf = path->nodes[0];
1278                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1279                                    struct btrfs_file_extent_item);
1280                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1281                btrfs_set_file_extent_num_bytes(leaf, fi,
1282                                                split - key.offset);
1283
1284                fi = btrfs_item_ptr(leaf, path->slots[0],
1285                                    struct btrfs_file_extent_item);
1286
1287                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1288                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1289                btrfs_set_file_extent_num_bytes(leaf, fi,
1290                                                extent_end - split);
1291                btrfs_mark_buffer_dirty(leaf);
1292
1293                btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
1294                                       num_bytes, 0);
1295                btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
1296                                    orig_offset);
1297                ret = btrfs_inc_extent_ref(trans, &ref);
1298                if (ret) {
1299                        btrfs_abort_transaction(trans, ret);
1300                        goto out;
1301                }
1302
1303                if (split == start) {
1304                        key.offset = start;
1305                } else {
1306                        if (start != key.offset) {
1307                                ret = -EINVAL;
1308                                btrfs_abort_transaction(trans, ret);
1309                                goto out;
1310                        }
1311                        path->slots[0]--;
1312                        extent_end = end;
1313                }
1314                recow = 1;
1315        }
1316
1317        other_start = end;
1318        other_end = 0;
1319        btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1320                               num_bytes, 0);
1321        btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
1322        if (extent_mergeable(leaf, path->slots[0] + 1,
1323                             ino, bytenr, orig_offset,
1324                             &other_start, &other_end)) {
1325                if (recow) {
1326                        btrfs_release_path(path);
1327                        goto again;
1328                }
1329                extent_end = other_end;
1330                del_slot = path->slots[0] + 1;
1331                del_nr++;
1332                ret = btrfs_free_extent(trans, &ref);
1333                if (ret) {
1334                        btrfs_abort_transaction(trans, ret);
1335                        goto out;
1336                }
1337        }
1338        other_start = 0;
1339        other_end = start;
1340        if (extent_mergeable(leaf, path->slots[0] - 1,
1341                             ino, bytenr, orig_offset,
1342                             &other_start, &other_end)) {
1343                if (recow) {
1344                        btrfs_release_path(path);
1345                        goto again;
1346                }
1347                key.offset = other_start;
1348                del_slot = path->slots[0];
1349                del_nr++;
1350                ret = btrfs_free_extent(trans, &ref);
1351                if (ret) {
1352                        btrfs_abort_transaction(trans, ret);
1353                        goto out;
1354                }
1355        }
1356        if (del_nr == 0) {
1357                fi = btrfs_item_ptr(leaf, path->slots[0],
1358                           struct btrfs_file_extent_item);
1359                btrfs_set_file_extent_type(leaf, fi,
1360                                           BTRFS_FILE_EXTENT_REG);
1361                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1362                btrfs_mark_buffer_dirty(leaf);
1363        } else {
1364                fi = btrfs_item_ptr(leaf, del_slot - 1,
1365                           struct btrfs_file_extent_item);
1366                btrfs_set_file_extent_type(leaf, fi,
1367                                           BTRFS_FILE_EXTENT_REG);
1368                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1369                btrfs_set_file_extent_num_bytes(leaf, fi,
1370                                                extent_end - key.offset);
1371                btrfs_mark_buffer_dirty(leaf);
1372
1373                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1374                if (ret < 0) {
1375                        btrfs_abort_transaction(trans, ret);
1376                        goto out;
1377                }
1378        }
1379out:
1380        btrfs_free_path(path);
1381        return 0;
1382}
1383
1384/*
1385 * on error we return an unlocked page and the error value
1386 * on success we return a locked page and 0
1387 */
1388static int prepare_uptodate_page(struct inode *inode,
1389                                 struct page *page, u64 pos,
1390                                 bool force_uptodate)
1391{
1392        int ret = 0;
1393
1394        if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1395            !PageUptodate(page)) {
1396                ret = btrfs_readpage(NULL, page);
1397                if (ret)
1398                        return ret;
1399                lock_page(page);
1400                if (!PageUptodate(page)) {
1401                        unlock_page(page);
1402                        return -EIO;
1403                }
1404                if (page->mapping != inode->i_mapping) {
1405                        unlock_page(page);
1406                        return -EAGAIN;
1407                }
1408        }
1409        return 0;
1410}
1411
1412/*
1413 * this just gets pages into the page cache and locks them down.
1414 */
1415static noinline int prepare_pages(struct inode *inode, struct page **pages,
1416                                  size_t num_pages, loff_t pos,
1417                                  size_t write_bytes, bool force_uptodate)
1418{
1419        int i;
1420        unsigned long index = pos >> PAGE_SHIFT;
1421        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1422        int err = 0;
1423        int faili;
1424
1425        for (i = 0; i < num_pages; i++) {
1426again:
1427                pages[i] = find_or_create_page(inode->i_mapping, index + i,
1428                                               mask | __GFP_WRITE);
1429                if (!pages[i]) {
1430                        faili = i - 1;
1431                        err = -ENOMEM;
1432                        goto fail;
1433                }
1434
1435                if (i == 0)
1436                        err = prepare_uptodate_page(inode, pages[i], pos,
1437                                                    force_uptodate);
1438                if (!err && i == num_pages - 1)
1439                        err = prepare_uptodate_page(inode, pages[i],
1440                                                    pos + write_bytes, false);
1441                if (err) {
1442                        put_page(pages[i]);
1443                        if (err == -EAGAIN) {
1444                                err = 0;
1445                                goto again;
1446                        }
1447                        faili = i - 1;
1448                        goto fail;
1449                }
1450                wait_on_page_writeback(pages[i]);
1451        }
1452
1453        return 0;
1454fail:
1455        while (faili >= 0) {
1456                unlock_page(pages[faili]);
1457                put_page(pages[faili]);
1458                faili--;
1459        }
1460        return err;
1461
1462}
1463
1464/*
1465 * This function locks the extent and properly waits for data=ordered extents
1466 * to finish before allowing the pages to be modified if needed.
1467 *
1468 * The return value:
1469 * 1 - the extent is locked
1470 * 0 - the extent is not locked, and everything is OK
1471 * -EAGAIN - need to re-prepare the pages
1472 * any other negative value - something went wrong
1473 */
1474static noinline int
1475lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1476                                size_t num_pages, loff_t pos,
1477                                size_t write_bytes,
1478                                u64 *lockstart, u64 *lockend,
1479                                struct extent_state **cached_state)
1480{
1481        struct btrfs_fs_info *fs_info = inode->root->fs_info;
1482        u64 start_pos;
1483        u64 last_pos;
1484        int i;
1485        int ret = 0;
1486
1487        start_pos = round_down(pos, fs_info->sectorsize);
1488        last_pos = start_pos
1489                + round_up(pos + write_bytes - start_pos,
1490                           fs_info->sectorsize) - 1;
1491
1492        if (start_pos < inode->vfs_inode.i_size) {
1493                struct btrfs_ordered_extent *ordered;
1494
1495                lock_extent_bits(&inode->io_tree, start_pos, last_pos,
1496                                cached_state);
1497                ordered = btrfs_lookup_ordered_range(inode, start_pos,
1498                                                     last_pos - start_pos + 1);
1499                if (ordered &&
1500                    ordered->file_offset + ordered->num_bytes > start_pos &&
1501                    ordered->file_offset <= last_pos) {
1502                        unlock_extent_cached(&inode->io_tree, start_pos,
1503                                        last_pos, cached_state);
1504                        for (i = 0; i < num_pages; i++) {
1505                                unlock_page(pages[i]);
1506                                put_page(pages[i]);
1507                        }
1508                        btrfs_start_ordered_extent(&inode->vfs_inode,
1509                                        ordered, 1);
1510                        btrfs_put_ordered_extent(ordered);
1511                        return -EAGAIN;
1512                }
1513                if (ordered)
1514                        btrfs_put_ordered_extent(ordered);
1515
1516                *lockstart = start_pos;
1517                *lockend = last_pos;
1518                ret = 1;
1519        }
1520
1521        /*
1522         * It's possible the pages are dirty right now, but we don't want
1523         * to clean them yet because copy_from_user may catch a page fault
1524         * and we might have to fall back to one page at a time.  If that
1525         * happens, we'll unlock these pages and we'd have a window where
1526         * reclaim could sneak in and drop the once-dirty page on the floor
1527         * without writing it.
1528         *
1529         * We have the pages locked and the extent range locked, so there's
1530         * no way someone can start IO on any dirty pages in this range.
1531         *
1532         * We'll call btrfs_dirty_pages() later on, and that will flip around
1533         * delalloc bits and dirty the pages as required.
1534         */
1535        for (i = 0; i < num_pages; i++) {
1536                set_page_extent_mapped(pages[i]);
1537                WARN_ON(!PageLocked(pages[i]));
1538        }
1539
1540        return ret;
1541}
1542
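    /*
     * Check whether a NOCOW write is possible for the range starting at @pos.
     *
     * Takes the root's snapshot drew lock, waits for ordered extents in the
     * range and asks can_nocow_extent() whether the existing extent can be
     * written in place.  Returns > 0 if NOCOW is possible (trimming
     * *write_bytes to what fits in the existing extent and keeping the drew
     * lock held for the caller to release), 0 if the write must fall back to
     * COW (the drew lock is released), or -EAGAIN if the drew lock could not
     * be taken without blocking.
     */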
1543static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
1544                                    size_t *write_bytes)
1545{
1546        struct btrfs_fs_info *fs_info = inode->root->fs_info;
1547        struct btrfs_root *root = inode->root;
1548        u64 lockstart, lockend;
1549        u64 num_bytes;
1550        int ret;
1551
1552        if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
1553                return -EAGAIN;
1554
1555        lockstart = round_down(pos, fs_info->sectorsize);
1556        lockend = round_up(pos + *write_bytes,
1557                           fs_info->sectorsize) - 1;
1558
1559        btrfs_lock_and_flush_ordered_range(inode, lockstart,
1560                                           lockend, NULL);
1561
1562        num_bytes = lockend - lockstart + 1;
1563        ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1564                        NULL, NULL, NULL);
1565        if (ret <= 0) {
1566                ret = 0;
1567                btrfs_drew_write_unlock(&root->snapshot_lock);
1568        } else {
1569                *write_bytes = min_t(size_t, *write_bytes,
1570                                     num_bytes - pos + lockstart);
1571        }
1572
1573        unlock_extent(&inode->io_tree, lockstart, lockend);
1574
1575        return ret;
1576}
1577
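    /*
     * Buffered write path.  For each chunk of the iov_iter we reserve data and
     * metadata space (or metadata only when writing into NOCOW/prealloc
     * extents), prepare and lock the page cache pages, copy the user data in,
     * mark the range as delalloc via btrfs_dirty_pages() and release whatever
     * reservation ended up unused.  Short copies fall back to a single page
     * per iteration.
     */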
1578static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1579                                               struct iov_iter *i)
1580{
1581        struct file *file = iocb->ki_filp;
1582        loff_t pos = iocb->ki_pos;
1583        struct inode *inode = file_inode(file);
1584        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1585        struct btrfs_root *root = BTRFS_I(inode)->root;
1586        struct page **pages = NULL;
1587        struct extent_changeset *data_reserved = NULL;
1588        u64 release_bytes = 0;
1589        u64 lockstart;
1590        u64 lockend;
1591        size_t num_written = 0;
1592        int nrptrs;
1593        int ret = 0;
1594        bool only_release_metadata = false;
1595        bool force_page_uptodate = false;
1596
1597        nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1598                        PAGE_SIZE / (sizeof(struct page *)));
1599        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1600        nrptrs = max(nrptrs, 8);
1601        pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1602        if (!pages)
1603                return -ENOMEM;
1604
1605        while (iov_iter_count(i) > 0) {
1606                struct extent_state *cached_state = NULL;
1607                size_t offset = offset_in_page(pos);
1608                size_t sector_offset;
1609                size_t write_bytes = min(iov_iter_count(i),
1610                                         nrptrs * (size_t)PAGE_SIZE -
1611                                         offset);
1612                size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1613                                                PAGE_SIZE);
1614                size_t reserve_bytes;
1615                size_t dirty_pages;
1616                size_t copied;
1617                size_t dirty_sectors;
1618                size_t num_sectors;
1619                int extents_locked;
1620
1621                WARN_ON(num_pages > nrptrs);
1622
1623                /*
1624                 * Fault pages before locking them in prepare_pages
1625                 * to avoid a recursive lock
1626                 */
1627                if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1628                        ret = -EFAULT;
1629                        break;
1630                }
1631
1632                only_release_metadata = false;
1633                sector_offset = pos & (fs_info->sectorsize - 1);
1634                reserve_bytes = round_up(write_bytes + sector_offset,
1635                                fs_info->sectorsize);
1636
1637                extent_changeset_release(data_reserved);
1638                ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
1639                                                  write_bytes);
1640                if (ret < 0) {
1641                        if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1642                                                      BTRFS_INODE_PREALLOC)) &&
1643                            check_can_nocow(BTRFS_I(inode), pos,
1644                                        &write_bytes) > 0) {
1645                                /*
1646                                 * For the nodatacow case, there is no need
1647                                 * to reserve data space.
1648                                 */
1649                                only_release_metadata = true;
1650                                /*
1651                                 * our prealloc extent may be smaller than
1652                                 * write_bytes, so scale down.
1653                                 */
1654                                num_pages = DIV_ROUND_UP(write_bytes + offset,
1655                                                         PAGE_SIZE);
1656                                reserve_bytes = round_up(write_bytes +
1657                                                         sector_offset,
1658                                                         fs_info->sectorsize);
1659                        } else {
1660                                break;
1661                        }
1662                }
1663
1664                WARN_ON(reserve_bytes == 0);
1665                ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1666                                reserve_bytes);
1667                if (ret) {
1668                        if (!only_release_metadata)
1669                                btrfs_free_reserved_data_space(inode,
1670                                                data_reserved, pos,
1671                                                write_bytes);
1672                        else
1673                                btrfs_drew_write_unlock(&root->snapshot_lock);
1674                        break;
1675                }
1676
1677                release_bytes = reserve_bytes;
1678again:
1679                /*
1680                 * This is going to set up the pages array with the number of
1681                 * pages we want, so we don't really need to worry about the
1682                 * contents of pages from loop to loop.
1683                 */
1684                ret = prepare_pages(inode, pages, num_pages,
1685                                    pos, write_bytes,
1686                                    force_page_uptodate);
1687                if (ret) {
1688                        btrfs_delalloc_release_extents(BTRFS_I(inode),
1689                                                       reserve_bytes);
1690                        break;
1691                }
1692
1693                extents_locked = lock_and_cleanup_extent_if_need(
1694                                BTRFS_I(inode), pages,
1695                                num_pages, pos, write_bytes, &lockstart,
1696                                &lockend, &cached_state);
1697                if (extents_locked < 0) {
1698                        if (extents_locked == -EAGAIN)
1699                                goto again;
1700                        btrfs_delalloc_release_extents(BTRFS_I(inode),
1701                                                       reserve_bytes);
1702                        ret = extents_locked;
1703                        break;
1704                }
1705
1706                copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1707
1708                num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1709                dirty_sectors = round_up(copied + sector_offset,
1710                                        fs_info->sectorsize);
1711                dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1712
1713                /*
1714                 * if we have trouble faulting in the pages, fall
1715                 * back to one page at a time
1716                 */
1717                if (copied < write_bytes)
1718                        nrptrs = 1;
1719
1720                if (copied == 0) {
1721                        force_page_uptodate = true;
1722                        dirty_sectors = 0;
1723                        dirty_pages = 0;
1724                } else {
1725                        force_page_uptodate = false;
1726                        dirty_pages = DIV_ROUND_UP(copied + offset,
1727                                                   PAGE_SIZE);
1728                }
1729
1730                if (num_sectors > dirty_sectors) {
1731                        /* release everything except the sectors we dirtied */
1732                        release_bytes -= dirty_sectors <<
1733                                                fs_info->sb->s_blocksize_bits;
1734                        if (only_release_metadata) {
1735                                btrfs_delalloc_release_metadata(BTRFS_I(inode),
1736                                                        release_bytes, true);
1737                        } else {
1738                                u64 __pos;
1739
1740                                __pos = round_down(pos,
1741                                                   fs_info->sectorsize) +
1742                                        (dirty_pages << PAGE_SHIFT);
1743                                btrfs_delalloc_release_space(inode,
1744                                                data_reserved, __pos,
1745                                                release_bytes, true);
1746                        }
1747                }
1748
1749                release_bytes = round_up(copied + sector_offset,
1750                                        fs_info->sectorsize);
1751
1752                if (copied > 0)
1753                        ret = btrfs_dirty_pages(inode, pages, dirty_pages,
1754                                                pos, copied, &cached_state);
1755
1756                /*
1757                 * If we have not locked the extent range, because the range's
1758                 * start offset is >= i_size, we might still have a non-NULL
1759                 * cached extent state, acquired while marking the extent range
1760                 * as delalloc through btrfs_dirty_pages(). Therefore free any
1761                 * possible cached extent state to avoid a memory leak.
1762                 */
1763                if (extents_locked)
1764                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1765                                             lockstart, lockend, &cached_state);
1766                else
1767                        free_extent_state(cached_state);
1768
1769                btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1770                if (ret) {
1771                        btrfs_drop_pages(pages, num_pages);
1772                        break;
1773                }
1774
1775                release_bytes = 0;
1776                if (only_release_metadata)
1777                        btrfs_drew_write_unlock(&root->snapshot_lock);
1778
1779                if (only_release_metadata && copied > 0) {
1780                        lockstart = round_down(pos,
1781                                               fs_info->sectorsize);
1782                        lockend = round_up(pos + copied,
1783                                           fs_info->sectorsize) - 1;
1784
1785                        set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1786                                       lockend, EXTENT_NORESERVE, NULL,
1787                                       NULL, GFP_NOFS);
1788                }
1789
1790                btrfs_drop_pages(pages, num_pages);
1791
1792                cond_resched();
1793
1794                balance_dirty_pages_ratelimited(inode->i_mapping);
1795                if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
1796                        btrfs_btree_balance_dirty(fs_info);
1797
1798                pos += copied;
1799                num_written += copied;
1800        }
1801
1802        kfree(pages);
1803
1804        if (release_bytes) {
1805                if (only_release_metadata) {
1806                        btrfs_drew_write_unlock(&root->snapshot_lock);
1807                        btrfs_delalloc_release_metadata(BTRFS_I(inode),
1808                                        release_bytes, true);
1809                } else {
1810                        btrfs_delalloc_release_space(inode, data_reserved,
1811                                        round_down(pos, fs_info->sectorsize),
1812                                        release_bytes, true);
1813                }
1814        }
1815
1816        extent_changeset_free(data_reserved);
1817        return num_written ? num_written : ret;
1818}
1819
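    /*
     * Direct IO write path.  If generic_file_direct_write() wrote less than
     * requested, fall back to a buffered write for the remaining bytes, then
     * write back and invalidate that range of the page cache so a subsequent
     * direct read sees the new data.
     */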
1820static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1821{
1822        struct file *file = iocb->ki_filp;
1823        struct inode *inode = file_inode(file);
1824        loff_t pos;
1825        ssize_t written;
1826        ssize_t written_buffered;
1827        loff_t endbyte;
1828        int err;
1829
1830        written = generic_file_direct_write(iocb, from);
1831
1832        if (written < 0 || !iov_iter_count(from))
1833                return written;
1834
1835        pos = iocb->ki_pos;
1836        written_buffered = btrfs_buffered_write(iocb, from);
1837        if (written_buffered < 0) {
1838                err = written_buffered;
1839                goto out;
1840        }
1841        /*
1842         * Ensure all data is persisted. We want the next direct IO read to be
1843         * able to read what was just written.
1844         */
1845        endbyte = pos + written_buffered - 1;
1846        err = btrfs_fdatawrite_range(inode, pos, endbyte);
1847        if (err)
1848                goto out;
1849        err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1850        if (err)
1851                goto out;
1852        written += written_buffered;
1853        iocb->ki_pos = pos + written_buffered;
1854        invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1855                                 endbyte >> PAGE_SHIFT);
1856out:
1857        return written ? written : err;
1858}
1859
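    /*
     * Update the inode's mtime and ctime to the current time and bump the
     * inode version before a write, unless timestamp updates are suppressed
     * for this inode (S_NOCMTIME).
     */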
1860static void update_time_for_write(struct inode *inode)
1861{
1862        struct timespec64 now;
1863
1864        if (IS_NOCMTIME(inode))
1865                return;
1866
1867        now = current_time(inode);
1868        if (!timespec64_equal(&inode->i_mtime, &now))
1869                inode->i_mtime = now;
1870
1871        if (!timespec64_equal(&inode->i_ctime, &now))
1872                inode->i_ctime = now;
1873
1874        if (IS_I_VERSION(inode))
1875                inode_inc_iversion(inode);
1876}
1877
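    /*
     * Entry point for write(2) and friends on btrfs files.  Takes the inode
     * lock, performs the generic write checks, handles the IOCB_NOWAIT and
     * read-only filesystem cases, expands a hole past i_size if needed and
     * then dispatches to the direct or buffered write path, followed by
     * generic_write_sync() for O_(D)SYNC writes.
     */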
1878static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1879                                    struct iov_iter *from)
1880{
1881        struct file *file = iocb->ki_filp;
1882        struct inode *inode = file_inode(file);
1883        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1884        struct btrfs_root *root = BTRFS_I(inode)->root;
1885        u64 start_pos;
1886        u64 end_pos;
1887        ssize_t num_written = 0;
1888        const bool sync = iocb->ki_flags & IOCB_DSYNC;
1889        ssize_t err;
1890        loff_t pos;
1891        size_t count;
1892        loff_t oldsize;
1893        int clean_page = 0;
1894
1895        if (!(iocb->ki_flags & IOCB_DIRECT) &&
1896            (iocb->ki_flags & IOCB_NOWAIT))
1897                return -EOPNOTSUPP;
1898
1899        if (iocb->ki_flags & IOCB_NOWAIT) {
1900                if (!inode_trylock(inode))
1901                        return -EAGAIN;
1902        } else {
1903                inode_lock(inode);
1904        }
1905
1906        err = generic_write_checks(iocb, from);
1907        if (err <= 0) {
1908                inode_unlock(inode);
1909                return err;
1910        }
1911
1912        pos = iocb->ki_pos;
1913        count = iov_iter_count(from);
1914        if (iocb->ki_flags & IOCB_NOWAIT) {
1915                /*
1916                 * We will allocate space if nodatacow is not set,
1917                 * so bail out
1918                 */
1919                if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1920                                              BTRFS_INODE_PREALLOC)) ||
1921                    check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
1922                        inode_unlock(inode);
1923                        return -EAGAIN;
1924                }
1925        }
1926
1927        current->backing_dev_info = inode_to_bdi(inode);
1928        err = file_remove_privs(file);
1929        if (err) {
1930                inode_unlock(inode);
1931                goto out;
1932        }
1933
1934        /*
1935         * If BTRFS flips readonly due to some impossible error
1936         * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1937         * although we have opened a file as writable, we have
1938         * to stop this write operation to ensure FS consistency.
1939         */
1940        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1941                inode_unlock(inode);
1942                err = -EROFS;
1943                goto out;
1944        }
1945
1946        /*
1947         * We reserve space for updating the inode when we reserve space for the
1948         * extent we are going to write, so we will enospc out there.  We don't
1949         * need to start yet another transaction to update the inode as we will
1950         * update the inode when we finish writing whatever data we write.
1951         */
1952        update_time_for_write(inode);
1953
1954        start_pos = round_down(pos, fs_info->sectorsize);
1955        oldsize = i_size_read(inode);
1956        if (start_pos > oldsize) {
1957                /* Expand hole size to cover write data, preventing empty gap */
1958                end_pos = round_up(pos + count,
1959                                   fs_info->sectorsize);
1960                err = btrfs_cont_expand(inode, oldsize, end_pos);
1961                if (err) {
1962                        inode_unlock(inode);
1963                        goto out;
1964                }
1965                if (start_pos > round_up(oldsize, fs_info->sectorsize))
1966                        clean_page = 1;
1967        }
1968
1969        if (sync)
1970                atomic_inc(&BTRFS_I(inode)->sync_writers);
1971
1972        if (iocb->ki_flags & IOCB_DIRECT) {
1973                num_written = __btrfs_direct_write(iocb, from);
1974        } else {
1975                num_written = btrfs_buffered_write(iocb, from);
1976                if (num_written > 0)
1977                        iocb->ki_pos = pos + num_written;
1978                if (clean_page)
1979                        pagecache_isize_extended(inode, oldsize,
1980                                                i_size_read(inode));
1981        }
1982
1983        inode_unlock(inode);
1984
1985        /*
1986         * We also have to set last_sub_trans to the current log transid,
1987         * otherwise subsequent syncs to a file that's been synced in this
1988         * transaction will appear to have already occurred.
1989         */
1990        spin_lock(&BTRFS_I(inode)->lock);
1991        BTRFS_I(inode)->last_sub_trans = root->log_transid;
1992        spin_unlock(&BTRFS_I(inode)->lock);
1993        if (num_written > 0)
1994                num_written = generic_write_sync(iocb, num_written);
1995
1996        if (sync)
1997                atomic_dec(&BTRFS_I(inode)->sync_writers);
1998out:
1999        current->backing_dev_info = NULL;
2000        return num_written ? num_written : err;
2001}
2002
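    /*
     * ->release() for btrfs files: free the private readdir buffer and, if the
     * inode was recently truncated to a zero size, flush any newly written
     * data (see the BTRFS_INODE_ORDERED_DATA_CLOSE comment below).
     */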
2003int btrfs_release_file(struct inode *inode, struct file *filp)
2004{
2005        struct btrfs_file_private *private = filp->private_data;
2006
2007        if (private && private->filldir_buf)
2008                kfree(private->filldir_buf);
2009        kfree(private);
2010        filp->private_data = NULL;
2011
2012        /*
2013         * ordered_data_close is set by setattr when we are about to truncate
2014         * a file from a non-zero size to a zero size.  This tries to
2015         * flush down new bytes that may have been written if the
2016         * application were using truncate to replace a file in place.
2017         */
2018        if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
2019                               &BTRFS_I(inode)->runtime_flags))
2020                filemap_flush(inode->i_mapping);
2021        return 0;
2022}
2023
2024static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2025{
2026        int ret;
2027        struct blk_plug plug;
2028
2029        /*
2030         * This is only called in fsync, which would do synchronous writes, so
2031         * a plug can merge adjacent IOs as much as possible.  Especially in the
2032         * case of multiple disks using a raid profile, a large IO can be split into
2033         * several segments of stripe length (currently 64K).
2034         */
2035        blk_start_plug(&plug);
2036        atomic_inc(&BTRFS_I(inode)->sync_writers);
2037        ret = btrfs_fdatawrite_range(inode, start, end);
2038        atomic_dec(&BTRFS_I(inode)->sync_writers);
2039        blk_finish_plug(&plug);
2040
2041        return ret;
2042}
2043
2044/*
2045 * fsync call for both files and directories.  This logs the inode into
2046 * the tree log instead of forcing full commits whenever possible.
2047 *
2048 * It needs to call filemap_fdatawait so that all ordered extent updates are
2049 * in the metadata btree are up to date for copying to the log.
2050 *
2051 * It drops the inode mutex before doing the tree log commit.  This is an
2052 * important optimization for directories because holding the mutex prevents
2053 * new operations on the dir while we write to disk.
2054 */
2055int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2056{
2057        struct dentry *dentry = file_dentry(file);
2058        struct inode *inode = d_inode(dentry);
2059        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2060        struct btrfs_root *root = BTRFS_I(inode)->root;
2061        struct btrfs_trans_handle *trans;
2062        struct btrfs_log_ctx ctx;
2063        int ret = 0, err;
2064
2065        trace_btrfs_sync_file(file, datasync);
2066
2067        btrfs_init_log_ctx(&ctx, inode);
2068
2069        /*
2070         * Set the range to full if the NO_HOLES feature is not enabled.
2071         * This is to avoid missing file extent items representing holes after
2072         * replaying the log.
2073         */
2074        if (!btrfs_fs_incompat(fs_info, NO_HOLES)) {
2075                start = 0;
2076                end = LLONG_MAX;
2077        }
2078
2079        /*
2080         * We write the dirty pages in the range and wait until they complete
2081         * outside of the ->i_mutex, so multiple tasks can flush dirty pages
2082         * concurrently and improve performance.  See
2083         * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2084         */
2085        ret = start_ordered_ops(inode, start, end);
2086        if (ret)
2087                goto out;
2088
2089        inode_lock(inode);
2090
2091        /*
2092         * We take the dio_sem here because the tree log stuff can race with
2093         * lockless dio writes and get an extent map logged for an extent we
2094         * never waited on.  We need it this high up for lockdep reasons.
2095         */
2096        down_write(&BTRFS_I(inode)->dio_sem);
2097
2098        atomic_inc(&root->log_batch);
2099
2100        /*
2101         * If the inode needs a full sync, make sure we use a full range to
2102         * avoid log tree corruption, due to hole detection racing with ordered
2103         * extent completion for adjacent ranges and races between logging and
2104         * completion of ordered extents for adjacent ranges - both races
2105         * could lead to file extent items in the log with overlapping ranges.
2106         * Do this while holding the inode lock, to avoid races with other
2107         * tasks.
2108         */
2109        if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2110                     &BTRFS_I(inode)->runtime_flags)) {
2111                start = 0;
2112                end = LLONG_MAX;
2113        }
2114
2115        /*
2116         * Before we acquired the inode's lock, someone may have dirtied more
2117         * pages in the target range. We need to make sure that writeback for
2118         * any such pages does not start while we are logging the inode, because
2119         * if it does, any of the following might happen when we are not doing a
2120         * full inode sync:
2121         *
2122         * 1) We log an extent after its writeback finishes but before its
2123         *    checksums are added to the csum tree, leading to -EIO errors
2124         *    when attempting to read the extent after a log replay.
2125         *
2126         * 2) We can end up logging an extent before its writeback finishes.
2127         *    Therefore after the log replay we will have a file extent item
2128         *    pointing to an unwritten extent (and no data checksums as well).
2129         *
2130         * So trigger writeback for any possible new dirty pages and then we
2131         * wait for all ordered extents to complete below.
2132         */
2133        ret = start_ordered_ops(inode, start, end);
2134        if (ret) {
2135                up_write(&BTRFS_I(inode)->dio_sem);
2136                inode_unlock(inode);
2137                goto out;
2138        }
2139
2140        /*
2141         * We have to do this here to avoid the priority inversion of waiting on
2142         * IO of a lower priority task while holding a transaction open.
2143         *
2144         * Also, the range length can be represented by u64, we have to do the
2145         * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
2146         */
2147        ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
2148        if (ret) {
2149                up_write(&BTRFS_I(inode)->dio_sem);
2150                inode_unlock(inode);
2151                goto out;
2152        }
2153        atomic_inc(&root->log_batch);
2154
2155        smp_mb();
2156        if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
2157            BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed) {
2158                /*
2159                 * We've had everything committed since the last time we were
2160                 * modified so clear this flag in case it was set for whatever
2161                 * reason, it's no longer relevant.
2162                 */
2163                clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2164                          &BTRFS_I(inode)->runtime_flags);
2165                /*
2166                 * An ordered extent might have started before and completed
2167                 * already with io errors, in which case the inode was not
2168                 * updated and we end up here. So check the inode's mapping
2169                 * for any errors that might have happened since we last
2170                 * called fsync.
2171                 */
2172                ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2173                up_write(&BTRFS_I(inode)->dio_sem);
2174                inode_unlock(inode);
2175                goto out;
2176        }
2177
2178        /*
2179         * We use start here because we will need to wait on the IO to complete
2180         * in btrfs_sync_log, which could require joining a transaction (for
2181         * example checking cross references in the nocow path).  If we use join
2182         * here we could get into a situation where we're waiting on IO to
2183         * happen that is blocked on a transaction trying to commit.  With start
2184         * we inc the extwriter counter, so we wait for all extwriters to exit
2185         * before we start blocking joiners.  This comment is to keep somebody
2186         * from thinking they are super smart and changing this to
2187         * btrfs_join_transaction *cough*Josef*cough*.
2188         */
2189        trans = btrfs_start_transaction(root, 0);
2190        if (IS_ERR(trans)) {
2191                ret = PTR_ERR(trans);
2192                up_write(&BTRFS_I(inode)->dio_sem);
2193                inode_unlock(inode);
2194                goto out;
2195        }
2196
2197        ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
2198        if (ret < 0) {
2199                /* Fallthrough and commit/free transaction. */
2200                ret = 1;
2201        }
2202
2203        /* we've logged all the items and now have a consistent
2204         * version of the file in the log.  It is possible that
2205         * someone will come in and modify the file, but that's
2206         * fine because the log is consistent on disk, and we
2207         * have references to all of the file's extents
2208         *
2209         * It is possible that someone will come in and log the
2210         * file again, but that will end up using the synchronization
2211         * inside btrfs_sync_log to keep things safe.
2212         */
2213        up_write(&BTRFS_I(inode)->dio_sem);
2214        inode_unlock(inode);
2215
2216        if (ret != BTRFS_NO_LOG_SYNC) {
2217                if (!ret) {
2218                        ret = btrfs_sync_log(trans, root, &ctx);
2219                        if (!ret) {
2220                                ret = btrfs_end_transaction(trans);
2221                                goto out;
2222                        }
2223                }
2224                ret = btrfs_commit_transaction(trans);
2225        } else {
2226                ret = btrfs_end_transaction(trans);
2227        }
2228out:
2229        ASSERT(list_empty(&ctx.list));
2230        err = file_check_and_advance_wb_err(file);
2231        if (!ret)
2232                ret = err;
2233        return ret > 0 ? -EIO : ret;
2234}
2235
2236static const struct vm_operations_struct btrfs_file_vm_ops = {
2237        .fault          = filemap_fault,
2238        .map_pages      = filemap_map_pages,
2239        .page_mkwrite   = btrfs_page_mkwrite,
2240};
2241
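    /*
     * Set up a memory mapping of a btrfs file: read faults are served by the
     * generic filemap handlers, while writable faults go through
     * btrfs_page_mkwrite() so the filesystem can reserve space and mark the
     * range delalloc before the page is dirtied.
     */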
2242static int btrfs_file_mmap(struct file  *filp, struct vm_area_struct *vma)
2243{
2244        struct address_space *mapping = filp->f_mapping;
2245
2246        if (!mapping->a_ops->readpage)
2247                return -ENOEXEC;
2248
2249        file_accessed(filp);
2250        vma->vm_ops = &btrfs_file_vm_ops;
2251
2252        return 0;
2253}
2254
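    /*
     * Return 1 if the file extent item at @slot is a hole (a regular extent
     * with a zero disk bytenr) that either starts at @end or ends at @start,
     * so a new hole covering [start, end) can simply be merged with it.
     * Return 0 otherwise.
     */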
2255static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2256                          int slot, u64 start, u64 end)
2257{
2258        struct btrfs_file_extent_item *fi;
2259        struct btrfs_key key;
2260
2261        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2262                return 0;
2263
2264        btrfs_item_key_to_cpu(leaf, &key, slot);
2265        if (key.objectid != btrfs_ino(inode) ||
2266            key.type != BTRFS_EXTENT_DATA_KEY)
2267                return 0;
2268
2269        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2270
2271        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2272                return 0;
2273
2274        if (btrfs_file_extent_disk_bytenr(leaf, fi))
2275                return 0;
2276
2277        if (key.offset == end)
2278                return 1;
2279        if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2280                return 1;
2281        return 0;
2282}
2283
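    /*
     * After extents were dropped, insert a file extent item representing the
     * hole [offset, end) (or extend a neighbouring hole item when possible)
     * and add a matching hole extent map so the fast fsync path knows about
     * the hole.  When the NO_HOLES feature is enabled the file extent item is
     * not needed and only the extent map is updated.
     */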
2284static int fill_holes(struct btrfs_trans_handle *trans,
2285                struct btrfs_inode *inode,
2286                struct btrfs_path *path, u64 offset, u64 end)
2287{
2288        struct btrfs_fs_info *fs_info = trans->fs_info;
2289        struct btrfs_root *root = inode->root;
2290        struct extent_buffer *leaf;
2291        struct btrfs_file_extent_item *fi;
2292        struct extent_map *hole_em;
2293        struct extent_map_tree *em_tree = &inode->extent_tree;
2294        struct btrfs_key key;
2295        int ret;
2296
2297        if (btrfs_fs_incompat(fs_info, NO_HOLES))
2298                goto out;
2299
2300        key.objectid = btrfs_ino(inode);
2301        key.type = BTRFS_EXTENT_DATA_KEY;
2302        key.offset = offset;
2303
2304        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2305        if (ret <= 0) {
2306                /*
2307                 * We should have dropped this offset, so if we find it then
2308                 * something has gone horribly wrong.
2309                 */
2310                if (ret == 0)
2311                        ret = -EINVAL;
2312                return ret;
2313        }
2314
2315        leaf = path->nodes[0];
2316        if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2317                u64 num_bytes;
2318
2319                path->slots[0]--;
2320                fi = btrfs_item_ptr(leaf, path->slots[0],
2321                                    struct btrfs_file_extent_item);
2322                num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2323                        end - offset;
2324                btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2325                btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2326                btrfs_set_file_extent_offset(leaf, fi, 0);
2327                btrfs_mark_buffer_dirty(leaf);
2328                goto out;
2329        }
2330
2331        if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2332                u64 num_bytes;
2333
2334                key.offset = offset;
2335                btrfs_set_item_key_safe(fs_info, path, &key);
2336                fi = btrfs_item_ptr(leaf, path->slots[0],
2337                                    struct btrfs_file_extent_item);
2338                num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2339                        offset;
2340                btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2341                btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2342                btrfs_set_file_extent_offset(leaf, fi, 0);
2343                btrfs_mark_buffer_dirty(leaf);
2344                goto out;
2345        }
2346        btrfs_release_path(path);
2347
2348        ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2349                        offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2350        if (ret)
2351                return ret;
2352
2353out:
2354        btrfs_release_path(path);
2355
2356        hole_em = alloc_extent_map();
2357        if (!hole_em) {
2358                btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2359                set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2360        } else {
2361                hole_em->start = offset;
2362                hole_em->len = end - offset;
2363                hole_em->ram_bytes = hole_em->len;
2364                hole_em->orig_start = offset;
2365
2366                hole_em->block_start = EXTENT_MAP_HOLE;
2367                hole_em->block_len = 0;
2368                hole_em->orig_block_len = 0;
2369                hole_em->compress_type = BTRFS_COMPRESS_NONE;
2370                hole_em->generation = trans->transid;
2371
2372                do {
2373                        btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2374                        write_lock(&em_tree->lock);
2375                        ret = add_extent_mapping(em_tree, hole_em, 1);
2376                        write_unlock(&em_tree->lock);
2377                } while (ret == -EEXIST);
2378                free_extent_map(hole_em);
2379                if (ret)
2380                        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2381                                        &inode->runtime_flags);
2382        }
2383
2384        return 0;
2385}
2386
2387/*
2388 * Find a hole extent on the given inode and change start/len to the end of
2389 * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
2390 * em->start + em->len > start).
2391 * When a hole extent is found, return 1 and modify start/len.
2392 */
2393static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2394{
2395        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2396        struct extent_map *em;
2397        int ret = 0;
2398
2399        em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2400                              round_down(*start, fs_info->sectorsize),
2401                              round_up(*len, fs_info->sectorsize));
2402        if (IS_ERR(em))
2403                return PTR_ERR(em);
2404
2405        /* Hole or vacuum extent(only exists in no-hole mode) */
2406        if (em->block_start == EXTENT_MAP_HOLE) {
2407                ret = 1;
2408                *len = em->start + em->len > *start + *len ?
2409                       0 : *start + *len - em->start - em->len;
2410                *start = em->start + em->len;
2411        }
2412        free_extent_map(em);
2413        return ret;
2414}
2415
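    /*
     * Lock the extent range [lockstart, lockend] for a hole punch style
     * operation: truncate the page cache for the range, lock the extent bits
     * and retry until there are no ordered extents left in the range and no
     * pages were read back in by a racing task.
     */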
2416static int btrfs_punch_hole_lock_range(struct inode *inode,
2417                                       const u64 lockstart,
2418                                       const u64 lockend,
2419                                       struct extent_state **cached_state)
2420{
2421        while (1) {
2422                struct btrfs_ordered_extent *ordered;
2423                int ret;
2424
2425                truncate_pagecache_range(inode, lockstart, lockend);
2426
2427                lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2428                                 cached_state);
2429                ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2430
2431                /*
2432                 * We need to make sure we have no ordered extents in this range
2433                 * and nobody raced in and read a page in this range. If either
2434                 * happened, we need to try again.
2435                 */
2436                if ((!ordered ||
2437                    (ordered->file_offset + ordered->num_bytes <= lockstart ||
2438                     ordered->file_offset > lockend)) &&
2439                     !filemap_range_has_page(inode->i_mapping,
2440                                             lockstart, lockend)) {
2441                        if (ordered)
2442                                btrfs_put_ordered_extent(ordered);
2443                        break;
2444                }
2445                if (ordered)
2446                        btrfs_put_ordered_extent(ordered);
2447                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2448                                     lockend, cached_state);
2449                ret = btrfs_wait_ordered_range(inode, lockstart,
2450                                               lockend - lockstart + 1);
2451                if (ret)
2452                        return ret;
2453        }
2454        return 0;
2455}
2456
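    /*
     * Insert a file extent item for an extent being cloned into this inode:
     * copy the source item from clone_info->extent_buf at
     * clone_info->file_offset, trim it to @clone_len bytes and, unless it is
     * a hole, account the bytes and add a delayed ref for the underlying
     * disk extent.
     */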
2457static int btrfs_insert_clone_extent(struct btrfs_trans_handle *trans,
2458                                     struct inode *inode,
2459                                     struct btrfs_path *path,
2460                                     struct btrfs_clone_extent_info *clone_info,
2461                                     const u64 clone_len)
2462{
2463        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2464        struct btrfs_root *root = BTRFS_I(inode)->root;
2465        struct btrfs_file_extent_item *extent;
2466        struct extent_buffer *leaf;
2467        struct btrfs_key key;
2468        int slot;
2469        struct btrfs_ref ref = { 0 };
2470        u64 ref_offset;
2471        int ret;
2472
2473        if (clone_len == 0)
2474                return 0;
2475
2476        if (clone_info->disk_offset == 0 &&
2477            btrfs_fs_incompat(fs_info, NO_HOLES))
2478                return 0;
2479
2480        key.objectid = btrfs_ino(BTRFS_I(inode));
2481        key.type = BTRFS_EXTENT_DATA_KEY;
2482        key.offset = clone_info->file_offset;
2483        ret = btrfs_insert_empty_item(trans, root, path, &key,
2484                                      clone_info->item_size);
2485        if (ret)
2486                return ret;
2487        leaf = path->nodes[0];
2488        slot = path->slots[0];
2489        write_extent_buffer(leaf, clone_info->extent_buf,
2490                            btrfs_item_ptr_offset(leaf, slot),
2491                            clone_info->item_size);
2492        extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2493        btrfs_set_file_extent_offset(leaf, extent, clone_info->data_offset);
2494        btrfs_set_file_extent_num_bytes(leaf, extent, clone_len);
2495        btrfs_mark_buffer_dirty(leaf);
2496        btrfs_release_path(path);
2497
2498        ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode),
2499                        clone_info->file_offset, clone_len);
2500        if (ret)
2501                return ret;
2502
2503        /* If it's a hole, nothing more needs to be done. */
2504        if (clone_info->disk_offset == 0)
2505                return 0;
2506
2507        inode_add_bytes(inode, clone_len);
2508        btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2509                               clone_info->disk_offset,
2510                               clone_info->disk_len, 0);
2511        ref_offset = clone_info->file_offset - clone_info->data_offset;
2512        btrfs_init_data_ref(&ref, root->root_key.objectid,
2513                            btrfs_ino(BTRFS_I(inode)), ref_offset);
2514        ret = btrfs_inc_extent_ref(trans, &ref);
2515
2516        return ret;
2517}
2518
2519/*
2520 * The respective range must have been previously locked, as well as the inode.
2521 * The end offset is inclusive (last byte of the range).
2522 * @clone_info is NULL for fallocate's hole punching and non-NULL for extent
2523 * cloning.
2524 * When cloning, we don't want to end up in a state where we dropped extents
2525 * without inserting a new one, so we must abort the transaction to avoid a
2526 * corruption.
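     *
     * For illustration: the hole punching case is normally reached from
     * userspace via fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
     * offset, len), while @clone_info is supplied by the clone/reflink paths.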
2527 */
2528int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path,
2529                           const u64 start, const u64 end,
2530                           struct btrfs_clone_extent_info *clone_info,
2531                           struct btrfs_trans_handle **trans_out)
2532{
2533        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2534        u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2535        u64 ino_size = round_up(inode->i_size, fs_info->sectorsize);
2536        struct btrfs_root *root = BTRFS_I(inode)->root;
2537        struct btrfs_trans_handle *trans = NULL;
2538        struct btrfs_block_rsv *rsv;
2539        unsigned int rsv_count;
2540        u64 cur_offset;
2541        u64 drop_end;
2542        u64 len = end - start;
2543        int ret = 0;
2544
2545        if (end <= start)
2546                return -EINVAL;
2547
2548        rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2549        if (!rsv) {
2550                ret = -ENOMEM;
2551                goto out;
2552        }
2553        rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2554        rsv->failfast = 1;
2555
2556        /*
2557         * 1 - update the inode
2558         * 1 - remove the extents in the range
2559         * 1 - add the hole extent if no_holes isn't set or if we are cloning
2560         *     an extent
2561         */
2562        if (!btrfs_fs_incompat(fs_info, NO_HOLES) || clone_info)
2563                rsv_count = 3;
2564        else
2565                rsv_count = 2;
2566
2567        trans = btrfs_start_transaction(root, rsv_count);
2568        if (IS_ERR(trans)) {
2569                ret = PTR_ERR(trans);
2570                trans = NULL;
2571                goto out_free;
2572        }
2573
2574        ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2575                                      min_size, false);
2576        BUG_ON(ret);
2577        trans->block_rsv = rsv;
2578
2579        cur_offset = start;
2580        while (cur_offset < end) {
2581                ret = __btrfs_drop_extents(trans, root, inode, path,
2582                                           cur_offset, end + 1, &drop_end,
2583                                           1, 0, 0, NULL);
2584                if (ret != -ENOSPC) {
2585                        /*
2586                         * When cloning we want to avoid transaction aborts when
2587                         * nothing was done and we are attempting to clone parts
2588                         * of inline extents; in such cases -EOPNOTSUPP is
2589                         * returned by __btrfs_drop_extents() without having
2590                         * changed anything in the file.
2591                         */
2592                        if (clone_info && ret && ret != -EOPNOTSUPP)
2593                                btrfs_abort_transaction(trans, ret);
2594                        break;
2595                }
2596
2597                trans->block_rsv = &fs_info->trans_block_rsv;
2598
2599                if (!clone_info && cur_offset < drop_end &&
2600                    cur_offset < ino_size) {
2601                        ret = fill_holes(trans, BTRFS_I(inode), path,
2602                                        cur_offset, drop_end);
2603                        if (ret) {
2604                                /*
2605                                 * If we failed then we didn't insert our hole
2606                                 * entries for the area we dropped, so now the
2607                                 * fs is corrupted, so we must abort the
2608                                 * transaction.
2609                                 */
2610                                btrfs_abort_transaction(trans, ret);
2611                                break;
2612                        }
2613                } else if (!clone_info && cur_offset < drop_end) {
2614                        /*
2615                         * We are past the i_size here, but since we didn't
2616                         * insert holes we need to clear the mapped area so we
2617                         * know to not set disk_i_size in this area until a new
2618                         * file extent is inserted here.
2619                         */
2620                        ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
2621                                        cur_offset, drop_end - cur_offset);
2622                        if (ret) {
2623                                /*
2624                                 * We couldn't clear our area, so we could
2625                                 * presumably adjust up and corrupt the fs, so
2626                                 * we need to abort.
2627                                 */
2628                                btrfs_abort_transaction(trans, ret);
2629                                break;
2630                        }
2631                }
2632
2633                if (clone_info && drop_end > clone_info->file_offset) {
2634                        u64 clone_len = drop_end - clone_info->file_offset;
2635
2636                        ret = btrfs_insert_clone_extent(trans, inode, path,
2637                                                        clone_info, clone_len);
2638                        if (ret) {
2639                                btrfs_abort_transaction(trans, ret);
2640                                break;
2641                        }
2642                        clone_info->data_len -= clone_len;
2643                        clone_info->data_offset += clone_len;
2644                        clone_info->file_offset += clone_len;
2645                }
2646
2647                cur_offset = drop_end;
2648
2649                ret = btrfs_update_inode(trans, root, inode);
2650                if (ret)
2651                        break;
2652
2653                btrfs_end_transaction(trans);
2654                btrfs_btree_balance_dirty(fs_info);
2655
2656                trans = btrfs_start_transaction(root, rsv_count);
2657                if (IS_ERR(trans)) {
2658                        ret = PTR_ERR(trans);
2659                        trans = NULL;
2660                        break;
2661                }
2662
2663                ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2664                                              rsv, min_size, false);
2665                BUG_ON(ret);    /* shouldn't happen */
2666                trans->block_rsv = rsv;
2667
2668                if (!clone_info) {
2669                        ret = find_first_non_hole(inode, &cur_offset, &len);
2670                        if (unlikely(ret < 0))
2671                                break;
2672                        if (ret && !len) {
2673                                ret = 0;
2674                                break;
2675                        }
2676                }
2677        }
2678
2679        /*
2680         * If we were cloning, force the next fsync to be a full one since we
2681         * replaced (or just dropped in the case of cloning holes when
2682         * NO_HOLES is enabled) extents and extent maps.
2683         * This is for the sake of simplicity, and cloning into files larger
2684         * than 16Mb would force the full fsync anyway (when
2685         * try_release_extent_mapping() is invoked during page cache truncation).
2686         */
2687        if (clone_info)
2688                set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2689                        &BTRFS_I(inode)->runtime_flags);
2690
2691        if (ret)
2692                goto out_trans;
2693
2694        trans->block_rsv = &fs_info->trans_block_rsv;
2695        /*
2696         * If we are using the NO_HOLES feature we might already have had a
2697         * hole that overlaps a part of the region [lockstart, lockend] and
2698         * ends at (or beyond) lockend. Since we have no file extent items to
2699         * represent holes, drop_end can be less than lockend and so we must
2700         * make sure we have an extent map representing the existing hole (the
2701         * call to __btrfs_drop_extents() might have dropped the existing extent
2702         * map representing the existing hole), otherwise the fast fsync path
2703         * will not record the existence of the hole region
2704         * [existing_hole_start, lockend].
2705         */
2706        if (drop_end <= end)
2707                drop_end = end + 1;
2708        /*
2709         * Don't insert file hole extent item if it's for a range beyond eof
2710         * (because it's useless) or if it represents a 0 bytes range (when
2711         * cur_offset == drop_end).
2712         */
2713        if (!clone_info && cur_offset < ino_size && cur_offset < drop_end) {
2714                ret = fill_holes(trans, BTRFS_I(inode), path,
2715                                cur_offset, drop_end);
2716                if (ret) {
2717                        /* Same comment as above. */
2718                        btrfs_abort_transaction(trans, ret);
2719                        goto out_trans;
2720                }
2721        } else if (!clone_info && cur_offset < drop_end) {
2722                /* See the comment in the loop above for the reasoning here. */
2723                ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
2724                                        cur_offset, drop_end - cur_offset);
2725                if (ret) {
2726                        btrfs_abort_transaction(trans, ret);
2727                        goto out_trans;
2728                }
2729
2730        }
2731        if (clone_info) {
2732                ret = btrfs_insert_clone_extent(trans, inode, path, clone_info,
2733                                                clone_info->data_len);
2734                if (ret) {
2735                        btrfs_abort_transaction(trans, ret);
2736                        goto out_trans;
2737                }
2738        }
2739
2740out_trans:
2741        if (!trans)
2742                goto out_free;
2743
2744        trans->block_rsv = &fs_info->trans_block_rsv;
2745        if (ret)
2746                btrfs_end_transaction(trans);
2747        else
2748                *trans_out = trans;
2749out_free:
2750        btrfs_free_block_rsv(fs_info, rsv);
2751out:
2752        return ret;
2753}
2754
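    /*
     * Punch a hole in the range [offset, offset + len) of the file, as used
     * by fallocate() with FALLOC_FL_PUNCH_HOLE. Partial blocks at the edges
     * of the hole are zeroed with btrfs_truncate_block(), fully covered
     * blocks have their extents dropped via btrfs_punch_hole_range(), and
     * the inode item is updated so mtime/ctime reflect the operation.
     */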
2755static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2756{
2757        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2758        struct btrfs_root *root = BTRFS_I(inode)->root;
2759        struct extent_state *cached_state = NULL;
2760        struct btrfs_path *path;
2761        struct btrfs_trans_handle *trans = NULL;
2762        u64 lockstart;
2763        u64 lockend;
2764        u64 tail_start;
2765        u64 tail_len;
2766        u64 orig_start = offset;
2767        int ret = 0;
2768        bool same_block;
2769        u64 ino_size;
2770        bool truncated_block = false;
2771        bool updated_inode = false;
2772
2773        ret = btrfs_wait_ordered_range(inode, offset, len);
2774        if (ret)
2775                return ret;
2776
2777        inode_lock(inode);
2778        ino_size = round_up(inode->i_size, fs_info->sectorsize);
2779        ret = find_first_non_hole(inode, &offset, &len);
2780        if (ret < 0)
2781                goto out_only_mutex;
2782        if (ret && !len) {
2783                /* Already in a large hole */
2784                ret = 0;
2785                goto out_only_mutex;
2786        }
2787
2788        lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
2789        lockend = round_down(offset + len,
2790                             btrfs_inode_sectorsize(inode)) - 1;
2791        same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2792                == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2793        /*
2794         * We needn't truncate any block which is beyond the end of the file
2795         * because we are sure there is no data there.
2796         */
2797        /*
2798         * Only do this if we are in the same block and we aren't doing the
2799         * entire block.
2800         */
2801        if (same_block && len < fs_info->sectorsize) {
2802                if (offset < ino_size) {
2803                        truncated_block = true;
2804                        ret = btrfs_truncate_block(inode, offset, len, 0);
2805                } else {
2806                        ret = 0;
2807                }
2808                goto out_only_mutex;
2809        }
2810
2811        /* zero the back part of the first block */
2812        if (offset < ino_size) {
2813                truncated_block = true;
2814                ret = btrfs_truncate_block(inode, offset, 0, 0);
2815                if (ret) {
2816                        inode_unlock(inode);
2817                        return ret;
2818                }
2819        }
2820
2821        /* Check the aligned pages after the first unaligned page. If
2822         * offset != orig_start, the first unaligned page and several
2823         * following pages are already in holes, so the extra check can
2824         * be skipped */
2825        if (offset == orig_start) {
2826                /* After truncating the page, check for a hole again */
2827                len = offset + len - lockstart;
2828                offset = lockstart;
2829                ret = find_first_non_hole(inode, &offset, &len);
2830                if (ret < 0)
2831                        goto out_only_mutex;
2832                if (ret && !len) {
2833                        ret = 0;
2834                        goto out_only_mutex;
2835                }
2836                lockstart = offset;
2837        }
2838
2839        /* Check the tail unaligned part is in a hole */
2840        tail_start = lockend + 1;
2841        tail_len = offset + len - tail_start;
2842        if (tail_len) {
2843                ret = find_first_non_hole(inode, &tail_start, &tail_len);
2844                if (unlikely(ret < 0))
2845                        goto out_only_mutex;
2846                if (!ret) {
2847                        /* zero the front end of the last page */
2848                        if (tail_start + tail_len < ino_size) {
2849                                truncated_block = true;
2850                                ret = btrfs_truncate_block(inode,
2851                                                        tail_start + tail_len,
2852                                                        0, 1);
2853                                if (ret)
2854                                        goto out_only_mutex;
2855                        }
2856                }
2857        }
2858
2859        if (lockend < lockstart) {
2860                ret = 0;
2861                goto out_only_mutex;
2862        }
2863
2864        ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2865                                          &cached_state);
2866        if (ret)
2867                goto out_only_mutex;
2868
2869        path = btrfs_alloc_path();
2870        if (!path) {
2871                ret = -ENOMEM;
2872                goto out;
2873        }
2874
2875        ret = btrfs_punch_hole_range(inode, path, lockstart, lockend, NULL,
2876                                     &trans);
2877        btrfs_free_path(path);
2878        if (ret)
2879                goto out;
2880
2881        ASSERT(trans != NULL);
2882        inode_inc_iversion(inode);
2883        inode->i_mtime = inode->i_ctime = current_time(inode);
2884        ret = btrfs_update_inode(trans, root, inode);
2885        updated_inode = true;
2886        btrfs_end_transaction(trans);
2887        btrfs_btree_balance_dirty(fs_info);
2888out:
2889        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2890                             &cached_state);
2891out_only_mutex:
2892        if (!updated_inode && truncated_block && !ret) {
2893                /*
2894                 * If we only end up zeroing part of a page, we still need to
2895                 * update the inode item, so that all the time fields are
2896                 * updated as well as the necessary in-memory btrfs inode fields
2897                 * for detecting, at fsync time, if the inode isn't yet in the
2898                 * log tree or it's there but not up to date.
2899                 */
2900                struct timespec64 now = current_time(inode);
2901
2902                inode_inc_iversion(inode);
2903                inode->i_mtime = now;
2904                inode->i_ctime = now;
2905                trans = btrfs_start_transaction(root, 1);
2906                if (IS_ERR(trans)) {
2907                        ret = PTR_ERR(trans);
2908                } else {
2909                        int ret2;
2910
2911                        ret = btrfs_update_inode(trans, root, inode);
2912                        ret2 = btrfs_end_transaction(trans);
2913                        if (!ret)
2914                                ret = ret2;
2915                }
2916        }
2917        inode_unlock(inode);
2918        return ret;
2919}
2920
2921/* Helper structure to record which range is already reserved */
2922struct falloc_range {
2923        struct list_head list;
2924        u64 start;
2925        u64 len;
2926};
2927
2928/*
2929 * Helper function to add a falloc range
2930 *
2931 * The caller should have locked the larger extent range containing
2932 * [start, start + len)
2933 */
2934static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2935{
2936        struct falloc_range *prev = NULL;
2937        struct falloc_range *range = NULL;
2938
2939        if (list_empty(head))
2940                goto insert;
2941
2942        /*
2943         * As fallocate iterates in increasing file offset order, we only
2944         * need to check the last range.
2945         */
2946        prev = list_entry(head->prev, struct falloc_range, list);
2947        if (prev->start + prev->len == start) {
2948                prev->len += len;
2949                return 0;
2950        }
2951insert:
2952        range = kmalloc(sizeof(*range), GFP_KERNEL);
2953        if (!range)
2954                return -ENOMEM;
2955        range->start = start;
2956        range->len = len;
2957        list_add_tail(&range->list, head);
2958        return 0;
2959}
2960
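    /*
     * Update i_size (and the on-disk inode item) after a fallocate that
     * extended the file. Nothing to do if FALLOC_FL_KEEP_SIZE was given or
     * if the end of the allocated range does not go past the current i_size.
     */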
2961static int btrfs_fallocate_update_isize(struct inode *inode,
2962                                        const u64 end,
2963                                        const int mode)
2964{
2965        struct btrfs_trans_handle *trans;
2966        struct btrfs_root *root = BTRFS_I(inode)->root;
2967        int ret;
2968        int ret2;
2969
2970        if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2971                return 0;
2972
2973        trans = btrfs_start_transaction(root, 1);
2974        if (IS_ERR(trans))
2975                return PTR_ERR(trans);
2976
2977        inode->i_ctime = current_time(inode);
2978        i_size_write(inode, end);
2979        btrfs_inode_safe_disk_i_size_write(inode, 0);
2980        ret = btrfs_update_inode(trans, root, inode);
2981        ret2 = btrfs_end_transaction(trans);
2982
2983        return ret ? ret : ret2;
2984}
2985
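    /*
     * Return values of btrfs_zero_range_check_range_boundary(), describing
     * what the block containing a range boundary maps to.
     */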
2986enum {
2987        RANGE_BOUNDARY_WRITTEN_EXTENT,
2988        RANGE_BOUNDARY_PREALLOC_EXTENT,
2989        RANGE_BOUNDARY_HOLE,
2990};
2991
2992static int btrfs_zero_range_check_range_boundary(struct inode *inode,
2993                                                 u64 offset)
2994{
2995        const u64 sectorsize = btrfs_inode_sectorsize(inode);
2996        struct extent_map *em;
2997        int ret;
2998
2999        offset = round_down(offset, sectorsize);
3000        em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
3001        if (IS_ERR(em))
3002                return PTR_ERR(em);
3003
3004        if (em->block_start == EXTENT_MAP_HOLE)
3005                ret = RANGE_BOUNDARY_HOLE;
3006        else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3007                ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
3008        else
3009                ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
3010
3011        free_extent_map(em);
3012        return ret;
3013}
3014
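    /*
     * Implement FALLOC_FL_ZERO_RANGE: make [offset, offset + len) read back
     * as zeroes. Existing prealloc (unwritten) extents are left in place,
     * partial blocks backed by written extents are zeroed in the page cache
     * with btrfs_truncate_block(), and the remaining block-aligned region is
     * covered by a new prealloc extent.
     */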
3015static int btrfs_zero_range(struct inode *inode,
3016                            loff_t offset,
3017                            loff_t len,
3018                            const int mode)
3019{
3020        struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3021        struct extent_map *em;
3022        struct extent_changeset *data_reserved = NULL;
3023        int ret;
3024        u64 alloc_hint = 0;
3025        const u64 sectorsize = btrfs_inode_sectorsize(inode);
3026        u64 alloc_start = round_down(offset, sectorsize);
3027        u64 alloc_end = round_up(offset + len, sectorsize);
3028        u64 bytes_to_reserve = 0;
3029        bool space_reserved = false;
3030
3031        inode_dio_wait(inode);
3032
3033        em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3034                              alloc_end - alloc_start);
3035        if (IS_ERR(em)) {
3036                ret = PTR_ERR(em);
3037                goto out;
3038        }
3039
3040        /*
3041         * Avoid hole punching and extent allocation for some cases. More cases
3042         * could be considered, but they are unlikely to be common and we keep
3043         * things as simple as possible for now. Also, intentionally, if the target
3044         * range contains one or more prealloc extents together with regular
3045         * extents and holes, we drop all the existing extents and allocate a
3046         * new prealloc extent, so that we get a larger contiguous disk extent.
3047         */
3048        if (em->start <= alloc_start &&
3049            test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3050                const u64 em_end = em->start + em->len;
3051
3052                if (em_end >= offset + len) {
3053                        /*
3054                         * The whole range is already a prealloc extent,
3055                         * do nothing except updating the inode's i_size if
3056                         * needed.
3057                         */
3058                        free_extent_map(em);
3059                        ret = btrfs_fallocate_update_isize(inode, offset + len,
3060                                                           mode);
3061                        goto out;
3062                }
3063                /*
3064                 * Part of the range is already a prealloc extent, so operate
3065                 * only on the remaining part of the range.
3066                 */
3067                alloc_start = em_end;
3068                ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3069                len = offset + len - alloc_start;
3070                offset = alloc_start;
3071                alloc_hint = em->block_start + em->len;
3072        }
3073        free_extent_map(em);
3074
3075        if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3076            BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
3077                em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3078                                      sectorsize);
3079                if (IS_ERR(em)) {
3080                        ret = PTR_ERR(em);
3081                        goto out;
3082                }
3083
3084                if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3085                        free_extent_map(em);
3086                        ret = btrfs_fallocate_update_isize(inode, offset + len,
3087                                                           mode);
3088                        goto out;
3089                }
3090                if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3091                        free_extent_map(em);
3092                        ret = btrfs_truncate_block(inode, offset, len, 0);
3093                        if (!ret)
3094                                ret = btrfs_fallocate_update_isize(inode,
3095                                                                   offset + len,
3096                                                                   mode);
3097                        return ret;
3098                }
3099                free_extent_map(em);
3100                alloc_start = round_down(offset, sectorsize);
3101                alloc_end = alloc_start + sectorsize;
3102                goto reserve_space;
3103        }
3104
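            /*
             * General case: only preallocate the block-aligned interior of
             * the range; the unaligned boundaries are handled right below.
             */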
3105        alloc_start = round_up(offset, sectorsize);
3106        alloc_end = round_down(offset + len, sectorsize);
3107
3108        /*
3109         * For unaligned ranges, check the pages at the boundaries, they might
3110         * map to an extent, in which case we need to partially zero them, or
3111         * they might map to a hole, in which case we need our allocation range
3112         * to cover them.
3113         */
3114        if (!IS_ALIGNED(offset, sectorsize)) {
3115                ret = btrfs_zero_range_check_range_boundary(inode, offset);
3116                if (ret < 0)
3117                        goto out;
3118                if (ret == RANGE_BOUNDARY_HOLE) {
3119                        alloc_start = round_down(offset, sectorsize);
3120                        ret = 0;
3121                } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3122                        ret = btrfs_truncate_block(inode, offset, 0, 0);
3123                        if (ret)
3124                                goto out;
3125                } else {
3126                        ret = 0;
3127                }
3128        }
3129
3130        if (!IS_ALIGNED(offset + len, sectorsize)) {
3131                ret = btrfs_zero_range_check_range_boundary(inode,
3132                                                            offset + len);
3133                if (ret < 0)
3134                        goto out;
3135                if (ret == RANGE_BOUNDARY_HOLE) {
3136                        alloc_end = round_up(offset + len, sectorsize);
3137                        ret = 0;
3138                } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3139                        ret = btrfs_truncate_block(inode, offset + len, 0, 1);
3140                        if (ret)
3141                                goto out;
3142                } else {
3143                        ret = 0;
3144                }
3145        }
3146
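            /*
             * Reserve data and qgroup space for the block-aligned region that
             * still needs allocation, then preallocate it as an unwritten
             * extent while holding the extent range locked.
             */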
3147reserve_space:
3148        if (alloc_start < alloc_end) {
3149                struct extent_state *cached_state = NULL;
3150                const u64 lockstart = alloc_start;
3151                const u64 lockend = alloc_end - 1;
3152
3153                bytes_to_reserve = alloc_end - alloc_start;
3154                ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3155                                                      bytes_to_reserve);
3156                if (ret < 0)
3157                        goto out;
3158                space_reserved = true;
3159                ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3160                                                alloc_start, bytes_to_reserve);
3161                if (ret)
3162                        goto out;
3163                ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3164                                                  &cached_state);
3165                if (ret)
3166                        goto out;
3167                ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3168                                                alloc_end - alloc_start,
3169                                                i_blocksize(inode),
3170                                                offset + len, &alloc_hint);
3171                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3172                                     lockend, &cached_state);
3173                /* btrfs_prealloc_file_range releases reserved space on error */
3174                if (ret) {
3175                        space_reserved = false;
3176                        goto out;
3177                }
3178        }
3179        ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3180 out:
3181        if (ret && space_reserved)
3182                btrfs_free_reserved_data_space(inode, data_reserved,
3183                                               alloc_start, bytes_to_reserve);
3184        extent_changeset_free(data_reserved);
3185
3186        return ret;
3187}
3188
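    /*
     * Entry point for the fallocate(2) file operation. Supported modes are
     * plain preallocation (mode == 0), FALLOC_FL_KEEP_SIZE,
     * FALLOC_FL_PUNCH_HOLE and FALLOC_FL_ZERO_RANGE; anything else gets
     * -EOPNOTSUPP. For example, from user space a hole is typically punched
     * with:
     *
     *   fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len);
     */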
3189static long btrfs_fallocate(struct file *file, int mode,
3190                            loff_t offset, loff_t len)
3191{
3192        struct inode *inode = file_inode(file);
3193        struct extent_state *cached_state = NULL;
3194        struct extent_changeset *data_reserved = NULL;
3195        struct falloc_range *range;
3196        struct falloc_range *tmp;
3197        struct list_head reserve_list;
3198        u64 cur_offset;
3199        u64 last_byte;
3200        u64 alloc_start;
3201        u64 alloc_end;
3202        u64 alloc_hint = 0;
3203        u64 locked_end;
3204        u64 actual_end = 0;
3205        struct extent_map *em;
3206        int blocksize = btrfs_inode_sectorsize(inode);
3207        int ret;
3208
3209        alloc_start = round_down(offset, blocksize);
3210        alloc_end = round_up(offset + len, blocksize);
3211        cur_offset = alloc_start;
3212
3213        /* Make sure we aren't being given some crap mode */
3214        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3215                     FALLOC_FL_ZERO_RANGE))
3216                return -EOPNOTSUPP;
3217
3218        if (mode & FALLOC_FL_PUNCH_HOLE)
3219                return btrfs_punch_hole(inode, offset, len);
3220
3221        /*
3222         * Only trigger disk allocation here, don't trigger the qgroup reserve.
3223         *
3224         * Qgroup space will be checked and reserved later.
3225         */
3226        if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3227                ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3228                                                      alloc_end - alloc_start);
3229                if (ret < 0)
3230                        return ret;
3231        }
3232
3233        inode_lock(inode);
3234
3235        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3236                ret = inode_newsize_ok(inode, offset + len);
3237                if (ret)
3238                        goto out;
3239        }
3240
3241        /*
3242         * TODO: Move these two operations after we have checked the
3243         * accurate reserved space, otherwise fallocate can still fail
3244         * but leave the page truncated or the size expanded.
3245         *
3246         * That's a minor problem though and won't do much harm.
3247         */
3248        if (alloc_start > inode->i_size) {
3249                ret = btrfs_cont_expand(inode, i_size_read(inode),
3250                                        alloc_start);
3251                if (ret)
3252                        goto out;
3253        } else if (offset + len > inode->i_size) {
3254                /*
3255                 * If we are fallocating from the end of the file onward we
3256                 * need to zero out the end of the block if i_size lands in the
3257                 * middle of a block.
3258                 */
3259                ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
3260                if (ret)
3261                        goto out;
3262        }
3263
3264        /*
3265         * wait for ordered IO before we have any locks.  We'll loop again
3266         * below with the locks held.
3267         */
3268        ret = btrfs_wait_ordered_range(inode, alloc_start,
3269                                       alloc_end - alloc_start);
3270        if (ret)
3271                goto out;
3272
3273        if (mode & FALLOC_FL_ZERO_RANGE) {
3274                ret = btrfs_zero_range(inode, offset, len, mode);
3275                inode_unlock(inode);
3276                return ret;
3277        }
3278
3279        locked_end = alloc_end - 1;
3280        while (1) {
3281                struct btrfs_ordered_extent *ordered;
3282
3283                /* the extent lock is ordered inside the running
3284                 * transaction
3285                 */
3286                lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3287                                 locked_end, &cached_state);
3288                ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);
3289
3290                if (ordered &&
3291                    ordered->file_offset + ordered->num_bytes > alloc_start &&
3292                    ordered->file_offset < alloc_end) {
3293                        btrfs_put_ordered_extent(ordered);
3294                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3295                                             alloc_start, locked_end,
3296                                             &cached_state);
3297                        /*
3298                         * we can't wait on the range with the transaction
3299                         * running or with the extent lock held
3300                         */
3301                        ret = btrfs_wait_ordered_range(inode, alloc_start,
3302                                                       alloc_end - alloc_start);
3303                        if (ret)
3304                                goto out;
3305                } else {
3306                        if (ordered)
3307                                btrfs_put_ordered_extent(ordered);
3308                        break;
3309                }
3310        }
3311
3312        /* First, check if we exceed the qgroup limit */
3313        INIT_LIST_HEAD(&reserve_list);
3314        while (cur_offset < alloc_end) {
3315                em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3316                                      alloc_end - cur_offset);
3317                if (IS_ERR(em)) {
3318                        ret = PTR_ERR(em);
3319                        break;
3320                }
3321                last_byte = min(extent_map_end(em), alloc_end);
3322                actual_end = min_t(u64, extent_map_end(em), offset + len);
3323                last_byte = ALIGN(last_byte, blocksize);
3324                if (em->block_start == EXTENT_MAP_HOLE ||
3325                    (cur_offset >= inode->i_size &&
3326                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3327                        ret = add_falloc_range(&reserve_list, cur_offset,
3328                                               last_byte - cur_offset);
3329                        if (ret < 0) {
3330                                free_extent_map(em);
3331                                break;
3332                        }
3333                        ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3334                                        cur_offset, last_byte - cur_offset);
3335                        if (ret < 0) {
3336                                cur_offset = last_byte;
3337                                free_extent_map(em);
3338                                break;
3339                        }
3340                } else {
3341                        /*
3342                         * We do not need to reserve an unwritten extent for
3343                         * this range, so free the reserved data space first,
3344                         * otherwise it'll result in a false ENOSPC error.
3345                         */
3346                        btrfs_free_reserved_data_space(inode, data_reserved,
3347                                        cur_offset, last_byte - cur_offset);
3348                }
3349                free_extent_map(em);
3350                cur_offset = last_byte;
3351        }
3352
3353        /*
3354         * If ret is still 0, we're OK to fallocate.
3355         * Otherwise just clean up the list and exit.
3356         */
3357        list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3358                if (!ret)
3359                        ret = btrfs_prealloc_file_range(inode, mode,
3360                                        range->start,
3361                                        range->len, i_blocksize(inode),
3362                                        offset + len, &alloc_hint);
3363                else
3364                        btrfs_free_reserved_data_space(inode,
3365                                        data_reserved, range->start,
3366                                        range->len);
3367                list_del(&range->list);
3368                kfree(range);
3369        }
3370        if (ret < 0)
3371                goto out_unlock;
3372
3373        /*
3374         * We didn't need to allocate any more space, but we still extended the
3375         * size of the file so we need to update i_size and the inode item.
3376         */
3377        ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3378out_unlock:
3379        unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3380                             &cached_state);
3381out:
3382        inode_unlock(inode);
3383        /* Let go of our reservation. */
3384        if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3385                btrfs_free_reserved_data_space(inode, data_reserved,
3386                                cur_offset, alloc_end - cur_offset);
3387        extent_changeset_free(data_reserved);
3388        return ret;
3389}
3390
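    /*
     * Find the offset of the next data or hole (depending on @whence being
     * SEEK_DATA or SEEK_HOLE) at or after @offset, used to implement
     * lseek(2) with SEEK_DATA/SEEK_HOLE. Prealloc (unwritten) extents are
     * treated as holes. Returns -ENXIO if no such offset exists before EOF.
     */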
3391static loff_t find_desired_extent(struct inode *inode, loff_t offset,
3392                                  int whence)
3393{
3394        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3395        struct extent_map *em = NULL;
3396        struct extent_state *cached_state = NULL;
3397        loff_t i_size = inode->i_size;
3398        u64 lockstart;
3399        u64 lockend;
3400        u64 start;
3401        u64 len;
3402        int ret = 0;
3403
3404        if (i_size == 0 || offset >= i_size)
3405                return -ENXIO;
3406
3407        /*
3408         * offset can be negative; in this case we start finding DATA/HOLE from
3409         * the very start of the file.
3410         */
3411        start = max_t(loff_t, 0, offset);
3412
3413        lockstart = round_down(start, fs_info->sectorsize);
3414        lockend = round_up(i_size, fs_info->sectorsize);
3415        if (lockend <= lockstart)
3416                lockend = lockstart + fs_info->sectorsize;
3417        lockend--;
3418        len = lockend - lockstart + 1;
3419
3420        lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3421                         &cached_state);
3422
3423        while (start < i_size) {
3424                em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
3425                if (IS_ERR(em)) {
3426                        ret = PTR_ERR(em);
3427                        em = NULL;
3428                        break;
3429                }
3430
3431                if (whence == SEEK_HOLE &&
3432                    (em->block_start == EXTENT_MAP_HOLE ||
3433                     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3434                        break;
3435                else if (whence == SEEK_DATA &&
3436                           (em->block_start != EXTENT_MAP_HOLE &&
3437                            !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3438                        break;
3439
3440                start = em->start + em->len;
3441                free_extent_map(em);
3442                em = NULL;
3443                cond_resched();
3444        }
3445        free_extent_map(em);
3446        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3447                             &cached_state);
3448        if (ret) {
3449                offset = ret;
3450        } else {
3451                if (whence == SEEK_DATA && start >= i_size)
3452                        offset = -ENXIO;
3453                else
3454                        offset = min_t(loff_t, start, i_size);
3455        }
3456
3457        return offset;
3458}
3459
3460static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3461{
3462        struct inode *inode = file->f_mapping->host;
3463
3464        switch (whence) {
3465        default:
3466                return generic_file_llseek(file, offset, whence);
3467        case SEEK_DATA:
3468        case SEEK_HOLE:
3469                inode_lock_shared(inode);
3470                offset = find_desired_extent(inode, offset, whence);
3471                inode_unlock_shared(inode);
3472                break;
3473        }
3474
3475        if (offset < 0)
3476                return offset;
3477
3478        return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3479}
3480
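    /*
     * Set FMODE_NOWAIT so callers issuing IOCB_NOWAIT / RWF_NOWAIT requests
     * know this file supports non-blocking attempts, then fall back to the
     * generic open checks.
     */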
3481static int btrfs_file_open(struct inode *inode, struct file *filp)
3482{
3483        filp->f_mode |= FMODE_NOWAIT;
3484        return generic_file_open(inode, filp);
3485}
3486
3487const struct file_operations btrfs_file_operations = {
3488        .llseek         = btrfs_file_llseek,
3489        .read_iter      = generic_file_read_iter,
3490        .splice_read    = generic_file_splice_read,
3491        .write_iter     = btrfs_file_write_iter,
3492        .mmap           = btrfs_file_mmap,
3493        .open           = btrfs_file_open,
3494        .release        = btrfs_release_file,
3495        .fsync          = btrfs_sync_file,
3496        .fallocate      = btrfs_fallocate,
3497        .unlocked_ioctl = btrfs_ioctl,
3498#ifdef CONFIG_COMPAT
3499        .compat_ioctl   = btrfs_compat_ioctl,
3500#endif
3501        .remap_file_range = btrfs_remap_file_range,
3502};
3503
3504void __cold btrfs_auto_defrag_exit(void)
3505{
3506        kmem_cache_destroy(btrfs_inode_defrag_cachep);
3507}
3508
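    /*
     * Create the slab cache used to allocate struct inode_defrag entries for
     * the auto defrag queue; btrfs_auto_defrag_exit() destroys it.
     */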
3509int __init btrfs_auto_defrag_init(void)
3510{
3511        btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3512                                        sizeof(struct inode_defrag), 0,
3513                                        SLAB_MEM_SPREAD,
3514                                        NULL);
3515        if (!btrfs_inode_defrag_cachep)
3516                return -ENOMEM;
3517
3518        return 0;
3519}
3520
3521int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3522{
3523        int ret;
3524
3525        /*
3526         * So with compression we will find and lock a dirty page and clear the
3527         * first one as dirty, set up an async extent, and immediately return
3528         * with the entire range locked but with nobody actually marked with
3529         * writeback.  So we can't just filemap_write_and_wait_range() and
3530         * expect it to work since it will just kick off a thread to do the
3531         * actual work.  So we need to call filemap_fdatawrite_range _again_
3532         * since it will wait on the page lock, which won't be unlocked until
3533         * after the pages have been marked as writeback and so we're good to go
3534         * from there.  We have to do this otherwise we'll miss the ordered
3535         * extents and that results in badness.  Please Josef, do not think you
3536         * know better and pull this out at some point in the future, it is
3537         * right and you are wrong.
3538         */
3539        ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3540        if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3541                             &BTRFS_I(inode)->runtime_flags))
3542                ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3543
3544        return ret;
3545}
3546