linux/fs/btrfs/file.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled, we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid when the defrag was added; we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        return -EEXIST;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return 0;
}

static inline int __need_auto_defrag(struct btrfs_root *root)
{
        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(root->fs_info))
                return 0;

        return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *defrag;
        u64 transid;
        int ret;

        if (!__need_auto_defrag(root))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = BTRFS_I(inode)->root->last_trans;

        defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

        spin_lock(&root->fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
                /*
                 * If we set the IN_DEFRAG flag and the inode is then evicted
                 * and re-read, the new in-memory inode won't have the flag
                 * set, so we may still find an existing defrag record here.
                 */
                ret = __btrfs_add_inode_defrag(inode, defrag);
                if (ret)
                        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        return 0;
}

/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct inode *inode,
                                       struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;

        if (!__need_auto_defrag(root))
                goto out;

        /*
         * Here we don't check the IN_DEFRAG flag, because we need to merge
         * the records together.
         */
        spin_lock(&root->fs_info->defrag_inodes_lock);
        ret = __btrfs_add_inode_defrag(inode, defrag);
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        if (ret)
                goto out;
        return;
out:
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * Pick the defrag record for the inode we want; if it doesn't exist, we
 * will get the next one in the tree.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        spin_lock(&fs_info->defrag_inodes_lock);
        p = fs_info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        goto out;
        }

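        /*
         * No exact match: 'entry' is the last node we compared against.  If
         * it sorts before what we asked for, step to its in-order successor
         * so we hand back the next record instead.
         */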
        if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                parent = rb_next(parent);
                if (parent)
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                else
                        entry = NULL;
        }
out:
        if (entry)
                rb_erase(parent, &fs_info->defrag_inodes);
        spin_unlock(&fs_info->defrag_inodes_lock);
        return entry;
}

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct rb_node *node;

        spin_lock(&fs_info->defrag_inodes_lock);
        node = rb_first(&fs_info->defrag_inodes);
        while (node) {
                rb_erase(node, &fs_info->defrag_inodes);
                defrag = rb_entry(node, struct inode_defrag, rb_node);
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

                cond_resched_lock(&fs_info->defrag_inodes_lock);

                node = rb_first(&fs_info->defrag_inodes);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH      1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        int num_defrag;
        int index;
        int ret;

        /* get the inode */
        key.objectid = defrag->root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

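        /*
         * Hold the subvolume SRCU read lock so the root we look up can't
         * be freed out from under us before we grab a reference to the
         * inode.
         */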
        index = srcu_read_lock(&fs_info->subvol_srcu);

        inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(inode_root)) {
                ret = PTR_ERR(inode_root);
                goto cleanup;
        }

        key.objectid = defrag->ino;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                goto cleanup;
        }
        srcu_read_unlock(&fs_info->subvol_srcu, index);

        /* do a chunk of defrag */
        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;
        range.start = defrag->last_offset;

        sb_start_write(fs_info->sb);
        num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                       BTRFS_DEFRAG_BATCH);
        sb_end_write(fs_info->sb);
        /*
         * if we filled the whole defrag batch, there
         * must be more work to do.  Queue this defrag
         * again
         */
        if (num_defrag == BTRFS_DEFRAG_BATCH) {
                defrag->last_offset = range.start;
                btrfs_requeue_inode_defrag(inode, defrag);
        } else if (defrag->last_offset && !defrag->cycled) {
                /*
                 * we didn't fill our defrag batch, but
                 * we didn't start at zero.  Make sure we loop
                 * around to the start of the file.
                 */
                defrag->last_offset = 0;
                defrag->cycled = 1;
                btrfs_requeue_inode_defrag(inode, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }

        iput(inode);
        return 0;
cleanup:
        srcu_read_unlock(&fs_info->subvol_srcu, index);
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        u64 first_ino = 0;
        u64 root_objectid = 0;

        atomic_inc(&fs_info->defrag_running);
        while (1) {
                /* Pause the auto defragger. */
                if (test_bit(BTRFS_FS_STATE_REMOUNTING,
                             &fs_info->fs_state))
                        break;

                if (!__need_auto_defrag(fs_info->tree_root))
                        break;

                /* find an inode to defrag */
                defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
                                                 first_ino);
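                /*
                 * Nothing found at or after our cursor.  If the cursor had
                 * advanced past the start, reset it and scan the tree once
                 * more from the beginning before giving up.
                 */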
                if (!defrag) {
                        if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;

                __btrfs_run_defrag_inode(fs_info, defrag);
        }
        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_SIZE - 1);

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 */
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_write_iter to fault page */
                if (unlikely(copied == 0))
                        break;

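                /*
                 * A short copy stays within the current page: just advance
                 * the in-page offset.  A full copy moves on to the next
                 * prepared page.
                 */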
                if (copied < PAGE_SIZE - offset) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                /* PageChecked is some magic around finding pages that have
                 * been modified without going through btrfs_set_page_dirty;
                 * clear it here.  There should be no need to mark the pages
                 * accessed, as prepare_pages should have marked them
                 * accessed via find_or_create_page()
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                put_page(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                      struct page **pages, size_t num_pages,
                      loff_t pos, size_t write_bytes,
                      struct extent_state **cached)
{
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

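        /*
         * Round the range we mark delalloc out to sector boundaries;
         * allocation and delalloc accounting work in sectorsize units.
         */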
        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * we've only changed i_size in ram, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(inode, end_pos);
        return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;
        bool modified;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                int no_splits = 0;

                modified = false;
                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
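                /*
                 * Pinned extents are still under IO; when asked to skip
                 * them, don't split them, just move the search range past
                 * the part they cover.
                 */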
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                clear_bit(EXTENT_FLAG_LOGGING, &flags);
                modified = !list_empty(&em->list);
                if (no_splits)
                        goto next;

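                /*
                 * First split: the extent sticks out in front of the drop
                 * range, keep the part before 'start'.
                 */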
                if (em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_start = em->orig_start;
                                split->block_start = em->block_start;

                                if (compressed)
                                        split->block_len = em->block_len;
                                else
                                        split->block_len = split->len;
                                split->orig_block_len = max(split->block_len,
                                                em->orig_block_len);
                                split->ram_bytes = em->ram_bytes;
                        } else {
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                                split->ram_bytes = split->len;
                        }

                        split->generation = gen;
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        replace_extent_mapping(em_tree, em, split, modified);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
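                /*
                 * Second split: the extent sticks out past the end of the
                 * drop range, keep the part after 'start + len'.
                 */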
                if (testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_block_len = max(em->block_len,
                                                    em->orig_block_len);

                                split->ram_bytes = em->ram_bytes;
                                if (compressed) {
                                        split->block_len = em->block_len;
                                        split->block_start = em->block_start;
                                        split->orig_start = em->orig_start;
                                } else {
                                        split->block_len = split->len;
                                        split->block_start = em->block_start
                                                + diff;
                                        split->orig_start = em->orig_start;
                                }
                        } else {
                                split->ram_bytes = split->len;
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                        }

                        if (extent_map_in_tree(em)) {
                                replace_extent_mapping(em_tree, em, split,
                                                       modified);
                        } else {
                                ret = add_extent_mapping(em_tree, split,
                                                         modified);
                                ASSERT(ret == 0); /* Logic error */
                        }
                        free_extent_map(split);
                        split = NULL;
                }
next:
                if (extent_map_in_tree(em))
                        remove_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  If drop_end is not NULL, it is set to the
 * offset the drop advanced to: min(end, end of the last extent found),
 * or to end if no extent intersected the range.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
                         u64 *drop_end, int drop_cache,
                         int replace_extent,
                         u32 extent_item_size,
                         int *key_inserted)
{
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs;
        int found = 0;
        int leafs_visited = 0;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

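        /*
         * If the whole range lies beyond the on-disk i_size and we aren't
         * inserting a replacement extent, nothing in the tree needs to
         * change, so search read-only without COWing.
         */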
        if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
                modify_tree = 0;

        update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
                       root == root->fs_info->tree_root);
        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
                leafs_visited++;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leafs_visited++;
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid > ino)
                        break;
                if (WARN_ON_ONCE(key.objectid < ino) ||
                    key.type < BTRFS_EXTENT_DATA_KEY) {
                        ASSERT(del_nr == 0);
                        path->slots[0]++;
                        goto next_slot;
                }
                if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf,
                                                     path->slots[0], fi);
                } else {
                        /* can't happen */
                        BUG();
                }

                /*
                 * Don't skip extent items representing 0 byte lengths. They
                 * used to be created (by a bug) when we hit an -ENOSPC
                 * condition while punching holes. So if we find one here,
                 * just ensure we delete it, otherwise we would insert a new
                 * file extent item with the same key (offset) as that 0
                 * bytes length file extent item in the call to
                 * setup_items_for_insert() later in this function.
                 */
                if (extent_end == key.offset && extent_end >= search_start)
                        goto delete_extent_item;

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                found = 1;
                search_start = max(key.offset, start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, end - key.offset);
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, extent_end - start);
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
delete_extent_item:
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset);
                                BUG_ON(ret); /* -ENOMEM */
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                        }

                        if (end == extent_end)
                                break;

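                        /*
                         * If there are more items in this leaf, keep
                         * batching deletions; otherwise flush the pending
                         * deletions now, release the path and search again.
                         */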
                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, root, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG_ON(1);
        }

        if (!ret && del_nr > 0) {
                /*
                 * Set path->slots[0] to the first slot, so that after the
                 * delete, if items are moved off from our leaf to its
                 * immediate left or right neighbor leaves, we end up with a
                 * correct and adjusted path->slots[0] for our insertion (if
                 * replace_extent != 0).
                 */
                path->slots[0] = del_slot;
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, root, ret);
        }

        leaf = path->nodes[0];
        /*
         * If btrfs_del_items() was called, it might have deleted a leaf, in
         * which case it unlocked our path, so check path->locks[0] matches a
         * write lock.
         */
        if (!ret && replace_extent && leafs_visited == 1 &&
            (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
             path->locks[0] == BTRFS_WRITE_LOCK) &&
            btrfs_leaf_free_space(root, leaf) >=
            sizeof(struct btrfs_item) + extent_item_size) {

                key.objectid = ino;
                key.type = BTRFS_EXTENT_DATA_KEY;
                key.offset = start;
                if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
                        struct btrfs_key slot_key;

                        btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
                        if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
                                path->slots[0]++;
                }
                setup_items_for_insert(root, path, &key,
                                       &extent_item_size,
                                       extent_item_size,
                                       sizeof(struct btrfs_item) +
                                       extent_item_size, 1);
                *key_inserted = 1;
        }

        if (!replace_extent || !(*key_inserted))
                btrfs_release_path(path);
        if (drop_end)
                *drop_end = found ? min(end, extent_end) : end;
        return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
                                   drop_cache, 0, 0, NULL);
        btrfs_free_path(path);
        return ret;
}

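/*
 * Check whether the extent item at @slot can be merged with the range the
 * caller describes: a regular, uncompressed, unencrypted extent for
 * @objectid backed by the same physical extent (@bytenr/@orig_offset)
 * whose boundaries match the non-zero values in *start/*end.  On success
 * the item's actual boundaries are returned via *start and *end.
 */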
static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'. If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;
        /* a successful non-exact search leaves ret == 1; normalize it */
        ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

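        /*
         * No quick merge was possible: split the prealloc extent at the
         * start and/or end of the written range, so we end up with two or
         * three extents, the middle one covering exactly [start, end).
         */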
        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return ret;
}

/*
 * on error we return an unlocked page and the error value;
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct inode *inode,
                                 struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
                if (page->mapping != inode->i_mapping) {
                        unlock_page(page);
                        return -EAGAIN;
                }
        }
        return 0;
}

/*
 * this just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
                                  size_t num_pages, loff_t pos,
                                  size_t write_bytes, bool force_uptodate)
{
        int i;
        unsigned long index = pos >> PAGE_SHIFT;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili;

        for (i = 0; i < num_pages; i++) {
again:
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
                                               mask | __GFP_WRITE);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
                        goto fail;
                }

1341                if (i == 0)
1342                        err = prepare_uptodate_page(inode, pages[i], pos,
1343                                                    force_uptodate);
1344                if (!err && i == num_pages - 1)
1345                        err = prepare_uptodate_page(inode, pages[i],
1346                                                    pos + write_bytes, false);
1347                if (err) {
1348                        put_page(pages[i]);
1349                        if (err == -EAGAIN) {
1350                                err = 0;
1351                                goto again;
1352                        }
1353                        faili = i - 1;
1354                        goto fail;
1355                }
1356                wait_on_page_writeback(pages[i]);
1357        }
1358
1359        return 0;
1360fail:
1361        while (faili >= 0) {
1362                unlock_page(pages[faili]);
1363                put_page(pages[faili]);
1364                faili--;
1365        }
1366        return err;
1367
1368}
1369
1370/*
1371 * This function locks the extent and properly waits for data=ordered extents
1372 * to finish before allowing the pages to be modified if needed.
1373 *
1374 * The return value:
1375 * 1 - the extent is locked
1376 * 0 - the extent is not locked, and everything is OK
1377 * -EAGAIN - the pages need to be re-prepared
1378 * any other negative value - something went wrong
1379 */
1380static noinline int
1381lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
1382                                size_t num_pages, loff_t pos,
1383                                size_t write_bytes,
1384                                u64 *lockstart, u64 *lockend,
1385                                struct extent_state **cached_state)
1386{
1387        struct btrfs_root *root = BTRFS_I(inode)->root;
1388        u64 start_pos;
1389        u64 last_pos;
1390        int i;
1391        int ret = 0;
1392
1393        start_pos = round_down(pos, root->sectorsize);
1394        last_pos = start_pos
1395                + round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;
1396
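        /*
         * (Worked example, assuming a 4096 byte sectorsize: pos == 5000 and
         * write_bytes == 3000 give start_pos == 4096 and last_pos == 8191,
         * i.e. the locked range is expanded to whole sectors.)
         */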
1397        if (start_pos < inode->i_size) {
1398                struct btrfs_ordered_extent *ordered;
1399                lock_extent_bits(&BTRFS_I(inode)->io_tree,
1400                                 start_pos, last_pos, cached_state);
1401                ordered = btrfs_lookup_ordered_range(inode, start_pos,
1402                                                     last_pos - start_pos + 1);
1403                if (ordered &&
1404                    ordered->file_offset + ordered->len > start_pos &&
1405                    ordered->file_offset <= last_pos) {
1406                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1407                                             start_pos, last_pos,
1408                                             cached_state, GFP_NOFS);
1409                        for (i = 0; i < num_pages; i++) {
1410                                unlock_page(pages[i]);
1411                                put_page(pages[i]);
1412                        }
1413                        btrfs_start_ordered_extent(inode, ordered, 1);
1414                        btrfs_put_ordered_extent(ordered);
1415                        return -EAGAIN;
1416                }
1417                if (ordered)
1418                        btrfs_put_ordered_extent(ordered);
1419
1420                clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
1421                                  last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
1422                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
1423                                  0, 0, cached_state, GFP_NOFS);
1424                *lockstart = start_pos;
1425                *lockend = last_pos;
1426                ret = 1;
1427        }
1428
1429        for (i = 0; i < num_pages; i++) {
1430                if (clear_page_dirty_for_io(pages[i]))
1431                        account_page_redirty(pages[i]);
1432                set_page_extent_mapped(pages[i]);
1433                WARN_ON(!PageLocked(pages[i]));
1434        }
1435
1436        return ret;
1437}
1438
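/*
 * (Descriptive note: check whether a NOCOW write is possible for the range
 * starting at @pos.  A return value > 0 means it is, and *write_bytes may
 * have been shrunk to the part of the range that can skip COW; 0 means the
 * write must fall back to COW; -ENOSPC means a pending snapshot currently
 * blocks NOCOW writes.)
 */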
1439static noinline int check_can_nocow(struct inode *inode, loff_t pos,
1440                                    size_t *write_bytes)
1441{
1442        struct btrfs_root *root = BTRFS_I(inode)->root;
1443        struct btrfs_ordered_extent *ordered;
1444        u64 lockstart, lockend;
1445        u64 num_bytes;
1446        int ret;
1447
1448        ret = btrfs_start_write_no_snapshoting(root);
1449        if (!ret)
1450                return -ENOSPC;
1451
1452        lockstart = round_down(pos, root->sectorsize);
1453        lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;
1454
1455        while (1) {
1456                lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1457                ordered = btrfs_lookup_ordered_range(inode, lockstart,
1458                                                     lockend - lockstart + 1);
1459                if (!ordered) {
1460                        break;
1461                }
1462                unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1463                btrfs_start_ordered_extent(inode, ordered, 1);
1464                btrfs_put_ordered_extent(ordered);
1465        }
1466
1467        num_bytes = lockend - lockstart + 1;
1468        ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
1469        if (ret <= 0) {
1470                ret = 0;
1471                btrfs_end_write_no_snapshoting(root);
1472        } else {
1473                *write_bytes = min_t(size_t, *write_bytes,
1474                                     num_bytes - pos + lockstart);
1475        }
1476
1477        unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1478
1479        return ret;
1480}
1481
1482static noinline ssize_t __btrfs_buffered_write(struct file *file,
1483                                               struct iov_iter *i,
1484                                               loff_t pos)
1485{
1486        struct inode *inode = file_inode(file);
1487        struct btrfs_root *root = BTRFS_I(inode)->root;
1488        struct page **pages = NULL;
1489        struct extent_state *cached_state = NULL;
1490        u64 release_bytes = 0;
1491        u64 lockstart;
1492        u64 lockend;
1493        size_t num_written = 0;
1494        int nrptrs;
1495        int ret = 0;
1496        bool only_release_metadata = false;
1497        bool force_page_uptodate = false;
1498        bool need_unlock;
1499
1500        nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1501                        PAGE_SIZE / (sizeof(struct page *)));
1502        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1503        nrptrs = max(nrptrs, 8);
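        /*
         * (Note: the batch size nrptrs is capped three ways: by the number of
         * pages the iov actually needs, by how many page pointers fit in a
         * single page, and by the task's remaining dirty-page throttle
         * headroom, with a floor of 8 pages per batch.)
         */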
1504        pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1505        if (!pages)
1506                return -ENOMEM;
1507
1508        while (iov_iter_count(i) > 0) {
1509                size_t offset = pos & (PAGE_SIZE - 1);
1510                size_t sector_offset;
1511                size_t write_bytes = min(iov_iter_count(i),
1512                                         nrptrs * (size_t)PAGE_SIZE -
1513                                         offset);
1514                size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1515                                                PAGE_SIZE);
1516                size_t reserve_bytes;
1517                size_t dirty_pages;
1518                size_t copied;
1519                size_t dirty_sectors;
1520                size_t num_sectors;
1521
1522                WARN_ON(num_pages > nrptrs);
1523
1524                /*
1525                 * Fault pages in before locking them in prepare_pages,
1526                 * to avoid a deadlock from recursively taking the page lock
1527                 */
1528                if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1529                        ret = -EFAULT;
1530                        break;
1531                }
1532
1533                sector_offset = pos & (root->sectorsize - 1);
1534                reserve_bytes = round_up(write_bytes + sector_offset,
1535                                root->sectorsize);
1536
1537                if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1538                                              BTRFS_INODE_PREALLOC)) &&
1539                    check_can_nocow(inode, pos, &write_bytes) > 0) {
1540                        /*
1541                         * For nodata cow case, no need to reserve
1542                         * data space.
1543                         */
1544                        only_release_metadata = true;
1545                        /*
1546                         * our prealloc extent may be smaller than
1547                         * write_bytes, so scale down.
1548                         */
1549                        num_pages = DIV_ROUND_UP(write_bytes + offset,
1550                                                 PAGE_SIZE);
1551                        reserve_bytes = round_up(write_bytes + sector_offset,
1552                                        root->sectorsize);
1553                        goto reserve_metadata;
1554                }
1555
1556                ret = btrfs_check_data_free_space(inode, pos, write_bytes);
1557                if (ret < 0)
1558                        break;
1559
1560reserve_metadata:
1561                ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
1562                if (ret) {
1563                        if (!only_release_metadata)
1564                                btrfs_free_reserved_data_space(inode, pos,
1565                                                               write_bytes);
1566                        else
1567                                btrfs_end_write_no_snapshoting(root);
1568                        break;
1569                }
1570
1571                release_bytes = reserve_bytes;
1572                need_unlock = false;
1573again:
1574                /*
1575                 * This is going to setup the pages array with the number of
1576                 * pages we want, so we don't really need to worry about the
1577                 * contents of pages from loop to loop
1578                 */
1579                ret = prepare_pages(inode, pages, num_pages,
1580                                    pos, write_bytes,
1581                                    force_page_uptodate);
1582                if (ret)
1583                        break;
1584
1585                ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
1586                                                pos, write_bytes, &lockstart,
1587                                                &lockend, &cached_state);
1588                if (ret < 0) {
1589                        if (ret == -EAGAIN)
1590                                goto again;
1591                        break;
1592                } else if (ret > 0) {
1593                        need_unlock = true;
1594                        ret = 0;
1595                }
1596
1597                copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1598
1599                /*
1600                 * if we have trouble faulting in the pages, fall
1601                 * back to one page at a time
1602                 */
1603                if (copied < write_bytes)
1604                        nrptrs = 1;
1605
1606                if (copied == 0) {
1607                        force_page_uptodate = true;
1608                        dirty_pages = 0;
1609                } else {
1610                        force_page_uptodate = false;
1611                        dirty_pages = DIV_ROUND_UP(copied + offset,
1612                                                   PAGE_SIZE);
1613                }
1614
1615                /*
1616                 * If we had a short copy we need to release the excess delalloc
1617                 * bytes we reserved.  We need to increment outstanding_extents
1618                 * because btrfs_delalloc_release_space will decrement it, but
1619                 * we still have an outstanding extent for the chunk we actually
1620                 * managed to copy.
1621                 */
1622                num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
1623                                                reserve_bytes);
1624                dirty_sectors = round_up(copied + sector_offset,
1625                                        root->sectorsize);
1626                dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
1627                                                dirty_sectors);
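                /*
                 * (Worked example, assuming a 4096 byte sectorsize and
                 * sector_offset == 0: with write_bytes == 8192 reserved but
                 * only copied == 4096, num_sectors == 2 and dirty_sectors == 1,
                 * so the reservation for the uncopied 4096 byte tail is
                 * released below.)
                 */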
1628
1629                if (num_sectors > dirty_sectors) {
1630                        release_bytes = (write_bytes - copied)
1631                                & ~((u64)root->sectorsize - 1);
1632                        if (copied > 0) {
1633                                spin_lock(&BTRFS_I(inode)->lock);
1634                                BTRFS_I(inode)->outstanding_extents++;
1635                                spin_unlock(&BTRFS_I(inode)->lock);
1636                        }
1637                        if (only_release_metadata) {
1638                                btrfs_delalloc_release_metadata(inode,
1639                                                                release_bytes);
1640                        } else {
1641                                u64 __pos;
1642
1643                                __pos = round_down(pos, root->sectorsize) +
1644                                        (dirty_pages << PAGE_SHIFT);
1645                                btrfs_delalloc_release_space(inode, __pos,
1646                                                             release_bytes);
1647                        }
1648                }
1649
1650                release_bytes = round_up(copied + sector_offset,
1651                                        root->sectorsize);
1652
1653                if (copied > 0)
1654                        ret = btrfs_dirty_pages(root, inode, pages,
1655                                                dirty_pages, pos, copied,
1656                                                NULL);
1657                if (need_unlock)
1658                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1659                                             lockstart, lockend, &cached_state,
1660                                             GFP_NOFS);
1661                if (ret) {
1662                        btrfs_drop_pages(pages, num_pages);
1663                        break;
1664                }
1665
1666                release_bytes = 0;
1667                if (only_release_metadata)
1668                        btrfs_end_write_no_snapshoting(root);
1669
1670                if (only_release_metadata && copied > 0) {
1671                        lockstart = round_down(pos, root->sectorsize);
1672                        lockend = round_up(pos + copied, root->sectorsize) - 1;
1673
1674                        set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1675                                       lockend, EXTENT_NORESERVE, NULL,
1676                                       NULL, GFP_NOFS);
1677                        only_release_metadata = false;
1678                }
1679
1680                btrfs_drop_pages(pages, num_pages);
1681
1682                cond_resched();
1683
1684                balance_dirty_pages_ratelimited(inode->i_mapping);
1685                if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
1686                        btrfs_btree_balance_dirty(root);
1687
1688                pos += copied;
1689                num_written += copied;
1690        }
1691
1692        kfree(pages);
1693
1694        if (release_bytes) {
1695                if (only_release_metadata) {
1696                        btrfs_end_write_no_snapshoting(root);
1697                        btrfs_delalloc_release_metadata(inode, release_bytes);
1698                } else {
1699                        btrfs_delalloc_release_space(inode, pos, release_bytes);
1700                }
1701        }
1702
1703        return num_written ? num_written : ret;
1704}
1705
1706static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1707                                    struct iov_iter *from,
1708                                    loff_t pos)
1709{
1710        struct file *file = iocb->ki_filp;
1711        struct inode *inode = file_inode(file);
1712        ssize_t written;
1713        ssize_t written_buffered;
1714        loff_t endbyte;
1715        int err;
1716
1717        written = generic_file_direct_write(iocb, from, pos);
1718
1719        if (written < 0 || !iov_iter_count(from))
1720                return written;
1721
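        /*
         * (The direct write was short: write the rest of the iov through the
         * buffered path, then flush and invalidate the range so a subsequent
         * direct read sees the just-written data.)
         */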
1722        pos += written;
1723        written_buffered = __btrfs_buffered_write(file, from, pos);
1724        if (written_buffered < 0) {
1725                err = written_buffered;
1726                goto out;
1727        }
1728        /*
1729         * Ensure all data is persisted. We want the next direct IO read to be
1730         * able to read what was just written.
1731         */
1732        endbyte = pos + written_buffered - 1;
1733        err = btrfs_fdatawrite_range(inode, pos, endbyte);
1734        if (err)
1735                goto out;
1736        err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1737        if (err)
1738                goto out;
1739        written += written_buffered;
1740        iocb->ki_pos = pos + written_buffered;
1741        invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1742                                 endbyte >> PAGE_SHIFT);
1743out:
1744        return written ? written : err;
1745}
1746
1747static void update_time_for_write(struct inode *inode)
1748{
1749        struct timespec now;
1750
1751        if (IS_NOCMTIME(inode))
1752                return;
1753
1754        now = current_fs_time(inode->i_sb);
1755        if (!timespec_equal(&inode->i_mtime, &now))
1756                inode->i_mtime = now;
1757
1758        if (!timespec_equal(&inode->i_ctime, &now))
1759                inode->i_ctime = now;
1760
1761        if (IS_I_VERSION(inode))
1762                inode_inc_iversion(inode);
1763}
1764
1765static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1766                                    struct iov_iter *from)
1767{
1768        struct file *file = iocb->ki_filp;
1769        struct inode *inode = file_inode(file);
1770        struct btrfs_root *root = BTRFS_I(inode)->root;
1771        u64 start_pos;
1772        u64 end_pos;
1773        ssize_t num_written = 0;
1774        bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
1775        ssize_t err;
1776        loff_t pos;
1777        size_t count;
1778        loff_t oldsize;
1779        int clean_page = 0;
1780
1781        inode_lock(inode);
1782        err = generic_write_checks(iocb, from);
1783        if (err <= 0) {
1784                inode_unlock(inode);
1785                return err;
1786        }
1787
1788        current->backing_dev_info = inode_to_bdi(inode);
1789        err = file_remove_privs(file);
1790        if (err) {
1791                inode_unlock(inode);
1792                goto out;
1793        }
1794
1795        /*
1796         * If BTRFS flips readonly due to some impossible error
1797         * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1798         * although we have opened a file as writable, we have
1799         * to stop this write operation to ensure FS consistency.
1800         */
1801        if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
1802                inode_unlock(inode);
1803                err = -EROFS;
1804                goto out;
1805        }
1806
1807        /*
1808         * We reserve space for updating the inode when we reserve space for the
1809         * extent we are going to write, so we will enospc out there.  We don't
1810         * need to start yet another transaction to update the inode as we will
1811         * update the inode when we finish writing whatever data we write.
1812         */
1813        update_time_for_write(inode);
1814
1815        pos = iocb->ki_pos;
1816        count = iov_iter_count(from);
1817        start_pos = round_down(pos, root->sectorsize);
1818        oldsize = i_size_read(inode);
1819        if (start_pos > oldsize) {
1820                /* Expand hole size to cover write data, preventing empty gap */
1821                end_pos = round_up(pos + count, root->sectorsize);
1822                err = btrfs_cont_expand(inode, oldsize, end_pos);
1823                if (err) {
1824                        inode_unlock(inode);
1825                        goto out;
1826                }
1827                if (start_pos > round_up(oldsize, root->sectorsize))
1828                        clean_page = 1;
1829        }
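        /*
         * (Worked example, assuming a 4096 byte sectorsize: with
         * oldsize == 1000 and a write at pos == 10000, start_pos == 8192 is
         * beyond EOF, so btrfs_cont_expand() fills the hole up to
         * round_up(pos + count, 4096) before the write proceeds.)
         */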
1830
1831        if (sync)
1832                atomic_inc(&BTRFS_I(inode)->sync_writers);
1833
1834        if (iocb->ki_flags & IOCB_DIRECT) {
1835                num_written = __btrfs_direct_write(iocb, from, pos);
1836        } else {
1837                num_written = __btrfs_buffered_write(file, from, pos);
1838                if (num_written > 0)
1839                        iocb->ki_pos = pos + num_written;
1840                if (clean_page)
1841                        pagecache_isize_extended(inode, oldsize,
1842                                                i_size_read(inode));
1843        }
1844
1845        inode_unlock(inode);
1846
1847        /*
1848         * We also have to set last_sub_trans to the current log transid,
1849         * otherwise subsequent syncs to a file that's been synced in this
1850         * transaction will appear to have already occurred.
1851         */
1852        spin_lock(&BTRFS_I(inode)->lock);
1853        BTRFS_I(inode)->last_sub_trans = root->log_transid;
1854        spin_unlock(&BTRFS_I(inode)->lock);
1855        if (num_written > 0) {
1856                err = generic_write_sync(file, pos, num_written);
1857                if (err < 0)
1858                        num_written = err;
1859        }
1860
1861        if (sync)
1862                atomic_dec(&BTRFS_I(inode)->sync_writers);
1863out:
1864        current->backing_dev_info = NULL;
1865        return num_written ? num_written : err;
1866}
1867
1868int btrfs_release_file(struct inode *inode, struct file *filp)
1869{
1870        if (filp->private_data)
1871                btrfs_ioctl_trans_end(filp);
1872        /*
1873         * ordered_data_close is set by setattr when we are about to truncate
1874         * a file from a non-zero size to a zero size.  This tries to
1875         * flush down new bytes that may have been written if the
1876         * application were using truncate to replace a file in place.
1877         */
1878        if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1879                               &BTRFS_I(inode)->runtime_flags))
1880                filemap_flush(inode->i_mapping);
1881        return 0;
1882}
1883
1884static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
1885{
1886        int ret;
1887
1888        atomic_inc(&BTRFS_I(inode)->sync_writers);
1889        ret = btrfs_fdatawrite_range(inode, start, end);
1890        atomic_dec(&BTRFS_I(inode)->sync_writers);
1891
1892        return ret;
1893}
1894
1895/*
1896 * fsync call for both files and directories.  This logs the inode into
1897 * the tree log instead of forcing full commits whenever possible.
1898 *
1899 * It needs to call filemap_fdatawait so that all ordered extent updates
1900 * in the metadata btree are up to date for copying to the log.
1901 *
1902 * It drops the inode mutex before doing the tree log commit.  This is an
1903 * important optimization for directories because holding the mutex prevents
1904 * new operations on the dir while we write to disk.
1905 */
1906int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1907{
1908        struct dentry *dentry = file_dentry(file);
1909        struct inode *inode = d_inode(dentry);
1910        struct btrfs_root *root = BTRFS_I(inode)->root;
1911        struct btrfs_trans_handle *trans;
1912        struct btrfs_log_ctx ctx;
1913        int ret = 0;
1914        bool full_sync = false;
1915        u64 len;
1916
1917        /*
1918         * The range length can be represented by a u64; we have to do the
1919         * typecasts to avoid signed overflow if it's [0, LLONG_MAX], e.g. from fsync()
1920         */
1921        len = (u64)end - (u64)start + 1;
1922        trace_btrfs_sync_file(file, datasync);
1923
1924        /*
1925         * We write the dirty pages in the range and wait for them to complete
1926         * outside of the ->i_mutex, so that multiple tasks can flush dirty
1927         * pages concurrently and improve performance.  See
1928         * btrfs_wait_ordered_range for an explanation of the ASYNC check.
1929         */
1930        ret = start_ordered_ops(inode, start, end);
1931        if (ret)
1932                return ret;
1933
1934        inode_lock(inode);
1935        atomic_inc(&root->log_batch);
1936        full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1937                             &BTRFS_I(inode)->runtime_flags);
1938        /*
1939         * We might have had more pages made dirty after calling
1940         * start_ordered_ops and before acquiring the inode's i_mutex.
1941         */
1942        if (full_sync) {
1943                /*
1944                 * For a full sync, we need to make sure any ordered operations
1945                 * start and finish before we start logging the inode, so that
1946                 * all extents are persisted and the respective file extent
1947                 * items are in the fs/subvol btree.
1948                 */
1949                ret = btrfs_wait_ordered_range(inode, start, len);
1950        } else {
1951                /*
1952                 * Start any new ordered operations before starting to log the
1953                 * inode. We will wait for them to finish in btrfs_sync_log().
1954                 *
1955                 * Right before acquiring the inode's mutex, we might have new
1956                 * writes dirtying pages, which won't immediately start the
1957                 * respective ordered operations - that is done through the
1958                 * fill_delalloc callbacks invoked from the writepage and
1959                 * writepages address space operations. So make sure we start
1960                 * all ordered operations before starting to log our inode. Not
1961                 * doing this means that while logging the inode, writeback
1962                 * could start and invoke writepage/writepages, which would call
1963                 * the fill_delalloc callbacks (cow_file_range,
1964                 * submit_compressed_extents). These callbacks add first an
1965                 * extent map to the modified list of extents and then create
1966                 * the respective ordered operation, which means in
1967                 * tree-log.c:btrfs_log_inode() we might capture all existing
1968                 * ordered operations (with btrfs_get_logged_extents()) before
1969                 * the fill_delalloc callback adds its ordered operation, and by
1970                 * the time we visit the modified list of extent maps (with
1971                 * btrfs_log_changed_extents()), we see and process the extent
1972                 * map they created. We then use the extent map to construct a
1973                 * file extent item for logging without waiting for the
1974                 * respective ordered operation to finish - this file extent
1975                 * item points to a disk location that might not have yet been
1976                 * written to, containing random data - so after a crash a log
1977                 * replay will make our inode have file extent items that point
1978                 * to disk locations containing invalid data, as we returned
1979                 * success to userspace without waiting for the respective
1980                 * ordered operation to finish, because it wasn't captured by
1981                 * btrfs_get_logged_extents().
1982                 */
1983                ret = start_ordered_ops(inode, start, end);
1984        }
1985        if (ret) {
1986                inode_unlock(inode);
1987                goto out;
1988        }
1989        atomic_inc(&root->log_batch);
1990
1991        /*
1992         * If the last transaction that changed this file was before the current
1993         * transaction and we have the full sync flag set in our inode, we can
1994         * bail out now without any syncing.
1995         *
1996         * Note that we can't bail out if the full sync flag isn't set. This is
1997         * because when the full sync flag is set we start all ordered extents
1998         * and wait for them to fully complete - when they complete they update
1999         * the inode's last_trans field through:
2000         *
2001         *     btrfs_finish_ordered_io() ->
2002         *         btrfs_update_inode_fallback() ->
2003         *             btrfs_update_inode() ->
2004         *                 btrfs_set_inode_last_trans()
2005         *
2006         * So we are sure that last_trans is up to date and can do this check to
2007         * bail out safely. For the fast path, when the full sync flag is not
2008         * set in our inode, we cannot do it because we only start our ordered
2009         * extents and don't wait for them to complete (that is when
2010         * btrfs_finish_ordered_io runs), so at this point their last_trans
2011         * value might be less than or equal to fs_info->last_trans_committed,
2012         * and setting a speculative last_trans for an inode when a buffered
2013         * write is made (such as fs_info->generation + 1 for example) would not
2014         * be reliable since after setting the value and before fsync is called
2015         * any number of transactions can start and commit (transaction kthread
2016         * commits the current transaction periodically), and a transaction
2017         * commit does not start nor waits for ordered extents to complete.
2018         */
2019        smp_mb();
2020        if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
2021            (full_sync && BTRFS_I(inode)->last_trans <=
2022             root->fs_info->last_trans_committed) ||
2023            (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
2024             BTRFS_I(inode)->last_trans
2025             <= root->fs_info->last_trans_committed)) {
2026                /*
2027                 * We've had everything committed since the last time we were
2028                 * modified so clear this flag in case it was set for whatever
2029                 * reason, it's no longer relevant.
2030                 */
2031                clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2032                          &BTRFS_I(inode)->runtime_flags);
2033                inode_unlock(inode);
2034                goto out;
2035        }
2036
2037        /*
2038         * OK, we haven't committed the transaction yet, let's do a commit
2039         */
2040        if (file->private_data)
2041                btrfs_ioctl_trans_end(file);
2042
2043        /*
2044         * We use start here because we will need to wait on the IO to complete
2045         * in btrfs_sync_log, which could require joining a transaction (for
2046         * example checking cross references in the nocow path).  If we use join
2047         * here we could get into a situation where we're waiting on IO to
2048         * happen that is blocked on a transaction trying to commit.  With start
2049         * we inc the extwriter counter, so we wait for all extwriters to exit
2050         * before we start blocking join'ers.  This comment is to keep somebody
2051         * from thinking they are super smart and changing this to
2052         * btrfs_join_transaction *cough*Josef*cough*.
2053         */
2054        trans = btrfs_start_transaction(root, 0);
2055        if (IS_ERR(trans)) {
2056                ret = PTR_ERR(trans);
2057                inode_unlock(inode);
2058                goto out;
2059        }
2060        trans->sync = true;
2061
2062        btrfs_init_log_ctx(&ctx);
2063
2064        ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
2065        if (ret < 0) {
2066                /* Fallthrough and commit/free transaction. */
2067                ret = 1;
2068        }
2069
2070        /* we've logged all the items and now have a consistent
2071         * version of the file in the log.  It is possible that
2072         * someone will come in and modify the file, but that's
2073         * fine because the log is consistent on disk, and we
2074         * have references to all of the file's extents
2075         *
2076         * It is possible that someone will come in and log the
2077         * file again, but that will end up using the synchronization
2078         * inside btrfs_sync_log to keep things safe.
2079         */
2080        inode_unlock(inode);
2081
2082        /*
2083         * If any of the ordered extents had an error, just return it to user
2084         * space, so that the application knows some writes didn't succeed and
2085         * can take proper action (retry for e.g.). Blindly committing the
2086         * transaction in this case, would fool userspace that everything was
2087         * successful. And we also want to make sure our log doesn't contain
2088         * file extent items pointing to extents that weren't fully written to -
2089         * just like in the non fast fsync path, where we check for the ordered
2090         * operation's error flag before writing to the log tree and return -EIO
2091         * if any of them had this flag set (btrfs_wait_ordered_range) -
2092         * therefore we need to check for errors in the ordered operations,
2093         * which are indicated by ctx.io_err.
2094         */
2095        if (ctx.io_err) {
2096                btrfs_end_transaction(trans, root);
2097                ret = ctx.io_err;
2098                goto out;
2099        }
2100
2101        if (ret != BTRFS_NO_LOG_SYNC) {
2102                if (!ret) {
2103                        ret = btrfs_sync_log(trans, root, &ctx);
2104                        if (!ret) {
2105                                ret = btrfs_end_transaction(trans, root);
2106                                goto out;
2107                        }
2108                }
2109                if (!full_sync) {
2110                        ret = btrfs_wait_ordered_range(inode, start, len);
2111                        if (ret) {
2112                                btrfs_end_transaction(trans, root);
2113                                goto out;
2114                        }
2115                }
2116                ret = btrfs_commit_transaction(trans, root);
2117        } else {
2118                ret = btrfs_end_transaction(trans, root);
2119        }
2120out:
2121        return ret > 0 ? -EIO : ret;
2122}
2123
2124static const struct vm_operations_struct btrfs_file_vm_ops = {
2125        .fault          = filemap_fault,
2126        .map_pages      = filemap_map_pages,
2127        .page_mkwrite   = btrfs_page_mkwrite,
2128};
2129
2130static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2131{
2132        struct address_space *mapping = filp->f_mapping;
2133
2134        if (!mapping->a_ops->readpage)
2135                return -ENOEXEC;
2136
2137        file_accessed(filp);
2138        vma->vm_ops = &btrfs_file_vm_ops;
2139
2140        return 0;
2141}
2142
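/*
 * (Descriptive note: returns 1 when the item at @slot is a hole-style
 * regular file extent, i.e. one with disk_bytenr == 0, that either starts
 * exactly at @end or ends exactly at @start and can therefore be merged
 * with the hole being recorded; returns 0 otherwise.)
 */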
2143static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
2144                          int slot, u64 start, u64 end)
2145{
2146        struct btrfs_file_extent_item *fi;
2147        struct btrfs_key key;
2148
2149        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2150                return 0;
2151
2152        btrfs_item_key_to_cpu(leaf, &key, slot);
2153        if (key.objectid != btrfs_ino(inode) ||
2154            key.type != BTRFS_EXTENT_DATA_KEY)
2155                return 0;
2156
2157        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2158
2159        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2160                return 0;
2161
2162        if (btrfs_file_extent_disk_bytenr(leaf, fi))
2163                return 0;
2164
2165        if (key.offset == end)
2166                return 1;
2167        if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2168                return 1;
2169        return 0;
2170}
2171
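/*
 * (Descriptive note: records the hole [offset, end) left by a punch by
 * extending an adjacent hole extent item or inserting a new one, and then
 * caches a matching extent map so the fast fsync path can log the hole.
 * With the NO_HOLES feature only the extent map step is performed, since
 * no hole items exist on disk.)
 */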
2172static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
2173                      struct btrfs_path *path, u64 offset, u64 end)
2174{
2175        struct btrfs_root *root = BTRFS_I(inode)->root;
2176        struct extent_buffer *leaf;
2177        struct btrfs_file_extent_item *fi;
2178        struct extent_map *hole_em;
2179        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2180        struct btrfs_key key;
2181        int ret;
2182
2183        if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
2184                goto out;
2185
2186        key.objectid = btrfs_ino(inode);
2187        key.type = BTRFS_EXTENT_DATA_KEY;
2188        key.offset = offset;
2189
2190        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2191        if (ret < 0)
2192                return ret;
2193        BUG_ON(!ret);
2194
2195        leaf = path->nodes[0];
2196        if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2197                u64 num_bytes;
2198
2199                path->slots[0]--;
2200                fi = btrfs_item_ptr(leaf, path->slots[0],
2201                                    struct btrfs_file_extent_item);
2202                num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2203                        end - offset;
2204                btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2205                btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2206                btrfs_set_file_extent_offset(leaf, fi, 0);
2207                btrfs_mark_buffer_dirty(leaf);
2208                goto out;
2209        }
2210
2211        if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2212                u64 num_bytes;
2213
2214                key.offset = offset;
2215                btrfs_set_item_key_safe(root->fs_info, path, &key);
2216                fi = btrfs_item_ptr(leaf, path->slots[0],
2217                                    struct btrfs_file_extent_item);
2218                num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2219                        offset;
2220                btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2221                btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2222                btrfs_set_file_extent_offset(leaf, fi, 0);
2223                btrfs_mark_buffer_dirty(leaf);
2224                goto out;
2225        }
2226        btrfs_release_path(path);
2227
2228        ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
2229                                       0, 0, end - offset, 0, end - offset,
2230                                       0, 0, 0);
2231        if (ret)
2232                return ret;
2233
2234out:
2235        btrfs_release_path(path);
2236
2237        hole_em = alloc_extent_map();
2238        if (!hole_em) {
2239                btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2240                set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2241                        &BTRFS_I(inode)->runtime_flags);
2242        } else {
2243                hole_em->start = offset;
2244                hole_em->len = end - offset;
2245                hole_em->ram_bytes = hole_em->len;
2246                hole_em->orig_start = offset;
2247
2248                hole_em->block_start = EXTENT_MAP_HOLE;
2249                hole_em->block_len = 0;
2250                hole_em->orig_block_len = 0;
2251                hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
2252                hole_em->compress_type = BTRFS_COMPRESS_NONE;
2253                hole_em->generation = trans->transid;
2254
2255                do {
2256                        btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2257                        write_lock(&em_tree->lock);
2258                        ret = add_extent_mapping(em_tree, hole_em, 1);
2259                        write_unlock(&em_tree->lock);
2260                } while (ret == -EEXIST);
2261                free_extent_map(hole_em);
2262                if (ret)
2263                        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2264                                &BTRFS_I(inode)->runtime_flags);
2265        }
2266
2267        return 0;
2268}
2269
2270/*
2271 * Find a hole extent on the given inode and change start/len to the end of
2272 * the hole extent (a hole/vacuum extent whose em->start <= start &&
2273 *                   em->start + em->len > start).
2274 * When a hole extent is found, return 1 and modify start/len.
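 * (Worked example: with *start == 10000, *len == 6000 and a hole extent
 *  map covering [8192, 16384), this returns 1 and sets *start to 16384
 *  and *len to 0, since the hole extends past the end of the range.)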
2275 */
2276static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2277{
2278        struct extent_map *em;
2279        int ret = 0;
2280
2281        em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0);
2282        if (IS_ERR_OR_NULL(em)) {
2283                if (!em)
2284                        ret = -ENOMEM;
2285                else
2286                        ret = PTR_ERR(em);
2287                return ret;
2288        }
2289
2290        /* Hole or vacuum extent (only exists in no-hole mode) */
2291        if (em->block_start == EXTENT_MAP_HOLE) {
2292                ret = 1;
2293                *len = em->start + em->len > *start + *len ?
2294                       0 : *start + *len - em->start - em->len;
2295                *start = em->start + em->len;
2296        }
2297        free_extent_map(em);
2298        return ret;
2299}
2300
2301static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2302{
2303        struct btrfs_root *root = BTRFS_I(inode)->root;
2304        struct extent_state *cached_state = NULL;
2305        struct btrfs_path *path;
2306        struct btrfs_block_rsv *rsv;
2307        struct btrfs_trans_handle *trans;
2308        u64 lockstart;
2309        u64 lockend;
2310        u64 tail_start;
2311        u64 tail_len;
2312        u64 orig_start = offset;
2313        u64 cur_offset;
2314        u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
2315        u64 drop_end;
2316        int ret = 0;
2317        int err = 0;
2318        unsigned int rsv_count;
2319        bool same_block;
2320        bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
2321        u64 ino_size;
2322        bool truncated_block = false;
2323        bool updated_inode = false;
2324
2325        ret = btrfs_wait_ordered_range(inode, offset, len);
2326        if (ret)
2327                return ret;
2328
2329        inode_lock(inode);
2330        ino_size = round_up(inode->i_size, root->sectorsize);
2331        ret = find_first_non_hole(inode, &offset, &len);
2332        if (ret < 0)
2333                goto out_only_mutex;
2334        if (ret && !len) {
2335                /* Already in a large hole */
2336                ret = 0;
2337                goto out_only_mutex;
2338        }
2339
2340        lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
2341        lockend = round_down(offset + len,
2342                             BTRFS_I(inode)->root->sectorsize) - 1;
2343        same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset))
2344                == (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1));
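        /*
         * (Worked example, assuming a 4096 byte sectorsize: punching
         * offset == 3000, len == 10000 gives lockstart == 4096 and
         * lockend == 12287; the unaligned head and tail are handled by
         * zeroing within their blocks rather than by dropping extents.)
         */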
2345        /*
2346         * We needn't truncate any block which is beyond the end of the file
2347         * because we are sure there is no data there.
2348         */
2349        /*
2350         * Only do this if we are in the same block and we aren't doing the
2351         * entire block.
2352         */
2353        if (same_block && len < root->sectorsize) {
2354                if (offset < ino_size) {
2355                        truncated_block = true;
2356                        ret = btrfs_truncate_block(inode, offset, len, 0);
2357                } else {
2358                        ret = 0;
2359                }
2360                goto out_only_mutex;
2361        }
2362
2363        /* zero back part of the first block */
2364        if (offset < ino_size) {
2365                truncated_block = true;
2366                ret = btrfs_truncate_block(inode, offset, 0, 0);
2367                if (ret) {
2368                        inode_unlock(inode);
2369                        return ret;
2370                }
2371        }
2372
2373        /* Check the aligned pages after the first unaligned page.
2374         * If offset != orig_start, the first unaligned page and
2375         * several following pages are already in holes, so the
2376         * extra check can be skipped */
2377        if (offset == orig_start) {
2378                /* after truncate page, check hole again */
2379                len = offset + len - lockstart;
2380                offset = lockstart;
2381                ret = find_first_non_hole(inode, &offset, &len);
2382                if (ret < 0)
2383                        goto out_only_mutex;
2384                if (ret && !len) {
2385                        ret = 0;
2386                        goto out_only_mutex;
2387                }
2388                lockstart = offset;
2389        }
2390
2391        /* Check whether the tail unaligned part is in a hole */
2392        tail_start = lockend + 1;
2393        tail_len = offset + len - tail_start;
2394        if (tail_len) {
2395                ret = find_first_non_hole(inode, &tail_start, &tail_len);
2396                if (unlikely(ret < 0))
2397                        goto out_only_mutex;
2398                if (!ret) {
2399                        /* zero the front end of the last page */
2400                        if (tail_start + tail_len < ino_size) {
2401                                truncated_block = true;
2402                                ret = btrfs_truncate_block(inode,
2403                                                        tail_start + tail_len,
2404                                                        0, 1);
2405                                if (ret)
2406                                        goto out_only_mutex;
2407                        }
2408                }
2409        }
2410
2411        if (lockend < lockstart) {
2412                ret = 0;
2413                goto out_only_mutex;
2414        }
2415
2416        while (1) {
2417                struct btrfs_ordered_extent *ordered;
2418
2419                truncate_pagecache_range(inode, lockstart, lockend);
2420
2421                lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2422                                 &cached_state);
2423                ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2424
2425                /*
2426                 * We need to make sure we have no ordered extents in this range
2427                 * and nobody raced in and read a page in this range, if we did
2428                 * we need to try again.
2429                 */
2430                if ((!ordered ||
2431                    (ordered->file_offset + ordered->len <= lockstart ||
2432                     ordered->file_offset > lockend)) &&
2433                     !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
2434                        if (ordered)
2435                                btrfs_put_ordered_extent(ordered);
2436                        break;
2437                }
2438                if (ordered)
2439                        btrfs_put_ordered_extent(ordered);
2440                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2441                                     lockend, &cached_state, GFP_NOFS);
2442                ret = btrfs_wait_ordered_range(inode, lockstart,
2443                                               lockend - lockstart + 1);
2444                if (ret) {
2445                        inode_unlock(inode);
2446                        return ret;
2447                }
2448        }
2449
2450        path = btrfs_alloc_path();
2451        if (!path) {
2452                ret = -ENOMEM;
2453                goto out;
2454        }
2455
2456        rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2457        if (!rsv) {
2458                ret = -ENOMEM;
2459                goto out_free;
2460        }
2461        rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
2462        rsv->failfast = 1;
2463
2464        /*
2465         * 1 - updating the inode
2466         * 1 - removing the extents in the range
2467         * 1 - adding the hole extent if no_holes isn't set
2468         */
2469        rsv_count = no_holes ? 2 : 3;
2470        trans = btrfs_start_transaction(root, rsv_count);
2471        if (IS_ERR(trans)) {
2472                err = PTR_ERR(trans);
2473                goto out_free;
2474        }
2475
2476        ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
2477                                      min_size);
2478        BUG_ON(ret);
2479        trans->block_rsv = rsv;
2480
2481        cur_offset = lockstart;
2482        len = lockend - cur_offset;
2483        while (cur_offset < lockend) {
2484                ret = __btrfs_drop_extents(trans, root, inode, path,
2485                                           cur_offset, lockend + 1,
2486                                           &drop_end, 1, 0, 0, NULL);
2487                if (ret != -ENOSPC)
2488                        break;
2489
2490                trans->block_rsv = &root->fs_info->trans_block_rsv;
2491
2492                if (cur_offset < ino_size) {
2493                        ret = fill_holes(trans, inode, path, cur_offset,
2494                                         drop_end);
2495                        if (ret) {
2496                                err = ret;
2497                                break;
2498                        }
2499                }
2500
2501                cur_offset = drop_end;
2502
2503                ret = btrfs_update_inode(trans, root, inode);
2504                if (ret) {
2505                        err = ret;
2506                        break;
2507                }
2508
2509                btrfs_end_transaction(trans, root);
2510                btrfs_btree_balance_dirty(root);
2511
2512                trans = btrfs_start_transaction(root, rsv_count);
2513                if (IS_ERR(trans)) {
2514                        ret = PTR_ERR(trans);
2515                        trans = NULL;
2516                        break;
2517                }
2518
2519                ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
2520                                              rsv, min_size);
2521                BUG_ON(ret);    /* shouldn't happen */
2522                trans->block_rsv = rsv;
2523
2524                ret = find_first_non_hole(inode, &cur_offset, &len);
2525                if (unlikely(ret < 0))
2526                        break;
2527                if (ret && !len) {
2528                        ret = 0;
2529                        break;
2530                }
2531        }
2532
2533        if (ret) {
2534                err = ret;
2535                goto out_trans;
2536        }
2537
2538        trans->block_rsv = &root->fs_info->trans_block_rsv;
2539        /*
2540         * If we are using the NO_HOLES feature we might already have had a
2541         * hole that overlaps a part of the region [lockstart, lockend] and
2542         * ends at (or beyond) lockend. Since we have no file extent items to
2543         * represent holes, drop_end can be less than lockend and so we must
2544         * make sure we have an extent map representing the existing hole (the
2545         * call to __btrfs_drop_extents() might have dropped the existing extent
2546         * map representing the existing hole), otherwise the fast fsync path
2547         * will not record the existence of the hole region
2548         * [existing_hole_start, lockend].
2549         */
2550        if (drop_end <= lockend)
2551                drop_end = lockend + 1;
2552        /*
2553         * Don't insert file hole extent item if it's for a range beyond eof
2554         * (because it's useless) or if it represents a 0 bytes range (when
2555         * cur_offset == drop_end).
2556         */
2557        if (cur_offset < ino_size && cur_offset < drop_end) {
2558                ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2559                if (ret) {
2560                        err = ret;
2561                        goto out_trans;
2562                }
2563        }
2564
2565out_trans:
2566        if (!trans)
2567                goto out_free;
2568
2569        inode_inc_iversion(inode);
2570        inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
2571
2572        trans->block_rsv = &root->fs_info->trans_block_rsv;
2573        ret = btrfs_update_inode(trans, root, inode);
2574        updated_inode = true;
2575        btrfs_end_transaction(trans, root);
2576        btrfs_btree_balance_dirty(root);
2577out_free:
2578        btrfs_free_path(path);
2579        btrfs_free_block_rsv(root, rsv);
2580out:
2581        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2582                             &cached_state, GFP_NOFS);
2583out_only_mutex:
2584        if (!updated_inode && truncated_block && !ret && !err) {
2585                /*
2586                 * If we only end up zeroing part of a page, we still need to
2587                 * update the inode item, so that all the time fields are
2588                 * updated, as well as the in-memory btrfs inode fields needed
2589                 * for detecting, at fsync time, whether the inode isn't yet in
2590                 * the log tree or is there but not up to date.
2591                 */
2592                trans = btrfs_start_transaction(root, 1);
2593                if (IS_ERR(trans)) {
2594                        err = PTR_ERR(trans);
2595                } else {
2596                        err = btrfs_update_inode(trans, root, inode);
2597                        ret = btrfs_end_transaction(trans, root);
2598                }
2599        }
2600        inode_unlock(inode);
2601        if (ret && !err)
2602                err = ret;
2603        return err;
2604}
2605
2606/* Helper structure to record which range is already reserved */
2607struct falloc_range {
2608        struct list_head list;
2609        u64 start;
2610        u64 len;
2611};
2612
2613/*
2614 * Helper function to add falloc range
2615 *
2616 * Caller should have locked the larger extent range containing
2617 * [start, start + len)
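 * (Worked example: add_falloc_range(head, 0, 4096) followed by
 *  add_falloc_range(head, 4096, 8192) leaves a single merged range
 *  covering [0, 12288), since the second call extends the first.)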
2618 */
2619static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2620{
2621        struct falloc_range *prev = NULL;
2622        struct falloc_range *range = NULL;
2623
2624        if (list_empty(head))
2625                goto insert;
2626
2627        /*
2628         * As fallocate iterates by bytenr order, we only need to check
2629         * the last range.
2630         */
2631        prev = list_entry(head->prev, struct falloc_range, list);
2632        if (prev->start + prev->len == start) {
2633                prev->len += len;
2634                return 0;
2635        }
2636insert:
2637        range = kmalloc(sizeof(*range), GFP_KERNEL);
2638        if (!range)
2639                return -ENOMEM;
2640        range->start = start;
2641        range->len = len;
2642        list_add_tail(&range->list, head);
2643        return 0;
2644}
2645
2646static long btrfs_fallocate(struct file *file, int mode,
2647                            loff_t offset, loff_t len)
2648{
2649        struct inode *inode = file_inode(file);
2650        struct extent_state *cached_state = NULL;
2651        struct falloc_range *range;
2652        struct falloc_range *tmp;
2653        struct list_head reserve_list;
2654        u64 cur_offset;
2655        u64 last_byte;
2656        u64 alloc_start;
2657        u64 alloc_end;
2658        u64 alloc_hint = 0;
2659        u64 locked_end;
2660        u64 actual_end = 0;
2661        struct extent_map *em;
2662        int blocksize = BTRFS_I(inode)->root->sectorsize;
2663        int ret;
2664
2665        alloc_start = round_down(offset, blocksize);
2666        alloc_end = round_up(offset + len, blocksize);
2667
2668        /* Make sure we aren't being given some crap mode */
2669        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2670                return -EOPNOTSUPP;
2671
2672        if (mode & FALLOC_FL_PUNCH_HOLE)
2673                return btrfs_punch_hole(inode, offset, len);
2674
2675        /*
2676         * Only trigger the disk allocation here, don't trigger the qgroup
2677         * reservation: qgroup space is checked and reserved later, range
2678         * by range.
2679         */
2680        ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start);
2681        if (ret < 0)
2682                return ret;
2683
2684        inode_lock(inode);
2685
2686        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
2687                ret = inode_newsize_ok(inode, offset + len);
2688                if (ret)
2689                        goto out;
2690        }
2691
2692        /*
2693         * TODO: Move these two operations after we have checked the
2694         * accurate reserved space, otherwise fallocate can still fail,
2695         * but with the page truncated or the size expanded.
2696         *
2697         * That's a minor problem, though, and won't do much harm.
2698         */
2699        if (alloc_start > inode->i_size) {
2700                ret = btrfs_cont_expand(inode, i_size_read(inode),
2701                                        alloc_start);
2702                if (ret)
2703                        goto out;
2704        } else if (offset + len > inode->i_size) {
2705                /*
2706                 * If we are fallocating from the end of the file onward we
2707                 * need to zero out the end of the block if i_size lands in the
2708                 * middle of a block.
2709                 */
2710                ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
2711                if (ret)
2712                        goto out;
2713        }
2714
2715        /*
2716         * Wait for ordered IO before we take any locks.  We'll loop again
2717         * below with the locks held.
2718         */
2719        ret = btrfs_wait_ordered_range(inode, alloc_start,
2720                                       alloc_end - alloc_start);
2721        if (ret)
2722                goto out;
2723
2724        locked_end = alloc_end - 1;
2725        while (1) {
2726                struct btrfs_ordered_extent *ordered;
2727
2728                /* the extent lock is ordered inside the running
2729                 * transaction
2730                 */
2731                lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2732                                 locked_end, &cached_state);
2733                ordered = btrfs_lookup_first_ordered_extent(inode,
2734                                                            alloc_end - 1);
2735                if (ordered &&
2736                    ordered->file_offset + ordered->len > alloc_start &&
2737                    ordered->file_offset < alloc_end) {
2738                        btrfs_put_ordered_extent(ordered);
2739                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2740                                             alloc_start, locked_end,
2741                                             &cached_state, GFP_KERNEL);
2742                        /*
2743                         * we can't wait on the range with the transaction
2744                         * running or with the extent lock held
2745                         */
2746                        ret = btrfs_wait_ordered_range(inode, alloc_start,
2747                                                       alloc_end - alloc_start);
2748                        if (ret)
2749                                goto out;
2750                } else {
2751                        if (ordered)
2752                                btrfs_put_ordered_extent(ordered);
2753                        break;
2754                }
2755        }
2756
2757        /* First, check if we exceed the qgroup limit */
2758        INIT_LIST_HEAD(&reserve_list);
2759        cur_offset = alloc_start;
2760        while (1) {
2761                em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2762                                      alloc_end - cur_offset, 0);
2763                if (IS_ERR_OR_NULL(em)) {
2764                        if (!em)
2765                                ret = -ENOMEM;
2766                        else
2767                                ret = PTR_ERR(em);
2768                        break;
2769                }
2770                last_byte = min(extent_map_end(em), alloc_end);
2771                actual_end = min_t(u64, extent_map_end(em), offset + len);
2772                last_byte = ALIGN(last_byte, blocksize);
2773                if (em->block_start == EXTENT_MAP_HOLE ||
2774                    (cur_offset >= inode->i_size &&
2775                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2776                        ret = add_falloc_range(&reserve_list, cur_offset,
2777                                               last_byte - cur_offset);
2778                        if (ret < 0) {
2779                                free_extent_map(em);
2780                                break;
2781                        }
2782                        ret = btrfs_qgroup_reserve_data(inode, cur_offset,
2783                                        last_byte - cur_offset);
2784                        if (ret < 0) {
                                    /* don't leak the extent map on error */
                                    free_extent_map(em);
2785                                break;
                            }
2786                }
2787                free_extent_map(em);
2788                cur_offset = last_byte;
2789                if (cur_offset >= alloc_end)
2790                        break;
2791        }
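            /*
             * Worked example (editorial): fallocating [0, 12K) over a file
             * with data at [0, 4K) and a hole at [4K, 12K) walks two extent
             * maps in the loop above; only the hole range is queued on
             * reserve_list and qgroup-reserved, while the range already
             * backed by an extent is skipped.
             */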
2792
2793        /*
2794         * If ret is still 0, it means we're OK to fallocate.
2795         * Otherwise just clean up the list and exit.
2796         */
2797        list_for_each_entry_safe(range, tmp, &reserve_list, list) {
2798                if (!ret)
2799                        ret = btrfs_prealloc_file_range(inode, mode,
2800                                        range->start,
2801                                        range->len, 1 << inode->i_blkbits,
2802                                        offset + len, &alloc_hint);
2803                list_del(&range->list);
2804                kfree(range);
2805        }
2806        if (ret < 0)
2807                goto out_unlock;
2808
2809        if (actual_end > inode->i_size &&
2810            !(mode & FALLOC_FL_KEEP_SIZE)) {
2811                struct btrfs_trans_handle *trans;
2812                struct btrfs_root *root = BTRFS_I(inode)->root;
2813
2814                /*
2815                 * We didn't need to allocate any more space, but we
2816                 * still extended the size of the file so we need to
2817                 * update i_size and the inode item.
2818                 */
2819                trans = btrfs_start_transaction(root, 1);
2820                if (IS_ERR(trans)) {
2821                        ret = PTR_ERR(trans);
2822                } else {
2823                        inode->i_ctime = current_fs_time(inode->i_sb);
2824                        i_size_write(inode, actual_end);
2825                        btrfs_ordered_update_i_size(inode, actual_end, NULL);
2826                        ret = btrfs_update_inode(trans, root, inode);
2827                        if (ret)
2828                                btrfs_end_transaction(trans, root);
2829                        else
2830                                ret = btrfs_end_transaction(trans, root);
2831                }
2832        }
2833out_unlock:
2834        unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2835                             &cached_state, GFP_KERNEL);
2836out:
2837        /*
2838         * As we have waited for the whole extent range, the data_rsv_map
2839         * must be empty in that range: any written data range has already
2840         * been released from it, and a preallocated extent is released
2841         * once its metadata is written.
2842         * So this call is purely cleanup.
2843         */
2844        btrfs_qgroup_free_data(inode, alloc_start, alloc_end - alloc_start);
2845        inode_unlock(inode);
2846        /* Let go of our reservation. */
2847        btrfs_free_reserved_data_space(inode, alloc_start,
2848                                       alloc_end - alloc_start);
2849        return ret;
2850}
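    /*
     * A minimal userspace sketch (editorial, hedged) of the two mode
     * combinations btrfs_fallocate() accepts: preallocation, with or
     * without FALLOC_FL_KEEP_SIZE, and hole punching, which the VFS only
     * allows together with FALLOC_FL_KEEP_SIZE.  The fd is assumed to be a
     * regular file on a btrfs mount.
     *
     *     #define _GNU_SOURCE
     *     #include <fcntl.h>
     *
     *     static int prealloc_then_punch(int fd)
     *     {
     *             // Preallocate 1 MiB at offset 0; i_size may grow.
     *             if (fallocate(fd, 0, 0, 1 << 20))
     *                     return -1;
     *             // Punch a 4 KiB hole at offset 0; file size is unchanged.
     *             return fallocate(fd, FALLOC_FL_PUNCH_HOLE |
     *                              FALLOC_FL_KEEP_SIZE, 0, 4096);
     *     }
     */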
2851
2852static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2853{
2854        struct btrfs_root *root = BTRFS_I(inode)->root;
2855        struct extent_map *em = NULL;
2856        struct extent_state *cached_state = NULL;
2857        u64 lockstart;
2858        u64 lockend;
2859        u64 start;
2860        u64 len;
2861        int ret = 0;
2862
2863        if (inode->i_size == 0)
2864                return -ENXIO;
2865
2866        /*
2867         * *offset can be negative; in that case we start searching for
2868         * DATA/HOLE from the very start of the file.
2869         */
2870        start = max_t(loff_t, 0, *offset);
2871
2872        lockstart = round_down(start, root->sectorsize);
2873        lockend = round_up(i_size_read(inode), root->sectorsize);
2874        if (lockend <= lockstart)
2875                lockend = lockstart + root->sectorsize;
2876        lockend--;
2877        len = lockend - lockstart + 1;
2878
2879        lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2880                         &cached_state);
2881
2882        while (start < inode->i_size) {
2883                em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2884                if (IS_ERR(em)) {
2885                        ret = PTR_ERR(em);
2886                        em = NULL;
2887                        break;
2888                }
2889
2890                if (whence == SEEK_HOLE &&
2891                    (em->block_start == EXTENT_MAP_HOLE ||
2892                     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2893                        break;
2894                else if (whence == SEEK_DATA &&
2895                           (em->block_start != EXTENT_MAP_HOLE &&
2896                            !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2897                        break;
2898
2899                start = em->start + em->len;
2900                free_extent_map(em);
2901                em = NULL;
2902                cond_resched();
2903        }
2904        free_extent_map(em);
2905        if (!ret) {
2906                if (whence == SEEK_DATA && start >= inode->i_size)
2907                        ret = -ENXIO;
2908                else
2909                        *offset = min_t(loff_t, start, inode->i_size);
2910        }
2911        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2912                             &cached_state, GFP_NOFS);
2913        return ret;
2914}
2915
2916static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
2917{
2918        struct inode *inode = file->f_mapping->host;
2919        int ret;
2920
2921        inode_lock(inode);
2922        switch (whence) {
2923        case SEEK_END:
2924        case SEEK_CUR:
2925                offset = generic_file_llseek(file, offset, whence);
2926                goto out;
2927        case SEEK_DATA:
2928        case SEEK_HOLE:
2929                if (offset >= i_size_read(inode)) {
2930                        inode_unlock(inode);
2931                        return -ENXIO;
2932                }
2933
2934                ret = find_desired_extent(inode, &offset, whence);
2935                if (ret) {
2936                        inode_unlock(inode);
2937                        return ret;
2938                }
2939        }
2940
2941        offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2942out:
2943        inode_unlock(inode);
2944        return offset;
2945}
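    /*
     * Editorial sketch (hedged): find_desired_extent() above implements the
     * SEEK_DATA/SEEK_HOLE semantics of lseek(2), e.g. for walking a sparse
     * file from userspace.  Offsets come from the extent maps and are
     * clamped to i_size.
     *
     *     #define _GNU_SOURCE
     *     #include <unistd.h>
     *
     *     static off_t next_data(int fd, off_t pos)
     *     {
     *             // Start of the next data region at or after pos, or -1
     *             // with errno == ENXIO when pos is beyond the last data.
     *             return lseek(fd, pos, SEEK_DATA);
     *     }
     */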
2946
2947const struct file_operations btrfs_file_operations = {
2948        .llseek         = btrfs_file_llseek,
2949        .read_iter      = generic_file_read_iter,
2950        .splice_read    = generic_file_splice_read,
2951        .write_iter     = btrfs_file_write_iter,
2952        .mmap           = btrfs_file_mmap,
2953        .open           = generic_file_open,
2954        .release        = btrfs_release_file,
2955        .fsync          = btrfs_sync_file,
2956        .fallocate      = btrfs_fallocate,
2957        .unlocked_ioctl = btrfs_ioctl,
2958#ifdef CONFIG_COMPAT
2959        .compat_ioctl   = btrfs_ioctl,
2960#endif
2961        .copy_file_range = btrfs_copy_file_range,
2962        .clone_file_range = btrfs_clone_file_range,
2963        .dedupe_file_range = btrfs_dedupe_file_range,
2964};
2965
2966void btrfs_auto_defrag_exit(void)
2967{
2968        kmem_cache_destroy(btrfs_inode_defrag_cachep);
2969}
2970
2971int btrfs_auto_defrag_init(void)
2972{
2973        btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
2974                                        sizeof(struct inode_defrag), 0,
2975                                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2976                                        NULL);
2977        if (!btrfs_inode_defrag_cachep)
2978                return -ENOMEM;
2979
2980        return 0;
2981}
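    /*
     * Editorial sketch (hedged): objects from this cache are paired with
     * the usual slab alloc/free helpers, e.g.:
     *
     *     struct inode_defrag *d;
     *
     *     d = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
     *     if (!d)
     *             return -ENOMEM;
     *     // ... fill in ino/root/transid and insert into the defrag tree ...
     *     kmem_cache_free(btrfs_inode_defrag_cachep, d);
     */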
2982
2983int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
2984{
2985        int ret;
2986
2987        /*
2988         * With compression we will find and lock a dirty page, clear the
2989         * first one as dirty, set up an async extent, and immediately return
2990         * with the entire range locked but with no pages actually marked for
2991         * writeback.  So we can't just filemap_write_and_wait_range() and
2992         * expect it to work, since it will just kick off a thread to do the
2993         * actual work.  We need to call filemap_fdatawrite_range _again_
2994         * since it will wait on the page lock, which won't be unlocked until
2995         * after the pages have been marked as writeback, and so we're good to
2996         * go from there.  We have to do this, otherwise we'll miss the ordered
2997         * extents and that results in badness.  Please Josef, do not think you
2998         * know better and pull this out at some point in the future, it is
2999         * right and you are wrong.
3000         */
3001        ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3002        if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3003                             &BTRFS_I(inode)->runtime_flags))
3004                ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3005
3006        return ret;
3007}
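    /*
     * Editorial sketch (hedged; the real callers live in the fsync and
     * ordered-extent paths): btrfs_fdatawrite_range() is paired with a
     * separate wait rather than filemap_write_and_wait_range(), so that
     * the second write pass above can catch async compressed extents
     * first.  example_write_and_wait() is a hypothetical name.
     *
     *     static int example_write_and_wait(struct inode *inode,
     *                                       loff_t start, loff_t end)
     *     {
     *             int ret;
     *
     *             ret = btrfs_fdatawrite_range(inode, start, end);
     *             if (ret)
     *                     return ret;
     *             // All pages in the range are now marked writeback, so
     *             // the wait below observes every one of them.
     *             return filemap_fdatawait_range(inode->i_mapping,
     *                                            start, end);
     *     }
     */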
3008