linux/fs/btrfs/file.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * when auto defrag is enabled we queue up these defrag structs
 * to remember which inodes need defragging passes
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid where the defrag was added, we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};

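/*
 * Entries in the defrag tree are keyed by (root, ino), in that order; the
 * comparison below defines the total order.  For example (illustrative
 * values), (root=5, ino=260) sorts before (root=6, ino=1), because the
 * root objectid is compared first and the inode number only breaks ties.
 */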
static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found, the defrag item you
 * pass in is freed
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        return -EEXIST;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return 0;
}

static inline int __need_auto_defrag(struct btrfs_root *root)
{
        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(root->fs_info))
                return 0;

        return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *defrag;
        u64 transid;
        int ret;

        if (!__need_auto_defrag(root))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = BTRFS_I(inode)->root->last_trans;

        defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

        spin_lock(&root->fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
                /*
                 * If we set the IN_DEFRAG flag and then evict the inode
                 * from memory, a later re-read of the inode won't have
                 * IN_DEFRAG set.  In that case we may find an existing
                 * record in the tree here.
                 */
                ret = __btrfs_add_inode_defrag(inode, defrag);
                if (ret)
                        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        return 0;
}
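/*
 * Note the check-then-recheck pattern above: the first test_bit() is an
 * unlocked fast path, and IN_DEFRAG is tested again under
 * defrag_inodes_lock before inserting, since another task may have
 * queued this inode in the meantime.
 */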

/*
 * Requeue the defrag object.  If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct inode *inode,
                                       struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;

        if (!__need_auto_defrag(root))
                goto out;

        /*
         * Here we don't check the IN_DEFRAG flag, because we need to merge
         * them together.
         */
        spin_lock(&root->fs_info->defrag_inodes_lock);
        ret = __btrfs_add_inode_defrag(inode, defrag);
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        if (ret)
                goto out;
        return;
out:
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * pick the defraggable inode that we want; if it doesn't exist, we will
 * get the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        spin_lock(&fs_info->defrag_inodes_lock);
        p = fs_info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        goto out;
        }

        if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                parent = rb_next(parent);
                if (parent)
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                else
                        entry = NULL;
        }
out:
        if (entry)
                rb_erase(parent, &fs_info->defrag_inodes);
        spin_unlock(&fs_info->defrag_inodes_lock);
        return entry;
}
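/*
 * "The next one" above means the in-order successor: if no entry matches
 * (root, ino) exactly, the search ends at a leaf node and, when that node
 * compares below the target, rb_next() advances to the first entry that
 * sorts after it.  The chosen entry is erased from the tree before it is
 * returned, so the caller owns it.
 */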

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct rb_node *node;

        spin_lock(&fs_info->defrag_inodes_lock);
        node = rb_first(&fs_info->defrag_inodes);
        while (node) {
                rb_erase(node, &fs_info->defrag_inodes);
                defrag = rb_entry(node, struct inode_defrag, rb_node);
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

                cond_resched_lock(&fs_info->defrag_inodes_lock);

                node = rb_first(&fs_info->defrag_inodes);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH      1024
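/*
 * BTRFS_DEFRAG_BATCH bounds how much work one pass does: each call into
 * btrfs_defrag_file() below is handed this value as its batch limit, so
 * at most that many pages are defragged before the inode is requeued and
 * one huge file cannot monopolize the auto-defrag worker.
 */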

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        int num_defrag;
        int index;
        int ret;

        /* get the inode */
        key.objectid = defrag->root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        index = srcu_read_lock(&fs_info->subvol_srcu);

        inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(inode_root)) {
                ret = PTR_ERR(inode_root);
                goto cleanup;
        }

        key.objectid = defrag->ino;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                goto cleanup;
        }
        srcu_read_unlock(&fs_info->subvol_srcu, index);

        /* do a chunk of defrag */
        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;
        range.start = defrag->last_offset;

        sb_start_write(fs_info->sb);
        num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                       BTRFS_DEFRAG_BATCH);
        sb_end_write(fs_info->sb);
        /*
         * if we filled the whole defrag batch, there
         * must be more work to do.  Queue this defrag
         * again
         */
        if (num_defrag == BTRFS_DEFRAG_BATCH) {
                defrag->last_offset = range.start;
                btrfs_requeue_inode_defrag(inode, defrag);
        } else if (defrag->last_offset && !defrag->cycled) {
                /*
                 * we didn't fill our defrag batch, but
                 * we didn't start at zero.  Make sure we loop
                 * around to the start of the file.
                 */
                defrag->last_offset = 0;
                defrag->cycled = 1;
                btrfs_requeue_inode_defrag(inode, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }

        iput(inode);
        return 0;
cleanup:
        srcu_read_unlock(&fs_info->subvol_srcu, index);
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        u64 first_ino = 0;
        u64 root_objectid = 0;

        atomic_inc(&fs_info->defrag_running);
        while (1) {
                /* Pause the auto defragger. */
                if (test_bit(BTRFS_FS_STATE_REMOUNTING,
                             &fs_info->fs_state))
                        break;

                if (!__need_auto_defrag(fs_info->tree_root))
                        break;

                /* find an inode to defrag */
                defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
                                                 first_ino);
                if (!defrag) {
                        if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;

                __btrfs_run_defrag_inode(fs_info, defrag);
        }
        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 */
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_write_iter to fault page */
                if (unlikely(copied == 0))
                        break;

                if (copied < PAGE_CACHE_SIZE - offset) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}
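/*
 * Example of the offset math above (illustrative numbers, assuming 4K
 * pages): for pos == 5000 and write_bytes == 8000, offset starts at
 * 5000 & 4095 == 904, so the first iteration copies at most
 * 4096 - 904 == 3192 bytes into prepared_pages[0]; the copy then
 * continues with 4096 bytes into page 1 and the final 712 bytes into
 * page 2, assuming no partial (faulting) copies along the way.
 */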

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                /* PageChecked is some magic around finding pages that
                 * have been modified without going through
                 * btrfs_set_page_dirty; clear it here.  There should be
                 * no need to mark the pages accessed, as prepare_pages
                 * already marked them accessed via find_or_create_page().
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                             struct page **pages, size_t num_pages,
                             loff_t pos, size_t write_bytes,
                             struct extent_state **cached)
{
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * we've only changed i_size in ram, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(inode, end_pos);
        return 0;
}
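/*
 * Example of the alignment math above (illustrative numbers, assuming a
 * 4K sectorsize): for pos == 5000 and write_bytes == 3000, start_pos is
 * rounded down to 4096 and num_bytes is ALIGN(3000 + 5000 - 4096, 4096)
 * == ALIGN(3904, 4096) == 4096, so delalloc is set on the single sector
 * [4096, 8191] even though the write itself only covers [5000, 7999].
 */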

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;
        bool modified;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                int no_splits = 0;

                modified = false;
                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                clear_bit(EXTENT_FLAG_LOGGING, &flags);
                modified = !list_empty(&em->list);
                if (no_splits)
                        goto next;

                if (em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_start = em->orig_start;
                                split->block_start = em->block_start;

                                if (compressed)
                                        split->block_len = em->block_len;
                                else
                                        split->block_len = split->len;
                                split->orig_block_len = max(split->block_len,
                                                em->orig_block_len);
                                split->ram_bytes = em->ram_bytes;
                        } else {
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                                split->ram_bytes = split->len;
                        }

                        split->generation = gen;
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        replace_extent_mapping(em_tree, em, split, modified);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_block_len = max(em->block_len,
                                                    em->orig_block_len);

                                split->ram_bytes = em->ram_bytes;
                                if (compressed) {
                                        split->block_len = em->block_len;
                                        split->block_start = em->block_start;
                                        split->orig_start = em->orig_start;
                                } else {
                                        split->block_len = split->len;
                                        split->block_start = em->block_start
                                                + diff;
                                        split->orig_start = em->orig_start;
                                }
                        } else {
                                split->ram_bytes = split->len;
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                        }

                        if (extent_map_in_tree(em)) {
                                replace_extent_mapping(em_tree, em, split,
                                                       modified);
                        } else {
                                ret = add_extent_mapping(em_tree, split,
                                                         modified);
                                ASSERT(ret == 0); /* Logic error */
                        }
                        free_extent_map(split);
                        split = NULL;
                }
next:
                if (extent_map_in_tree(em))
                        remove_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}
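/*
 * Shape of the two splits above (illustrative):
 *
 *                  [start ......... end]
 *     |------------------- em -------------------|
 *     | split |                        |  split2  |
 *
 * The piece of em in front of start survives as the first split, the
 * piece past end survives as the second, and em itself is removed from
 * the tree and freed.  Both pieces inherit block mapping, flags,
 * generation and compression type from em.
 */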

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
                         u64 *drop_end, int drop_cache,
                         int replace_extent,
                         u32 extent_item_size,
                         int *key_inserted)
{
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs;
        int found = 0;
        int leafs_visited = 0;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
                modify_tree = 0;

        update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
                       root == root->fs_info->tree_root);
        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
                leafs_visited++;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leafs_visited++;
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid > ino)
                        break;
                if (WARN_ON_ONCE(key.objectid < ino) ||
                    key.type < BTRFS_EXTENT_DATA_KEY) {
                        ASSERT(del_nr == 0);
                        path->slots[0]++;
                        goto next_slot;
                }
                if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf,
                                                     path->slots[0], fi);
                } else {
                        /* can't happen */
                        BUG();
                }

                /*
                 * Don't skip extent items representing 0 byte lengths.  They
                 * used to be created (due to a bug) when we hit an -ENOSPC
                 * condition while punching holes.  So if we find one here,
                 * just ensure we delete it; otherwise we would insert a new
                 * file extent item with the same key (offset) as that 0 byte
                 * length file extent item in the call to
                 * setup_items_for_insert() later in this function.
                 */
                if (extent_end == key.offset && extent_end >= search_start)
                        goto delete_extent_item;

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                found = 1;
                search_start = max(key.offset, start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, end - key.offset);
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, extent_end - start);
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
delete_extent_item:
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset);
                                BUG_ON(ret); /* -ENOMEM */
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, root, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG_ON(1);
        }

        if (!ret && del_nr > 0) {
                /*
                 * Set path->slots[0] to the first slot, so that after the
                 * delete, if items are moved off from our leaf to its
                 * immediate left or right neighbor leaves, we end up with a
                 * correct and adjusted path->slots[0] for our insertion (if
                 * replace_extent != 0).
                 */
                path->slots[0] = del_slot;
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, root, ret);
        }

        leaf = path->nodes[0];
        /*
         * If btrfs_del_items() was called, it might have deleted a leaf, in
         * which case it unlocked our path, so check path->locks[0] matches a
         * write lock.
         */
        if (!ret && replace_extent && leafs_visited == 1 &&
            (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
             path->locks[0] == BTRFS_WRITE_LOCK) &&
            btrfs_leaf_free_space(root, leaf) >=
            sizeof(struct btrfs_item) + extent_item_size) {

                key.objectid = ino;
                key.type = BTRFS_EXTENT_DATA_KEY;
                key.offset = start;
                if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
                        struct btrfs_key slot_key;

                        btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
                        if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
                                path->slots[0]++;
                }
                setup_items_for_insert(root, path, &key,
                                       &extent_item_size,
                                       extent_item_size,
                                       sizeof(struct btrfs_item) +
                                       extent_item_size, 1);
                *key_inserted = 1;
        }

        if (!replace_extent || !(*key_inserted))
                btrfs_release_path(path);
        if (drop_end)
                *drop_end = found ? min(end, extent_end) : end;
        return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
                                   drop_cache, 0, 0, NULL);
        btrfs_free_path(path);
        return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}
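/*
 * Note that *start and *end above are in/out: a caller may pre-seed them
 * with the range it expects (non-zero values are verified against the
 * candidate extent, zero means "don't care"); on success they are set to
 * the candidate's [key.offset, extent_end) range.
 */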

/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
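/*
 * For example (illustrative), marking the middle of a pre-allocated
 * extent written splits it in three:
 *
 *     before:  | ----------- prealloc ----------- |
 *     after:   | prealloc |  written  |  prealloc |
 *
 * All pieces keep pointing at the same disk extent (bytenr), which is
 * why extra references are taken with btrfs_inc_extent_ref() below.
 */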
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * On error we return an unlocked page and the error value.
 * On success we return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
                                 struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
                if (page->mapping != inode->i_mapping) {
                        unlock_page(page);
                        return -EAGAIN;
                }
        }
        return 0;
}
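/*
 * The -EAGAIN above covers the case where the page was invalidated (e.g.
 * truncated or reclaimed) while it was unlocked for the read:
 * page->mapping no longer matches the inode's mapping, so the caller
 * drops this page and grabs a fresh one.
 */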
1316
1317/*
1318 * this just gets pages into the page cache and locks them down.
1319 */
1320static noinline int prepare_pages(struct inode *inode, struct page **pages,
1321                                  size_t num_pages, loff_t pos,
1322                                  size_t write_bytes, bool force_uptodate)
1323{
1324        int i;
1325        unsigned long index = pos >> PAGE_CACHE_SHIFT;
1326        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1327        int err = 0;
1328        int faili;
1329
1330        for (i = 0; i < num_pages; i++) {
1331again:
1332                pages[i] = find_or_create_page(inode->i_mapping, index + i,
1333                                               mask | __GFP_WRITE);
1334                if (!pages[i]) {
1335                        faili = i - 1;
1336                        err = -ENOMEM;
1337                        goto fail;
1338                }
1339
1340                if (i == 0)
1341                        err = prepare_uptodate_page(inode, pages[i], pos,
1342                                                    force_uptodate);
1343                if (!err && i == num_pages - 1)
1344                        err = prepare_uptodate_page(inode, pages[i],
1345                                                    pos + write_bytes, false);
1346                if (err) {
1347                        page_cache_release(pages[i]);
1348                        if (err == -EAGAIN) {
1349                                err = 0;
1350                                goto again;
1351                        }
1352                        faili = i - 1;
1353                        goto fail;
1354                }
1355                wait_on_page_writeback(pages[i]);
1356        }
1357
1358        return 0;
1359fail:
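            /*
             * Unwind: pages[faili] is the last page that is still locked
             * with a reference held; unlock and release it and everything
             * before it.
             */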
1360        while (faili >= 0) {
1361                unlock_page(pages[faili]);
1362                page_cache_release(pages[faili]);
1363                faili--;
1364        }
1365        return err;
1366}
1368
1369/*
1370 * This function locks the extent and properly waits for data=ordered extents
1371 * to finish before allowing the pages to be modified, if needed.
1372 *
1373 * The return value:
1374 * 1 - the extent is locked
1375 * 0 - the extent is not locked, and everything is OK
1376 * -EAGAIN - the pages need to be re-prepared
1377 * any other value < 0 - something went wrong
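     *
     * For example (assuming 4K pages): pos = 5000 with num_pages = 2 gives
     * start_pos = 4096 and last_pos = 4096 + 2 * 4096 - 1 = 12287, i.e.
     * the whole page range backing the write.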
1378 */
1379static noinline int
1380lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
1381                                size_t num_pages, loff_t pos,
1382                                u64 *lockstart, u64 *lockend,
1383                                struct extent_state **cached_state)
1384{
1385        u64 start_pos;
1386        u64 last_pos;
1387        int i;
1388        int ret = 0;
1389
1390        start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
1391        last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;
1392
1393        if (start_pos < inode->i_size) {
1394                struct btrfs_ordered_extent *ordered;
1395                lock_extent_bits(&BTRFS_I(inode)->io_tree,
1396                                 start_pos, last_pos, cached_state);
1397                ordered = btrfs_lookup_ordered_range(inode, start_pos,
1398                                                     last_pos - start_pos + 1);
1399                if (ordered &&
1400                    ordered->file_offset + ordered->len > start_pos &&
1401                    ordered->file_offset <= last_pos) {
1402                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1403                                             start_pos, last_pos,
1404                                             cached_state, GFP_NOFS);
1405                        for (i = 0; i < num_pages; i++) {
1406                                unlock_page(pages[i]);
1407                                page_cache_release(pages[i]);
1408                        }
1409                        btrfs_start_ordered_extent(inode, ordered, 1);
1410                        btrfs_put_ordered_extent(ordered);
1411                        return -EAGAIN;
1412                }
1413                if (ordered)
1414                        btrfs_put_ordered_extent(ordered);
1415
1416                clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
1417                                  last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
1418                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
1419                                  0, 0, cached_state, GFP_NOFS);
1420                *lockstart = start_pos;
1421                *lockend = last_pos;
1422                ret = 1;
1423        }
1424
1425        for (i = 0; i < num_pages; i++) {
1426                if (clear_page_dirty_for_io(pages[i]))
1427                        account_page_redirty(pages[i]);
1428                set_page_extent_mapped(pages[i]);
1429                WARN_ON(!PageLocked(pages[i]));
1430        }
1431
1432        return ret;
1433}
1434
1435static noinline int check_can_nocow(struct inode *inode, loff_t pos,
1436                                    size_t *write_bytes)
1437{
1438        struct btrfs_root *root = BTRFS_I(inode)->root;
1439        struct btrfs_ordered_extent *ordered;
1440        u64 lockstart, lockend;
1441        u64 num_bytes;
1442        int ret;
1443
1444        ret = btrfs_start_write_no_snapshoting(root);
1445        if (!ret)
1446                return -ENOSPC;
1447
1448        lockstart = round_down(pos, root->sectorsize);
1449        lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;
1450
1451        while (1) {
1452                lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1453                ordered = btrfs_lookup_ordered_range(inode, lockstart,
1454                                                     lockend - lockstart + 1);
1455                if (!ordered)
1456                        break;
1457
1458                unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1459                btrfs_start_ordered_extent(inode, ordered, 1);
1460                btrfs_put_ordered_extent(ordered);
1461        }
1462
1463        num_bytes = lockend - lockstart + 1;
1464        ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
1465        if (ret <= 0) {
1466                ret = 0;
1467                btrfs_end_write_no_snapshoting(root);
1468        } else {
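                    /*
                     * Worked example (assuming a 4K sectorsize): for
                     * pos = 5000 and *write_bytes = 20000, lockstart is
                     * 4096; if can_nocow_extent() trimmed num_bytes to
                     * 8192 we can only nocow up to byte 12287, so
                     * *write_bytes is clamped to 8192 - 5000 + 4096 = 7288.
                     */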
1469                *write_bytes = min_t(size_t, *write_bytes,
1470                                     num_bytes - pos + lockstart);
1471        }
1472
1473        unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1474
1475        return ret;
1476}
1477
1478static noinline ssize_t __btrfs_buffered_write(struct file *file,
1479                                               struct iov_iter *i,
1480                                               loff_t pos)
1481{
1482        struct inode *inode = file_inode(file);
1483        struct btrfs_root *root = BTRFS_I(inode)->root;
1484        struct page **pages = NULL;
1485        struct extent_state *cached_state = NULL;
1486        u64 release_bytes = 0;
1487        u64 lockstart;
1488        u64 lockend;
1489        size_t num_written = 0;
1490        int nrptrs;
1491        int ret = 0;
1492        bool only_release_metadata = false;
1493        bool force_page_uptodate = false;
1494        bool need_unlock;
1495
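            /*
             * Bound the size of the pages[] batch handled per loop
             * iteration: enough pointers to cover the iov, but at most one
             * page worth of pointers, no more than the task's remaining
             * dirty-throttling budget, and at least 8 so that small writes
             * still get some batching.
             */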
1496        nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
1497                        PAGE_CACHE_SIZE / (sizeof(struct page *)));
1498        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1499        nrptrs = max(nrptrs, 8);
1500        pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1501        if (!pages)
1502                return -ENOMEM;
1503
1504        while (iov_iter_count(i) > 0) {
1505                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1506                size_t write_bytes = min(iov_iter_count(i),
1507                                         nrptrs * (size_t)PAGE_CACHE_SIZE -
1508                                         offset);
1509                size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1510                                                PAGE_CACHE_SIZE);
1511                size_t reserve_bytes;
1512                size_t dirty_pages;
1513                size_t copied;
1514
1515                WARN_ON(num_pages > nrptrs);
1516
1517                /*
1518                 * Fault pages before locking them in prepare_pages
1519                 * to avoid recursive lock
1520                 */
1521                if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1522                        ret = -EFAULT;
1523                        break;
1524                }
1525
1526                reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
1527
1528                if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1529                                             BTRFS_INODE_PREALLOC)) {
1530                        ret = check_can_nocow(inode, pos, &write_bytes);
1531                        if (ret < 0)
1532                                break;
1533                        if (ret > 0) {
1534                                /*
1535                                 * For the nodatacow case, there is no
1536                                 * need to reserve data space.
1537                                 */
1538                                only_release_metadata = true;
1539                                /*
1540                                 * our prealloc extent may be smaller than
1541                                 * write_bytes, so scale down.
1542                                 */
1543                                num_pages = DIV_ROUND_UP(write_bytes + offset,
1544                                                         PAGE_CACHE_SIZE);
1545                                reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
1546                                goto reserve_metadata;
1547                        }
1548                }
1549                ret = btrfs_check_data_free_space(inode, pos, write_bytes);
1550                if (ret < 0)
1551                        break;
1552
1553reserve_metadata:
1554                ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
1555                if (ret) {
1556                        if (!only_release_metadata)
1557                                btrfs_free_reserved_data_space(inode, pos,
1558                                                               write_bytes);
1559                        else
1560                                btrfs_end_write_no_snapshoting(root);
1561                        break;
1562                }
1563
1564                release_bytes = reserve_bytes;
1565                need_unlock = false;
1566again:
1567                /*
1568                 * This is going to set up the pages array with the number of
1569                 * pages we want, so we don't really need to worry about the
1570                 * contents of pages from loop to loop
1571                 */
1572                ret = prepare_pages(inode, pages, num_pages,
1573                                    pos, write_bytes,
1574                                    force_page_uptodate);
1575                if (ret)
1576                        break;
1577
1578                ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
1579                                                      pos, &lockstart, &lockend,
1580                                                      &cached_state);
1581                if (ret < 0) {
1582                        if (ret == -EAGAIN)
1583                                goto again;
1584                        break;
1585                } else if (ret > 0) {
1586                        need_unlock = true;
1587                        ret = 0;
1588                }
1589
1590                copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1591
1592                /*
1593                 * if we have trouble faulting in the pages, fall
1594                 * back to one page at a time
1595                 */
1596                if (copied < write_bytes)
1597                        nrptrs = 1;
1598
1599                if (copied == 0) {
1600                        force_page_uptodate = true;
1601                        dirty_pages = 0;
1602                } else {
1603                        force_page_uptodate = false;
1604                        dirty_pages = DIV_ROUND_UP(copied + offset,
1605                                                   PAGE_CACHE_SIZE);
1606                }
1607
1608                /*
1609                 * If we had a short copy we need to release the excess delalloc
1610                 * bytes we reserved.  We need to increment outstanding_extents
1611                 * because btrfs_delalloc_release_space will decrement it, but
1612                 * we still have an outstanding extent for the chunk we actually
1613                 * managed to copy.
1614                 */
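                    /*
                     * E.g. (4K pages): if num_pages = 4 but the copy only
                     * reached dirty_pages = 2, release_bytes becomes
                     * 2 * 4096 for the two untouched pages, while the
                     * reservation for the two dirty pages is kept.
                     */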
1615                if (num_pages > dirty_pages) {
1616                        release_bytes = (num_pages - dirty_pages) <<
1617                                PAGE_CACHE_SHIFT;
1618                        if (copied > 0) {
1619                                spin_lock(&BTRFS_I(inode)->lock);
1620                                BTRFS_I(inode)->outstanding_extents++;
1621                                spin_unlock(&BTRFS_I(inode)->lock);
1622                        }
1623                        if (only_release_metadata) {
1624                                btrfs_delalloc_release_metadata(inode,
1625                                                                release_bytes);
1626                        } else {
1627                                u64 __pos;
1628
1629                                __pos = round_down(pos, root->sectorsize) +
1630                                        (dirty_pages << PAGE_CACHE_SHIFT);
1631                                btrfs_delalloc_release_space(inode, __pos,
1632                                                             release_bytes);
1633                        }
1634                }
1635
1636                release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
1637
1638                if (copied > 0)
1639                        ret = btrfs_dirty_pages(root, inode, pages,
1640                                                dirty_pages, pos, copied,
1641                                                NULL);
1642                if (need_unlock)
1643                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1644                                             lockstart, lockend, &cached_state,
1645                                             GFP_NOFS);
1646                if (ret) {
1647                        btrfs_drop_pages(pages, num_pages);
1648                        break;
1649                }
1650
1651                release_bytes = 0;
1652                if (only_release_metadata)
1653                        btrfs_end_write_no_snapshoting(root);
1654
1655                if (only_release_metadata && copied > 0) {
1656                        lockstart = round_down(pos, root->sectorsize);
1657                        lockend = lockstart +
1658                                (dirty_pages << PAGE_CACHE_SHIFT) - 1;
1659
1660                        set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1661                                       lockend, EXTENT_NORESERVE, NULL,
1662                                       NULL, GFP_NOFS);
1663                        only_release_metadata = false;
1664                }
1665
1666                btrfs_drop_pages(pages, num_pages);
1667
1668                cond_resched();
1669
1670                balance_dirty_pages_ratelimited(inode->i_mapping);
1671                if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1)
1672                        btrfs_btree_balance_dirty(root);
1673
1674                pos += copied;
1675                num_written += copied;
1676        }
1677
1678        kfree(pages);
1679
1680        if (release_bytes) {
1681                if (only_release_metadata) {
1682                        btrfs_end_write_no_snapshoting(root);
1683                        btrfs_delalloc_release_metadata(inode, release_bytes);
1684                } else {
1685                        btrfs_delalloc_release_space(inode, pos, release_bytes);
1686                }
1687        }
1688
1689        return num_written ? num_written : ret;
1690}
1691
1692static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1693                                    struct iov_iter *from,
1694                                    loff_t pos)
1695{
1696        struct file *file = iocb->ki_filp;
1697        struct inode *inode = file_inode(file);
1698        ssize_t written;
1699        ssize_t written_buffered;
1700        loff_t endbyte;
1701        int err;
1702
1703        written = generic_file_direct_write(iocb, from, pos);
1704
1705        if (written < 0 || !iov_iter_count(from))
1706                return written;
1707
1708        pos += written;
1709        written_buffered = __btrfs_buffered_write(file, from, pos);
1710        if (written_buffered < 0) {
1711                err = written_buffered;
1712                goto out;
1713        }
1714        /*
1715         * Ensure all data is persisted. We want the next direct IO read to be
1716         * able to read what was just written.
1717         */
1718        endbyte = pos + written_buffered - 1;
1719        err = btrfs_fdatawrite_range(inode, pos, endbyte);
1720        if (err)
1721                goto out;
1722        err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1723        if (err)
1724                goto out;
1725        written += written_buffered;
1726        iocb->ki_pos = pos + written_buffered;
1727        invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1728                                 endbyte >> PAGE_CACHE_SHIFT);
1729out:
1730        return written ? written : err;
1731}
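
    /*
     * A worked example of the fallback above (assuming 4K pages): a 1 MiB
     * direct write at pos 0 that only manages to write the first 512 KiB
     * directly buffers bytes [512K, 1M), writes back and waits on that
     * range, advances ki_pos to 1 MiB, and invalidates page cache pages
     * 128-255 so a later O_DIRECT read sees the on-disk data.
     */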
1732
1733static void update_time_for_write(struct inode *inode)
1734{
1735        struct timespec now;
1736
1737        if (IS_NOCMTIME(inode))
1738                return;
1739
1740        now = current_fs_time(inode->i_sb);
1741        if (!timespec_equal(&inode->i_mtime, &now))
1742                inode->i_mtime = now;
1743
1744        if (!timespec_equal(&inode->i_ctime, &now))
1745                inode->i_ctime = now;
1746
1747        if (IS_I_VERSION(inode))
1748                inode_inc_iversion(inode);
1749}
1750
1751static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1752                                    struct iov_iter *from)
1753{
1754        struct file *file = iocb->ki_filp;
1755        struct inode *inode = file_inode(file);
1756        struct btrfs_root *root = BTRFS_I(inode)->root;
1757        u64 start_pos;
1758        u64 end_pos;
1759        ssize_t num_written = 0;
1760        bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
1761        ssize_t err;
1762        loff_t pos;
1763        size_t count;
1764
1765        inode_lock(inode);
1766        err = generic_write_checks(iocb, from);
1767        if (err <= 0) {
1768                inode_unlock(inode);
1769                return err;
1770        }
1771
1772        current->backing_dev_info = inode_to_bdi(inode);
1773        err = file_remove_privs(file);
1774        if (err) {
1775                inode_unlock(inode);
1776                goto out;
1777        }
1778
1779        /*
1780         * If BTRFS flips readonly due to some impossible error
1781         * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1782         * although we have opened a file as writable, we have
1783         * to stop this write operation to ensure FS consistency.
1784         */
1785        if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
1786                inode_unlock(inode);
1787                err = -EROFS;
1788                goto out;
1789        }
1790
1791        /*
1792         * We reserve space for updating the inode when we reserve space for the
1793         * extent we are going to write, so we will enospc out there.  We don't
1794         * need to start yet another transaction to update the inode as we will
1795         * update the inode when we finish writing whatever data we write.
1796         */
1797        update_time_for_write(inode);
1798
1799        pos = iocb->ki_pos;
1800        count = iov_iter_count(from);
1801        start_pos = round_down(pos, root->sectorsize);
1802        if (start_pos > i_size_read(inode)) {
1803                /* Expand the hole to cover the written data, preventing an empty gap */
1804                end_pos = round_up(pos + count, root->sectorsize);
1805                err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
1806                if (err) {
1807                        inode_unlock(inode);
1808                        goto out;
1809                }
1810        }
1811
1812        if (sync)
1813                atomic_inc(&BTRFS_I(inode)->sync_writers);
1814
1815        if (iocb->ki_flags & IOCB_DIRECT) {
1816                num_written = __btrfs_direct_write(iocb, from, pos);
1817        } else {
1818                num_written = __btrfs_buffered_write(file, from, pos);
1819                if (num_written > 0)
1820                        iocb->ki_pos = pos + num_written;
1821        }
1822
1823        inode_unlock(inode);
1824
1825        /*
1826         * We also have to set last_sub_trans to the current log transid,
1827         * otherwise subsequent syncs to a file that's been synced in this
1828         * transaction will appear to have already occurred.
1829         */
1830        spin_lock(&BTRFS_I(inode)->lock);
1831        BTRFS_I(inode)->last_sub_trans = root->log_transid;
1832        spin_unlock(&BTRFS_I(inode)->lock);
1833        if (num_written > 0) {
1834                err = generic_write_sync(file, pos, num_written);
1835                if (err < 0)
1836                        num_written = err;
1837        }
1838
1839        if (sync)
1840                atomic_dec(&BTRFS_I(inode)->sync_writers);
1841out:
1842        current->backing_dev_info = NULL;
1843        return num_written ? num_written : err;
1844}
1845
1846int btrfs_release_file(struct inode *inode, struct file *filp)
1847{
1848        if (filp->private_data)
1849                btrfs_ioctl_trans_end(filp);
1850        /*
1851         * ordered_data_close is set by setattr when we are about to truncate
1852         * a file from a non-zero size to a zero size.  This tries to
1853         * flush down new bytes that may have been written if the
1854         * application were using truncate to replace a file in place.
1855         */
1856        if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1857                               &BTRFS_I(inode)->runtime_flags))
1858                filemap_flush(inode->i_mapping);
1859        return 0;
1860}
1861
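    /*
     * Kick off writeback for the given range. The intent of elevating
     * sync_writers is to signal the bio submission path that a
     * synchronous writer is in flight, so data checksumming can be done
     * inline instead of being punted to the async helper threads -
     * cheaper for the small ranges fsync typically flushes.
     */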
1862static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
1863{
1864        int ret;
1865
1866        atomic_inc(&BTRFS_I(inode)->sync_writers);
1867        ret = btrfs_fdatawrite_range(inode, start, end);
1868        atomic_dec(&BTRFS_I(inode)->sync_writers);
1869
1870        return ret;
1871}
1872
1873/*
1874 * fsync call for both files and directories.  This logs the inode into
1875 * the tree log instead of forcing full commits whenever possible.
1876 *
1877 * It needs to call filemap_fdatawait so that all ordered extent updates in
1878 * the metadata btree are up to date for copying to the log.
1879 *
1880 * It drops the inode mutex before doing the tree log commit.  This is an
1881 * important optimization for directories because holding the mutex prevents
1882 * new operations on the dir while we write to disk.
1883 */
1884int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1885{
1886        struct dentry *dentry = file->f_path.dentry;
1887        struct inode *inode = d_inode(dentry);
1888        struct btrfs_root *root = BTRFS_I(inode)->root;
1889        struct btrfs_trans_handle *trans;
1890        struct btrfs_log_ctx ctx;
1891        int ret = 0;
1892        bool full_sync = 0;
1893        u64 len;
1894
1895        /*
1896         * The range length can be represented by u64, but we have to do the
1897         * typecasts to avoid signed overflow if it's [0, LLONG_MAX], e.g. from fsync()
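             * (e.g. start = 0 and end = LLONG_MAX give len = 2^63, which
             * would overflow a signed 64-bit type but fits in a u64).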
1898         */
1899        len = (u64)end - (u64)start + 1;
1900        trace_btrfs_sync_file(file, datasync);
1901
1902        /*
1903         * We write the dirty pages in the range and wait until they complete
1904         * outside of the ->i_mutex. This way the dirty pages can be flushed
1905         * by multiple tasks, improving performance.  See
1906         * btrfs_wait_ordered_range for an explanation of the ASYNC check.
1907         */
1908        ret = start_ordered_ops(inode, start, end);
1909        if (ret)
1910                return ret;
1911
1912        inode_lock(inode);
1913        atomic_inc(&root->log_batch);
1914        full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1915                             &BTRFS_I(inode)->runtime_flags);
1916        /*
1917         * We might have had more pages made dirty after calling
1918         * start_ordered_ops and before acquiring the inode's i_mutex.
1919         */
1920        if (full_sync) {
1921                /*
1922                 * For a full sync, we need to make sure any ordered operations
1923                 * start and finish before we start logging the inode, so that
1924                 * all extents are persisted and the respective file extent
1925                 * items are in the fs/subvol btree.
1926                 */
1927                ret = btrfs_wait_ordered_range(inode, start, len);
1928        } else {
1929                /*
1930                 * Start any new ordered operations before starting to log the
1931                 * inode. We will wait for them to finish in btrfs_sync_log().
1932                 *
1933                 * Right before acquiring the inode's mutex, we might have new
1934                 * writes dirtying pages, which won't immediately start the
1935                 * respective ordered operations - that is done through the
1936                 * fill_delalloc callbacks invoked from the writepage and
1937                 * writepages address space operations. So make sure we start
1938                 * all ordered operations before starting to log our inode. Not
1939                 * doing this means that while logging the inode, writeback
1940                 * could start and invoke writepage/writepages, which would call
1941                 * the fill_delalloc callbacks (cow_file_range,
1942                 * submit_compressed_extents). These callbacks add first an
1943                 * extent map to the modified list of extents and then create
1944                 * the respective ordered operation, which means in
1945                 * tree-log.c:btrfs_log_inode() we might capture all existing
1946                 * ordered operations (with btrfs_get_logged_extents()) before
1947                 * the fill_delalloc callback adds its ordered operation, and by
1948                 * the time we visit the modified list of extent maps (with
1949                 * btrfs_log_changed_extents()), we see and process the extent
1950                 * map they created. We then use the extent map to construct a
1951                 * file extent item for logging without waiting for the
1952                 * respective ordered operation to finish - this file extent
1953                 * item points to a disk location that might not have yet been
1954                 * written to, containing random data - so after a crash a log
1955                 * replay will make our inode have file extent items that point
1956                 * to disk locations containing invalid data, as we returned
1957                 * success to userspace without waiting for the respective
1958                 * ordered operation to finish, because it wasn't captured by
1959                 * btrfs_get_logged_extents().
1960                 */
1961                ret = start_ordered_ops(inode, start, end);
1962        }
1963        if (ret) {
1964                inode_unlock(inode);
1965                goto out;
1966        }
1967        atomic_inc(&root->log_batch);
1968
1969        /*
1970         * If the last transaction that changed this file was before the current
1971         * transaction and we have the full sync flag set in our inode, we can
1972         * bail out now without any syncing.
1973         *
1974         * Note that we can't bail out if the full sync flag isn't set. This is
1975         * because when the full sync flag is set we start all ordered extents
1976         * and wait for them to fully complete - when they complete they update
1977         * the inode's last_trans field through:
1978         *
1979         *     btrfs_finish_ordered_io() ->
1980         *         btrfs_update_inode_fallback() ->
1981         *             btrfs_update_inode() ->
1982         *                 btrfs_set_inode_last_trans()
1983         *
1984         * So we are sure that last_trans is up to date and can do this check to
1985         * bail out safely. For the fast path, when the full sync flag is not
1986         * set in our inode, we can not do it because we start only our ordered
1987         * extents and don't wait for them to complete (that is when
1988         * btrfs_finish_ordered_io runs), so here at this point their last_trans
1989         * value might be less than or equal to fs_info->last_trans_committed,
1990         * and setting a speculative last_trans for an inode when a buffered
1991         * write is made (such as fs_info->generation + 1 for example) would not
1992         * be reliable since after setting the value and before fsync is called
1993         * any number of transactions can start and commit (transaction kthread
1994         * commits the current transaction periodically), and a transaction
1995         * commit does not start nor waits for ordered extents to complete.
1996         */
1997        smp_mb();
1998        if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1999            (BTRFS_I(inode)->last_trans <=
2000             root->fs_info->last_trans_committed &&
2001             (full_sync ||
2002              !btrfs_have_ordered_extents_in_range(inode, start, len)))) {
2003                /*
2004                 * We've had everything committed since the last time we were
2005                 * modified so clear this flag in case it was set for whatever
2006                 * reason, it's no longer relevant.
2007                 */
2008                clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2009                          &BTRFS_I(inode)->runtime_flags);
2010                inode_unlock(inode);
2011                goto out;
2012        }
2013
2014        /*
2015         * ok, we haven't committed the transaction yet, let's do a commit
2016         */
2017        if (file->private_data)
2018                btrfs_ioctl_trans_end(file);
2019
2020        /*
2021         * We use start here because we will need to wait on the IO to complete
2022         * in btrfs_sync_log, which could require joining a transaction (for
2023         * example checking cross references in the nocow path).  If we use join
2024         * here we could get into a situation where we're waiting on IO to
2025         * happen that is blocked on a transaction trying to commit.  With start
2026         * we inc the extwriter counter, so we wait for all extwriters to exit
2027         * before we start blocking join'ers.  This comment is to keep somebody
2028         * from thinking they are super smart and changing this to
2029         * btrfs_join_transaction *cough*Josef*cough*.
2030         */
2031        trans = btrfs_start_transaction(root, 0);
2032        if (IS_ERR(trans)) {
2033                ret = PTR_ERR(trans);
2034                inode_unlock(inode);
2035                goto out;
2036        }
2037        trans->sync = true;
2038
2039        btrfs_init_log_ctx(&ctx);
2040
2041        ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
2042        if (ret < 0) {
2043                /* Fallthrough and commit/free transaction. */
2044                ret = 1;
2045        }
2046
2047        /* we've logged all the items and now have a consistent
2048         * version of the file in the log.  It is possible that
2049         * someone will come in and modify the file, but that's
2050         * fine because the log is consistent on disk, and we
2051         * have references to all of the file's extents
2052         *
2053         * It is possible that someone will come in and log the
2054         * file again, but that will end up using the synchronization
2055         * inside btrfs_sync_log to keep things safe.
2056         */
2057        inode_unlock(inode);
2058
2059        /*
2060         * If any of the ordered extents had an error, just return it to user
2061         * space, so that the application knows some writes didn't succeed and
2062         * can take proper action (e.g. retry). Blindly committing the
2063         * transaction in this case, would fool userspace that everything was
2064         * successful. And we also want to make sure our log doesn't contain
2065         * file extent items pointing to extents that weren't fully written to -
2066         * just like in the non fast fsync path, where we check for the ordered
2067         * operation's error flag before writing to the log tree and return -EIO
2068         * if any of them had this flag set (btrfs_wait_ordered_range) -
2069         * therefore we need to check for errors in the ordered operations,
2070         * which are indicated by ctx.io_err.
2071         */
2072        if (ctx.io_err) {
2073                btrfs_end_transaction(trans, root);
2074                ret = ctx.io_err;
2075                goto out;
2076        }
2077
2078        if (ret != BTRFS_NO_LOG_SYNC) {
2079                if (!ret) {
2080                        ret = btrfs_sync_log(trans, root, &ctx);
2081                        if (!ret) {
2082                                ret = btrfs_end_transaction(trans, root);
2083                                goto out;
2084                        }
2085                }
2086                if (!full_sync) {
2087                        ret = btrfs_wait_ordered_range(inode, start, len);
2088                        if (ret) {
2089                                btrfs_end_transaction(trans, root);
2090                                goto out;
2091                        }
2092                }
2093                ret = btrfs_commit_transaction(trans, root);
2094        } else {
2095                ret = btrfs_end_transaction(trans, root);
2096        }
2097out:
2098        return ret > 0 ? -EIO : ret;
2099}
2100
2101static const struct vm_operations_struct btrfs_file_vm_ops = {
2102        .fault          = filemap_fault,
2103        .map_pages      = filemap_map_pages,
2104        .page_mkwrite   = btrfs_page_mkwrite,
2105};
2106
2107static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2108{
2109        struct address_space *mapping = filp->f_mapping;
2110
2111        if (!mapping->a_ops->readpage)
2112                return -ENOEXEC;
2113
2114        file_accessed(filp);
2115        vma->vm_ops = &btrfs_file_vm_ops;
2116
2117        return 0;
2118}
2119
2120static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
2121                          int slot, u64 start, u64 end)
2122{
2123        struct btrfs_file_extent_item *fi;
2124        struct btrfs_key key;
2125
2126        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2127                return 0;
2128
2129        btrfs_item_key_to_cpu(leaf, &key, slot);
2130        if (key.objectid != btrfs_ino(inode) ||
2131            key.type != BTRFS_EXTENT_DATA_KEY)
2132                return 0;
2133
2134        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2135
2136        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2137                return 0;
2138
2139        if (btrfs_file_extent_disk_bytenr(leaf, fi))
2140                return 0;
2141
2142        if (key.offset == end)
2143                return 1;
2144        if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2145                return 1;
2146        return 0;
2147}
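
    /*
     * For hole_mergeable() above: an existing hole item at
     * key.offset = 8192 with num_bytes = 4096 covers [8192, 12288). A new
     * hole [4096, 8192) is mergeable with it (key.offset == end), as is a
     * new hole [12288, 16384) (key.offset + num_bytes == start).
     */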
2148
2149static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
2150                      struct btrfs_path *path, u64 offset, u64 end)
2151{
2152        struct btrfs_root *root = BTRFS_I(inode)->root;
2153        struct extent_buffer *leaf;
2154        struct btrfs_file_extent_item *fi;
2155        struct extent_map *hole_em;
2156        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2157        struct btrfs_key key;
2158        int ret;
2159
2160        if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
2161                goto out;
2162
2163        key.objectid = btrfs_ino(inode);
2164        key.type = BTRFS_EXTENT_DATA_KEY;
2165        key.offset = offset;
2166
2167        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2168        if (ret < 0)
2169                return ret;
2170        BUG_ON(!ret);
2171
2172        leaf = path->nodes[0];
2173        if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2174                u64 num_bytes;
2175
2176                path->slots[0]--;
2177                fi = btrfs_item_ptr(leaf, path->slots[0],
2178                                    struct btrfs_file_extent_item);
2179                num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2180                        end - offset;
2181                btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2182                btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2183                btrfs_set_file_extent_offset(leaf, fi, 0);
2184                btrfs_mark_buffer_dirty(leaf);
2185                goto out;
2186        }
2187
2188        if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2189                u64 num_bytes;
2190
2191                key.offset = offset;
2192                btrfs_set_item_key_safe(root->fs_info, path, &key);
2193                fi = btrfs_item_ptr(leaf, path->slots[0],
2194                                    struct btrfs_file_extent_item);
2195                num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2196                        offset;
2197                btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2198                btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2199                btrfs_set_file_extent_offset(leaf, fi, 0);
2200                btrfs_mark_buffer_dirty(leaf);
2201                goto out;
2202        }
2203        btrfs_release_path(path);
2204
2205        ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
2206                                       0, 0, end - offset, 0, end - offset,
2207                                       0, 0, 0);
2208        if (ret)
2209                return ret;
2210
2211out:
2212        btrfs_release_path(path);
2213
2214        hole_em = alloc_extent_map();
2215        if (!hole_em) {
2216                btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2217                set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2218                        &BTRFS_I(inode)->runtime_flags);
2219        } else {
2220                hole_em->start = offset;
2221                hole_em->len = end - offset;
2222                hole_em->ram_bytes = hole_em->len;
2223                hole_em->orig_start = offset;
2224
2225                hole_em->block_start = EXTENT_MAP_HOLE;
2226                hole_em->block_len = 0;
2227                hole_em->orig_block_len = 0;
2228                hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
2229                hole_em->compress_type = BTRFS_COMPRESS_NONE;
2230                hole_em->generation = trans->transid;
2231
2232                do {
2233                        btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2234                        write_lock(&em_tree->lock);
2235                        ret = add_extent_mapping(em_tree, hole_em, 1);
2236                        write_unlock(&em_tree->lock);
2237                } while (ret == -EEXIST);
2238                free_extent_map(hole_em);
2239                if (ret)
2240                        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2241                                &BTRFS_I(inode)->runtime_flags);
2242        }
2243
2244        return 0;
2245}
2246
2247/*
2248 * Find a hole or vacuum extent on the given inode (one whose
2249 * em->start <= start && em->start + em->len > start) and advance
2250 * start/len past the end of that extent.
2251 * When a hole extent is found, return 1 and modify start/len.
2252 */
2253static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2254{
2255        struct extent_map *em;
2256        int ret = 0;
2257
2258        em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0);
2259        if (IS_ERR_OR_NULL(em)) {
2260                if (!em)
2261                        ret = -ENOMEM;
2262                else
2263                        ret = PTR_ERR(em);
2264                return ret;
2265        }
2266
2267        /* Hole or vacuum extent (the latter only exists in no-holes mode) */
2268        if (em->block_start == EXTENT_MAP_HOLE) {
2269                ret = 1;
2270                *len = em->start + em->len > *start + *len ?
2271                       0 : *start + *len - em->start - em->len;
2272                *start = em->start + em->len;
2273        }
2274        free_extent_map(em);
2275        return ret;
2276}
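
    /*
     * For find_first_non_hole() above: with a hole extent map covering
     * [0, 16384), a call with *start = 4096 and *len = 20480 returns 1
     * and leaves *start = 16384 and *len = 8192, the part of the original
     * range that lies beyond the hole.
     */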
2277
2278static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2279{
2280        struct btrfs_root *root = BTRFS_I(inode)->root;
2281        struct extent_state *cached_state = NULL;
2282        struct btrfs_path *path;
2283        struct btrfs_block_rsv *rsv;
2284        struct btrfs_trans_handle *trans;
2285        u64 lockstart;
2286        u64 lockend;
2287        u64 tail_start;
2288        u64 tail_len;
2289        u64 orig_start = offset;
2290        u64 cur_offset;
2291        u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
2292        u64 drop_end;
2293        int ret = 0;
2294        int err = 0;
2295        unsigned int rsv_count;
2296        bool same_page;
2297        bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
2298        u64 ino_size;
2299        bool truncated_page = false;
2300        bool updated_inode = false;
2301
2302        ret = btrfs_wait_ordered_range(inode, offset, len);
2303        if (ret)
2304                return ret;
2305
2306        inode_lock(inode);
2307        ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
2308        ret = find_first_non_hole(inode, &offset, &len);
2309        if (ret < 0)
2310                goto out_only_mutex;
2311        if (ret && !len) {
2312                /* Already in a large hole */
2313                ret = 0;
2314                goto out_only_mutex;
2315        }
2316
2317        lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
2318        lockend = round_down(offset + len,
2319                             BTRFS_I(inode)->root->sectorsize) - 1;
2320        same_page = ((offset >> PAGE_CACHE_SHIFT) ==
2321                    ((offset + len - 1) >> PAGE_CACHE_SHIFT));
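            /*
             * Example (assuming a 4K sectorsize): offset = 1000 and
             * len = 8000 give lockstart = 4096 and lockend = 8191; the
             * unaligned head [1000, 4095] and tail [8192, 8999] are zeroed
             * in place by btrfs_truncate_page() below, and only
             * [4096, 8191] has its extents dropped.
             */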
2322
2323        /*
2324         * We needn't truncate any page which is beyond the end of the file
2325         * because we are sure there is no data there.
2326         *
2327         * Only do this if we are in the same page and we aren't doing the
2328         * entire page.
2329         */
2331        if (same_page && len < PAGE_CACHE_SIZE) {
2332                if (offset < ino_size) {
2333                        truncated_page = true;
2334                        ret = btrfs_truncate_page(inode, offset, len, 0);
2335                } else {
2336                        ret = 0;
2337                }
2338                goto out_only_mutex;
2339        }
2340
2341        /* zero the back part of the first page */
2342        if (offset < ino_size) {
2343                truncated_page = true;
2344                ret = btrfs_truncate_page(inode, offset, 0, 0);
2345                if (ret) {
2346                        inode_unlock(inode);
2347                        return ret;
2348                }
2349        }
2350
2351        /* Check the aligned pages after the first unaligned page.
2352         * If offset != orig_start, the first unaligned page and
2353         * several following pages are already in holes, so the
2354         * extra check can be skipped. */
2355        if (offset == orig_start) {
2356                /* after truncate page, check hole again */
2357                len = offset + len - lockstart;
2358                offset = lockstart;
2359                ret = find_first_non_hole(inode, &offset, &len);
2360                if (ret < 0)
2361                        goto out_only_mutex;
2362                if (ret && !len) {
2363                        ret = 0;
2364                        goto out_only_mutex;
2365                }
2366                lockstart = offset;
2367        }
2368
2369        /* Check the tail unaligned part is in a hole */
2370        tail_start = lockend + 1;
2371        tail_len = offset + len - tail_start;
2372        if (tail_len) {
2373                ret = find_first_non_hole(inode, &tail_start, &tail_len);
2374                if (unlikely(ret < 0))
2375                        goto out_only_mutex;
2376                if (!ret) {
2377                        /* zero the front end of the last page */
2378                        if (tail_start + tail_len < ino_size) {
2379                                truncated_page = true;
2380                                ret = btrfs_truncate_page(inode,
2381                                                tail_start + tail_len, 0, 1);
2382                                if (ret)
2383                                        goto out_only_mutex;
2384                        }
2385                }
2386        }
2387
2388        if (lockend < lockstart) {
2389                ret = 0;
2390                goto out_only_mutex;
2391        }
2392
2393        while (1) {
2394                struct btrfs_ordered_extent *ordered;
2395
2396                truncate_pagecache_range(inode, lockstart, lockend);
2397
2398                lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2399                                 &cached_state);
2400                ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2401
2402                /*
2403                 * We need to make sure we have no ordered extents in this range
2404                 * and nobody raced in and read a page in this range, if we did
2405                 * we need to try again.
2406                 */
2407                if ((!ordered ||
2408                    (ordered->file_offset + ordered->len <= lockstart ||
2409                     ordered->file_offset > lockend)) &&
2410                     !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
2411                        if (ordered)
2412                                btrfs_put_ordered_extent(ordered);
2413                        break;
2414                }
2415                if (ordered)
2416                        btrfs_put_ordered_extent(ordered);
2417                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2418                                     lockend, &cached_state, GFP_NOFS);
2419                ret = btrfs_wait_ordered_range(inode, lockstart,
2420                                               lockend - lockstart + 1);
2421                if (ret) {
2422                        inode_unlock(inode);
2423                        return ret;
2424                }
2425        }
2426
2427        path = btrfs_alloc_path();
2428        if (!path) {
2429                ret = -ENOMEM;
2430                goto out;
2431        }
2432
2433        rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2434        if (!rsv) {
2435                ret = -ENOMEM;
2436                goto out_free;
2437        }
2438        rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
2439        rsv->failfast = 1;
2440
2441        /*
2442         * 1 - update the inode
2443         * 1 - removing the extents in the range
2444         * 1 - adding the hole extent if no_holes isn't set
2445         */
2446        rsv_count = no_holes ? 2 : 3;
2447        trans = btrfs_start_transaction(root, rsv_count);
2448        if (IS_ERR(trans)) {
2449                err = PTR_ERR(trans);
2450                goto out_free;
2451        }
2452
2453        ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
2454                                      min_size);
2455        BUG_ON(ret);
2456        trans->block_rsv = rsv;
2457
2458        cur_offset = lockstart;
2459        len = lockend - cur_offset;
2460        while (cur_offset < lockend) {
2461                ret = __btrfs_drop_extents(trans, root, inode, path,
2462                                           cur_offset, lockend + 1,
2463                                           &drop_end, 1, 0, 0, NULL);
2464                if (ret != -ENOSPC)
2465                        break;
2466
2467                trans->block_rsv = &root->fs_info->trans_block_rsv;
2468
2469                if (cur_offset < ino_size) {
2470                        ret = fill_holes(trans, inode, path, cur_offset,
2471                                         drop_end);
2472                        if (ret) {
2473                                err = ret;
2474                                break;
2475                        }
2476                }
2477
2478                cur_offset = drop_end;
2479
2480                ret = btrfs_update_inode(trans, root, inode);
2481                if (ret) {
2482                        err = ret;
2483                        break;
2484                }
2485
2486                btrfs_end_transaction(trans, root);
2487                btrfs_btree_balance_dirty(root);
2488
2489                trans = btrfs_start_transaction(root, rsv_count);
2490                if (IS_ERR(trans)) {
2491                        ret = PTR_ERR(trans);
2492                        trans = NULL;
2493                        break;
2494                }
2495
2496                ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
2497                                              rsv, min_size);
2498                BUG_ON(ret);    /* shouldn't happen */
2499                trans->block_rsv = rsv;
2500
2501                ret = find_first_non_hole(inode, &cur_offset, &len);
2502                if (unlikely(ret < 0))
2503                        break;
2504                if (ret && !len) {
2505                        ret = 0;
2506                        break;
2507                }
2508        }
2509
2510        if (ret) {
2511                err = ret;
2512                goto out_trans;
2513        }
2514
2515        trans->block_rsv = &root->fs_info->trans_block_rsv;
2516        /*
2517         * If we are using the NO_HOLES feature we might have already had a
2518         * hole that overlaps a part of the region [lockstart, lockend] and
2519         * ends at (or beyond) lockend. Since we have no file extent items to
2520         * represent holes, drop_end can be less than lockend and so we must
2521         * make sure we have an extent map representing the existing hole (the
2522         * call to __btrfs_drop_extents() might have dropped the existing extent
2523         * map representing the existing hole), otherwise the fast fsync path
2524         * will not record the existence of the hole region
2525         * [existing_hole_start, lockend].
2526         */
2527        if (drop_end <= lockend)
2528                drop_end = lockend + 1;
2529        /*
2530         * Don't insert file hole extent item if it's for a range beyond eof
2531         * (because it's useless) or if it represents a 0 bytes range (when
2532         * cur_offset == drop_end).
2533         */
2534        if (cur_offset < ino_size && cur_offset < drop_end) {
2535                ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2536                if (ret) {
2537                        err = ret;
2538                        goto out_trans;
2539                }
2540        }
2541
2542out_trans:
2543        if (!trans)
2544                goto out_free;
2545
2546        inode_inc_iversion(inode);
2547        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2548
2549        trans->block_rsv = &root->fs_info->trans_block_rsv;
2550        ret = btrfs_update_inode(trans, root, inode);
2551        updated_inode = true;
2552        btrfs_end_transaction(trans, root);
2553        btrfs_btree_balance_dirty(root);
2554out_free:
2555        btrfs_free_path(path);
2556        btrfs_free_block_rsv(root, rsv);
2557out:
2558        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2559                             &cached_state, GFP_NOFS);
2560out_only_mutex:
2561        if (!updated_inode && truncated_page && !ret && !err) {
2562                /*
2563                 * If we only end up zeroing part of a page, we still need to
2564                 * update the inode item, so that all the time fields are
2565                 * updated as well as the necessary btrfs inode in memory fields
2566                 * for detecting, at fsync time, if the inode isn't yet in the
2567                 * log tree or it's there but not up to date.
2568                 */
2569                trans = btrfs_start_transaction(root, 1);
2570                if (IS_ERR(trans)) {
2571                        err = PTR_ERR(trans);
2572                } else {
2573                        err = btrfs_update_inode(trans, root, inode);
2574                        ret = btrfs_end_transaction(trans, root);
2575                }
2576        }
2577        inode_unlock(inode);
2578        if (ret && !err)
2579                err = ret;
2580        return err;
2581}
2582
2583/* Helper structure to record which range is already reserved */
2584struct falloc_range {
2585        struct list_head list;
2586        u64 start;
2587        u64 len;
2588};
2589
2590/*
2591 * Helper function to add falloc range
2592 *
2593 * The caller should have locked the larger extent range containing
2594 * [start, len)
2595 */
2596static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2597{
2598        struct falloc_range *prev = NULL;
2599        struct falloc_range *range = NULL;
2600
2601        if (list_empty(head))
2602                goto insert;
2603
2604        /*
2605         * As fallocate iterates in bytenr order, we only need to check
2606         * the last range.
2607         */
2608        prev = list_entry(head->prev, struct falloc_range, list);
2609        if (prev->start + prev->len == start) {
2610                prev->len += len;
2611                return 0;
2612        }
2613insert:
2614        range = kmalloc(sizeof(*range), GFP_NOFS);
2615        if (!range)
2616                return -ENOMEM;
2617        range->start = start;
2618        range->len = len;
2619        list_add_tail(&range->list, head);
2620        return 0;
2621}
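
    /*
     * For add_falloc_range() above: adding (start = 0, len = 4096) and
     * then (start = 4096, len = 8192) to an empty list leaves a single
     * entry covering (0, 12288); a later (20480, 4096) is not contiguous
     * with the previous entry and starts a new one.
     */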
2622
2623static long btrfs_fallocate(struct file *file, int mode,
2624                            loff_t offset, loff_t len)
2625{
2626        struct inode *inode = file_inode(file);
2627        struct extent_state *cached_state = NULL;
2628        struct falloc_range *range;
2629        struct falloc_range *tmp;
2630        struct list_head reserve_list;
2631        u64 cur_offset;
2632        u64 last_byte;
2633        u64 alloc_start;
2634        u64 alloc_end;
2635        u64 alloc_hint = 0;
2636        u64 locked_end;
2637        u64 actual_end = 0;
2638        struct extent_map *em;
2639        int blocksize = BTRFS_I(inode)->root->sectorsize;
2640        int ret;
2641
2642        alloc_start = round_down(offset, blocksize);
2643        alloc_end = round_up(offset + len, blocksize);
2644
2645        /* Make sure we aren't being given some crap mode */
2646        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2647                return -EOPNOTSUPP;
2648
2649        if (mode & FALLOC_FL_PUNCH_HOLE)
2650                return btrfs_punch_hole(inode, offset, len);
2651
2652        /*
2653         * Only trigger disk allocation, don't trigger qgroup reserve
2654         *
2655         * For qgroup space, it will be checked later.
2656         */
2657        ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start);
2658        if (ret < 0)
2659                return ret;
2660
2661        inode_lock(inode);
2662        ret = inode_newsize_ok(inode, alloc_end);
2663        if (ret)
2664                goto out;
2665
2666        /*
2667         * TODO: Move these two operations after we have checked
2668         * the accurate reserved space, or fallocate can still fail
2669         * but leave the page truncated or the size expanded.
2670         *
2671         * That is a minor problem, though, and does little harm.
2672         */
2673        if (alloc_start > inode->i_size) {
2674                ret = btrfs_cont_expand(inode, i_size_read(inode),
2675                                        alloc_start);
2676                if (ret)
2677                        goto out;
2678        } else if (offset + len > inode->i_size) {
2679                /*
2680                 * If we are fallocating from the end of the file onward we
2681                 * need to zero out the end of the page if i_size lands in the
2682                 * middle of a page.
2683                 */
2684                ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
2685                if (ret)
2686                        goto out;
2687        }
2688
2689        /*
2690         * wait for ordered IO before we have any locks.  We'll loop again
2691         * below with the locks held.
2692         */
2693        ret = btrfs_wait_ordered_range(inode, alloc_start,
2694                                       alloc_end - alloc_start);
2695        if (ret)
2696                goto out;
2697
2698        locked_end = alloc_end - 1;
2699        while (1) {
2700                struct btrfs_ordered_extent *ordered;
2701
2702                /* the extent lock is ordered inside the running
2703                 * transaction
2704                 */
2705                lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2706                                 locked_end, &cached_state);
2707                ordered = btrfs_lookup_first_ordered_extent(inode,
2708                                                            alloc_end - 1);
2709                if (ordered &&
2710                    ordered->file_offset + ordered->len > alloc_start &&
2711                    ordered->file_offset < alloc_end) {
2712                        btrfs_put_ordered_extent(ordered);
2713                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2714                                             alloc_start, locked_end,
2715                                             &cached_state, GFP_NOFS);
2716                        /*
2717                         * we can't wait on the range with the transaction
2718                         * running or with the extent lock held
2719                         */
2720                        ret = btrfs_wait_ordered_range(inode, alloc_start,
2721                                                       alloc_end - alloc_start);
2722                        if (ret)
2723                                goto out;
2724                } else {
2725                        if (ordered)
2726                                btrfs_put_ordered_extent(ordered);
2727                        break;
2728                }
2729        }
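
            /*
             * From here on, [alloc_start, locked_end] is locked in the io
             * tree and known to be free of overlapping ordered extents, so
             * the extent maps looked up below should not change under us.
             */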
2730
2731        /* First, check if we exceed the qgroup limit */
2732        INIT_LIST_HEAD(&reserve_list);
2733        cur_offset = alloc_start;
2734        while (1) {
2735                em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2736                                      alloc_end - cur_offset, 0);
2737                if (IS_ERR_OR_NULL(em)) {
2738                        if (!em)
2739                                ret = -ENOMEM;
2740                        else
2741                                ret = PTR_ERR(em);
2742                        break;
2743                }
2744                last_byte = min(extent_map_end(em), alloc_end);
2745                actual_end = min_t(u64, extent_map_end(em), offset + len);
2746                last_byte = ALIGN(last_byte, blocksize);
2747                if (em->block_start == EXTENT_MAP_HOLE ||
2748                    (cur_offset >= inode->i_size &&
2749                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2750                        ret = add_falloc_range(&reserve_list, cur_offset,
2751                                               last_byte - cur_offset);
2752                        if (ret < 0) {
2753                                free_extent_map(em);
2754                                break;
2755                        }
2756                        ret = btrfs_qgroup_reserve_data(inode, cur_offset,
2757                                        last_byte - cur_offset);
2758                        if (ret < 0) {
                                    free_extent_map(em);
2759                                break;
                            }
2760                }
2761                free_extent_map(em);
2762                cur_offset = last_byte;
2763                if (cur_offset >= alloc_end)
2764                        break;
2765        }
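
            /*
             * reserve_list now holds just the sub-ranges that still need an
             * allocation; only those were charged to the qgroup above.
             */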
2766
2767        /*
2768         * If ret is still 0, we're OK to fallocate.
2769         * Otherwise just clean up the list and exit.
2770         */
2771        list_for_each_entry_safe(range, tmp, &reserve_list, list) {
2772                if (!ret)
2773                        ret = btrfs_prealloc_file_range(inode, mode,
2774                                        range->start,
2775                                        range->len, 1 << inode->i_blkbits,
2776                                        offset + len, &alloc_hint);
2777                list_del(&range->list);
2778                kfree(range);
2779        }
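
            /*
             * Note that the loop above always drains reserve_list, even once
             * ret has gone negative: the remaining entries are freed rather
             * than leaked, and their qgroup reservation is returned by the
             * cleanup at the "out" label below.
             */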
2780        if (ret < 0)
2781                goto out_unlock;
2782
2783        if (actual_end > inode->i_size &&
2784            !(mode & FALLOC_FL_KEEP_SIZE)) {
2785                struct btrfs_trans_handle *trans;
2786                struct btrfs_root *root = BTRFS_I(inode)->root;
2787
2788                /*
2789                 * We didn't need to allocate any more space, but we
2790                 * still extended the size of the file so we need to
2791                 * update i_size and the inode item.
2792                 */
2793                trans = btrfs_start_transaction(root, 1);
2794                if (IS_ERR(trans)) {
2795                        ret = PTR_ERR(trans);
2796                } else {
2797                        inode->i_ctime = CURRENT_TIME;
2798                        i_size_write(inode, actual_end);
2799                        btrfs_ordered_update_i_size(inode, actual_end, NULL);
2800                        ret = btrfs_update_inode(trans, root, inode);
2801                        if (ret)
2802                                btrfs_end_transaction(trans, root);
2803                        else
2804                                ret = btrfs_end_transaction(trans, root);
2805                }
2806        }
2807out_unlock:
2808        unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2809                             &cached_state, GFP_NOFS);
2810out:
2811        /*
2812         * As we have waited on the extent range, the data_rsv_map must be
2813         * empty in the range: any written data range has been released
2814         * from it. A preallocated extent is likewise released when its
2815         * metadata is written.
2816         * So this call is purely cleanup.
2817         */
2818        btrfs_qgroup_free_data(inode, alloc_start, alloc_end - alloc_start);
2819        inode_unlock(inode);
2820        /* Let go of our reservation. */
2821        btrfs_free_reserved_data_space(inode, alloc_start,
2822                                       alloc_end - alloc_start);
2823        return ret;
2824}
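
    /*
     * A minimal userspace sketch of the path above (illustrative only; the
     * path name is made up):
     *
     *      int fd = open("/mnt/btrfs/file", O_RDWR | O_CREAT, 0644);
     *      fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
     *
     * With FALLOC_FL_KEEP_SIZE the i_size update at the end of
     * btrfs_fallocate() is skipped, so on this fresh file a following
     * lseek(fd, 0, SEEK_DATA) still fails with ENXIO: i_size stayed 0 even
     * though a 1MiB extent was preallocated.
     */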
2825
2826static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2827{
2828        struct btrfs_root *root = BTRFS_I(inode)->root;
2829        struct extent_map *em = NULL;
2830        struct extent_state *cached_state = NULL;
2831        u64 lockstart;
2832        u64 lockend;
2833        u64 start;
2834        u64 len;
2835        int ret = 0;
2836
2837        if (inode->i_size == 0)
2838                return -ENXIO;
2839
2840        /*
2841         * *offset can be negative; in that case we start searching for
2842         * DATA/HOLE from the very start of the file.
2843         */
2844        start = max_t(loff_t, 0, *offset);
2845
2846        lockstart = round_down(start, root->sectorsize);
2847        lockend = round_up(i_size_read(inode), root->sectorsize);
2848        if (lockend <= lockstart)
2849                lockend = lockstart + root->sectorsize;
2850        lockend--;
2851        len = lockend - lockstart + 1;
2852
2853        lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2854                         &cached_state);
2855
2856        while (start < inode->i_size) {
2857                em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2858                if (IS_ERR(em)) {
2859                        ret = PTR_ERR(em);
2860                        em = NULL;
2861                        break;
2862                }
2863
2864                if (whence == SEEK_HOLE &&
2865                    (em->block_start == EXTENT_MAP_HOLE ||
2866                     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2867                        break;
2868                else if (whence == SEEK_DATA &&
2869                           (em->block_start != EXTENT_MAP_HOLE &&
2870                            !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2871                        break;
2872
2873                start = em->start + em->len;
2874                free_extent_map(em);
2875                em = NULL;
2876                cond_resched();
2877        }
2878        free_extent_map(em);
2879        if (!ret) {
2880                if (whence == SEEK_DATA && start >= inode->i_size)
2881                        ret = -ENXIO;
2882                else
2883                        *offset = min_t(loff_t, start, inode->i_size);
2884        }
2885        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2886                             &cached_state, GFP_NOFS);
2887        return ret;
2888}
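
    /*
     * The helper above backs lseek(2)'s SEEK_DATA/SEEK_HOLE.  A tiny
     * userspace sketch (illustrative only):
     *
     *      off_t data = lseek(fd, pos, SEEK_DATA);  first data at/after pos
     *      off_t hole = lseek(fd, data, SEEK_HOLE); end of that data run
     *
     * Note that preallocated (unwritten) extents count as holes here, per
     * the EXTENT_FLAG_PREALLOC tests in the loop.
     */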
2889
2890static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
2891{
2892        struct inode *inode = file->f_mapping->host;
2893        int ret;
2894
2895        inode_lock(inode);
2896        switch (whence) {
2897        case SEEK_END:
2898        case SEEK_CUR:
2899                offset = generic_file_llseek(file, offset, whence);
2900                goto out;
2901        case SEEK_DATA:
2902        case SEEK_HOLE:
2903                if (offset >= i_size_read(inode)) {
2904                        inode_unlock(inode);
2905                        return -ENXIO;
2906                }
2907
2908                ret = find_desired_extent(inode, &offset, whence);
2909                if (ret) {
2910                        inode_unlock(inode);
2911                        return ret;
2912                }
2913        }
2914
2915        offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2916out:
2917        inode_unlock(inode);
2918        return offset;
2919}
2920
2921const struct file_operations btrfs_file_operations = {
2922        .llseek         = btrfs_file_llseek,
2923        .read_iter      = generic_file_read_iter,
2924        .splice_read    = generic_file_splice_read,
2925        .write_iter     = btrfs_file_write_iter,
2926        .mmap           = btrfs_file_mmap,
2927        .open           = generic_file_open,
2928        .release        = btrfs_release_file,
2929        .fsync          = btrfs_sync_file,
2930        .fallocate      = btrfs_fallocate,
2931        .unlocked_ioctl = btrfs_ioctl,
2932#ifdef CONFIG_COMPAT
2933        .compat_ioctl   = btrfs_ioctl,
2934#endif
2935        .copy_file_range = btrfs_copy_file_range,
2936        .clone_file_range = btrfs_clone_file_range,
2937        .dedupe_file_range = btrfs_dedupe_file_range,
2938};
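
    /*
     * This table is hooked up when an inode is instantiated in inode.c
     * (regular files get i_fop = &btrfs_file_operations), so every read,
     * write, fsync and fallocate on a btrfs regular file enters through it.
     */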
2939
2940void btrfs_auto_defrag_exit(void)
2941{
2942        /* kmem_cache_destroy() is a no-op on a NULL pointer */
2943        kmem_cache_destroy(btrfs_inode_defrag_cachep);
2944}
2945
2946int btrfs_auto_defrag_init(void)
2947{
2948        btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
2949                                        sizeof(struct inode_defrag), 0,
2950                                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2951                                        NULL);
2952        if (!btrfs_inode_defrag_cachep)
2953                return -ENOMEM;
2954
2955        return 0;
2956}
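
    /*
     * The defrag records are allocated from this cache elsewhere in this
     * file (with GFP_NOFS, as they are taken on write paths) and returned
     * with kmem_cache_free().  SLAB_RECLAIM_ACCOUNT makes the allocations
     * count as reclaimable for the VM; SLAB_MEM_SPREAD spreads them across
     * a cpuset's memory nodes.
     */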
2957
2958int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
2959{
2960        int ret;
2961
2962        /*
2963         * So with compression we will find and lock a dirty page and clear the
2964         * first one as dirty, set up an async extent, and immediately return
2965         * with the entire range locked but with nobody actually marked for
2966         * writeback.  So we can't just filemap_write_and_wait_range() and
2967         * expect it to work since it will just kick off a thread to do the
2968         * actual work.  So we need to call filemap_fdatawrite_range _again_
2969         * since it will wait on the page lock, which won't be unlocked until
2970         * after the pages have been marked as writeback and so we're good to go
2971         * from there.  We have to do this, otherwise we'll miss the ordered
2972         * extents and that results in badness.  Please Josef, do not think you
2973         * know better and pull this out at some point in the future, it is
2974         * right and you are wrong.
2975         */
2976        ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
2977        if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
2978                             &BTRFS_I(inode)->runtime_flags))
2979                ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
2980
2981        return ret;
2982}
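
    /*
     * Rough shape of the dance above for an inode with async (compressed)
     * extents, for any range:
     *
     *      btrfs_fdatawrite_range(inode, start, end);
     *              pass 1: kicks off async compression, pages stay locked
     *              pass 2: (HAS_ASYNC_EXTENT was set) blocks on those page
     *                      locks until writeback is really marked
     *
     * Callers such as fsync depend on this so the ordered extents exist by
     * the time they go looking for them.
     */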
2983