linux/fs/btrfs/file.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"

/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid where the defrag was added, we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};
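
/*
 * the defrag tree below is keyed by inode number alone, so records
 * from different subvolume roots that share an objectid merge into a
 * single node; the root field is only used to look the inode up again
 * when the defrag pass actually runs
 */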

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found, the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;

        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                if (defrag->ino < entry->ino)
                        p = &parent->rb_left;
                else if (defrag->ino > entry->ino)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        goto exists;
                }
        }
        BTRFS_I(inode)->in_defrag = 1;
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return;

exists:
        kfree(defrag);
        return;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *defrag;
        u64 transid;

        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(root->fs_info))
                return 0;

        if (BTRFS_I(inode)->in_defrag)
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = root->last_trans;

        defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

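        /*
         * in_defrag was tested unlocked above only as a cheap fast
         * path; recheck it under defrag_inodes_lock so that racing
         * callers insert at most one record per inode
         */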
        spin_lock(&root->fs_info->defrag_inodes_lock);
        if (!BTRFS_I(inode)->in_defrag)
                __btrfs_add_inode_defrag(inode, defrag);
        else
                kfree(defrag);
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        return 0;
}

/*
 * must be called with the defrag_inodes lock held
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino,
                                             struct rb_node **next)
{
        struct inode_defrag *entry = NULL;
        struct rb_node *p;
        struct rb_node *parent = NULL;

        p = info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                if (ino < entry->ino)
                        p = parent->rb_left;
                else if (ino > entry->ino)
                        p = parent->rb_right;
                else
                        return entry;
        }

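        /*
         * no exact match: walk forward from the last node the search
         * visited to the first record with a higher inode number, and
         * hand that back through @next so the caller can resume there
         */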
        if (next) {
                while (parent && ino > entry->ino) {
                        parent = rb_next(parent);
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                }
                *next = parent;
        }
        return NULL;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct rb_node *n;
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        u64 first_ino = 0;
        int num_defrag;
        int defrag_batch = 1024;

        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;

        atomic_inc(&fs_info->defrag_running);
        spin_lock(&fs_info->defrag_inodes_lock);
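        /*
         * each pass takes the record with the smallest ino >= first_ino,
         * runs one batch of defrag on it and advances first_ino; when we
         * fall off the end of the tree we wrap back to zero once to pick
         * up records inserted behind us, and stop when a scan from zero
         * finds nothing at all
         */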
        while (1) {
                n = NULL;

                /* find an inode to defrag */
                defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n);
                if (!defrag) {
                        if (n)
                                defrag = rb_entry(n, struct inode_defrag, rb_node);
                        else if (first_ino) {
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                /* remove it from the rbtree */
                first_ino = defrag->ino + 1;
                rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

                if (btrfs_fs_closing(fs_info))
                        goto next_free;

                spin_unlock(&fs_info->defrag_inodes_lock);

                /* get the inode */
                key.objectid = defrag->root;
                btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
                key.offset = (u64)-1;
                inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
                if (IS_ERR(inode_root))
                        goto next;

                key.objectid = defrag->ino;
                btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
                key.offset = 0;

                inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
                if (IS_ERR(inode))
                        goto next;

                /* do a chunk of defrag */
                BTRFS_I(inode)->in_defrag = 0;
                range.start = defrag->last_offset;
                num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                               defrag_batch);
                /*
                 * if we filled the whole defrag batch, there
                 * must be more work to do.  Queue this defrag
                 * again
                 */
                if (num_defrag == defrag_batch) {
                        defrag->last_offset = range.start;
                        __btrfs_add_inode_defrag(inode, defrag);
                        /*
                         * we don't want to kfree defrag, we added it back to
                         * the rbtree
                         */
                        defrag = NULL;
                } else if (defrag->last_offset && !defrag->cycled) {
                        /*
                         * we didn't fill our defrag batch, but
                         * we didn't start at zero.  Make sure we loop
                         * around to the start of the file.
                         */
                        defrag->last_offset = 0;
                        defrag->cycled = 1;
                        __btrfs_add_inode_defrag(inode, defrag);
                        defrag = NULL;
                }

                iput(inode);
next:
                spin_lock(&fs_info->defrag_inodes_lock);
next_free:
                kfree(defrag);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);

        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 *
                 * Disable pagefault to avoid recursive lock since
                 * the pages are already locked
                 */
                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
                pagefault_enable();

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_aio_write to fault page */
                if (unlikely(copied == 0))
                        break;

                if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                /* page checked is some magic around finding pages that
                 * have been modified without going through
                 * btrfs_set_page_dirty.  Clear it here.
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any new extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                      struct page **pages, size_t num_pages,
                      loff_t pos, size_t write_bytes,
                      struct extent_state **cached)
{
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

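        /*
         * round the range out to sector boundaries: start_pos is pos
         * rounded down and num_bytes is the length rounded up, so
         * the delalloc bits below always cover whole sectors
         */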
        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = (write_bytes + pos - start_pos +
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * we've only changed i_size in ram, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(inode, end_pos);
        return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                            int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
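        /*
         * preallocate two extent maps before taking the tree lock:
         * an extent map overlapping the range may need both a front
         * piece and a back piece split off before it is removed
         */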
        while (1) {
                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                BUG_ON(!split || !split2);

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                remove_extent_mapping(em_tree, em);

                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        split->orig_start = em->orig_start;
                        split->block_start = em->block_start;

                        if (compressed)
                                split->block_len = em->block_len;
                        else
                                split->block_len = split->len;

                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;

                        if (compressed) {
                                split->block_len = em->block_len;
                                split->block_start = em->block_start;
                                split->orig_start = em->orig_start;
                        } else {
                                split->block_len = split->len;
                                split->block_start = em->block_start + diff;
                                split->orig_start = split->start;
                        }

                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
                        split = NULL;
                }
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
        return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a byte offset
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
                       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, -1);
                if (ret < 0)
                        break;
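                /*
                 * the lookup may land on the first key after
                 * search_start; step back one slot so an extent that
                 * starts before search_start but overlaps it is not
                 * missed
                 */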
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid > ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                } else {
                        WARN_ON(1);
                        extent_end = search_start;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                search_start = max(key.offset, start);
                if (recow) {
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset);
                                BUG_ON(ret);
                                *hint_byte = disk_bytenr;
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (disk_bytenr > 0) {
                                inode_sub_bytes(inode, end - key.offset);
                                *hint_byte = disk_bytenr;
                        }
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (disk_bytenr > 0) {
                                inode_sub_bytes(inode, extent_end - start);
                                *hint_byte = disk_bytenr;
                        }
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset);
                                BUG_ON(ret);
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                *hint_byte = disk_bytenr;
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        BUG_ON(ret);

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG_ON(1);
        }

        if (del_nr > 0) {
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                BUG_ON(ret);
        }

        btrfs_free_path(path);
        return ret;
}

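/*
 * helper for btrfs_mark_extent_written: check whether the file extent
 * item at @slot points at the same physical extent (@bytenr with
 * @orig_offset), is regular and unencoded, and so can be merged with
 * its neighbour.  On success the file range it covers is returned via
 * *start and *end.
 */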
static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of the extent is marked as written, the extent will be split
 * into two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        btrfs_drop_extent_cache(inode, start, end - 1, 0);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

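        /*
         * no mergeable neighbour: split the prealloc extent so that
         * [start, end) gets a file extent item of its own.  The loop
         * runs at most twice, once to split off a front piece and
         * once for a back piece.
         */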
        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                BUG_ON(ret < 0);

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           ino, orig_offset);
                BUG_ON(ret);

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset);
                BUG_ON(ret);
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset);
                BUG_ON(ret);
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                BUG_ON(ret);
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * on error we return an unlocked page and the error value;
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
        }
        return 0;
}

/*
 * this gets pages into the page cache and locks them down; it also
 * properly waits for data=ordered extents to finish before allowing
 * the pages to be modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
                         size_t write_bytes, bool force_uptodate)
{
        struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili = 0;
        u64 start_pos;
        u64 last_pos;

        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
                                               mask);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
                        goto fail;
                }

                if (i == 0)
                        err = prepare_uptodate_page(pages[i], pos,
                                                    force_uptodate);
                if (!err && i == num_pages - 1)
                        err = prepare_uptodate_page(pages[i],
                                                    pos + write_bytes, false);
                if (err) {
                        page_cache_release(pages[i]);
                        faili = i - 1;
                        goto fail;
                }
                wait_on_page_writeback(pages[i]);
        }
        err = 0;
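        /*
         * if the write range overlaps existing data, wait for any
         * ordered extent covering it to finish and clear stale
         * delalloc bits before redirtying the pages, so an ordered
         * completion can't stomp on this write
         */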
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent_bits(&BTRFS_I(inode)->io_tree,
                                 start_pos, last_pos - 1, 0, &cached_state,
                                 GFP_NOFS);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             start_pos, last_pos - 1,
                                             &cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
                        btrfs_wait_ordered_range(inode, start_pos,
                                                 last_pos - start_pos);
                        goto again;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
                                  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
                                  GFP_NOFS);
                unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                     start_pos, last_pos - 1, &cached_state,
                                     GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                clear_page_dirty_for_io(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
fail:
        while (faili >= 0) {
                unlock_page(pages[faili]);
                page_cache_release(pages[faili]);
                faili--;
        }
        return err;
}

static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                               struct iov_iter *i,
                                               loff_t pos)
{
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
        unsigned long first_index;
        size_t num_written = 0;
        int nrptrs;
        int ret = 0;
        bool force_page_uptodate = false;

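        /*
         * size the pages array: enough slots for the whole iov,
         * capped at one page worth of pointers and at the task's
         * remaining dirty-page allowance, with a floor of 8 so a
         * small allowance still makes progress
         */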
        nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
        nrptrs = max(nrptrs, 8);
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        first_index = pos >> PAGE_CACHE_SHIFT;

        while (iov_iter_count(i) > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
                size_t write_bytes = min(iov_iter_count(i),
                                         nrptrs * (size_t)PAGE_CACHE_SIZE -
                                         offset);
                size_t num_pages = (write_bytes + offset +
                                    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
                size_t dirty_pages;
                size_t copied;

                WARN_ON(num_pages > nrptrs);

                /*
                 * Fault pages before locking them in prepare_pages
                 * to avoid recursive lock
                 */
                if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
                        ret = -EFAULT;
                        break;
                }

                ret = btrfs_delalloc_reserve_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                if (ret)
                        break;

                /*
                 * This is going to setup the pages array with the number of
                 * pages we want, so we don't really need to worry about the
                 * contents of pages from loop to loop
                 */
                ret = prepare_pages(root, file, pages, num_pages,
                                    pos, first_index, write_bytes,
                                    force_page_uptodate);
                if (ret) {
                        btrfs_delalloc_release_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                        break;
                }

                copied = btrfs_copy_from_user(pos, num_pages,
                                           write_bytes, pages, i);

                /*
                 * if we have trouble faulting in the pages, fall
                 * back to one page at a time
                 */
                if (copied < write_bytes)
                        nrptrs = 1;

                if (copied == 0) {
                        force_page_uptodate = true;
                        dirty_pages = 0;
                } else {
                        force_page_uptodate = false;
                        dirty_pages = (copied + offset +
                                       PAGE_CACHE_SIZE - 1) >>
                                       PAGE_CACHE_SHIFT;
                }

                /*
                 * If we had a short copy we need to release the excess delalloc
                 * bytes we reserved.  We need to increment outstanding_extents
                 * because btrfs_delalloc_release_space will decrement it, but
                 * we still have an outstanding extent for the chunk we actually
                 * managed to copy.
                 */
                if (num_pages > dirty_pages) {
                        if (copied > 0) {
                                spin_lock(&BTRFS_I(inode)->lock);
                                BTRFS_I(inode)->outstanding_extents++;
                                spin_unlock(&BTRFS_I(inode)->lock);
                        }
                        btrfs_delalloc_release_space(inode,
                                        (num_pages - dirty_pages) <<
                                        PAGE_CACHE_SHIFT);
                }

                if (copied > 0) {
                        ret = btrfs_dirty_pages(root, inode, pages,
                                                dirty_pages, pos, copied,
                                                NULL);
                        if (ret) {
                                btrfs_delalloc_release_space(inode,
                                        dirty_pages << PAGE_CACHE_SHIFT);
                                btrfs_drop_pages(pages, num_pages);
                                break;
                        }
                }

                btrfs_drop_pages(pages, num_pages);

                cond_resched();

                balance_dirty_pages_ratelimited_nr(inode->i_mapping,
                                                   dirty_pages);
                if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
                        btrfs_btree_balance_dirty(root, 1);
                btrfs_throttle(root);

                pos += copied;
                num_written += copied;
        }

        kfree(pages);

        return num_written ? num_written : ret;
}

static ssize_t __btrfs_direct_write(struct kiocb *iocb,
                                    const struct iovec *iov,
                                    unsigned long nr_segs, loff_t pos,
                                    loff_t *ppos, size_t count, size_t ocount)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = fdentry(file)->d_inode;
        struct iov_iter i;
        ssize_t written;
        ssize_t written_buffered;
        loff_t endbyte;
        int err;

        written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
                                            count, ocount);

        /*
         * the generic O_DIRECT will update in-memory i_size after the
         * DIOs are done.  But our endio handlers that update the on
         * disk i_size never update past the in memory i_size.  So we
         * need one more update here to catch any additions to the
         * file
         */
        if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
                btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
                mark_inode_dirty(inode);
        }

        if (written < 0 || written == count)
                return written;

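        /*
         * the direct write came up short; push the remainder through
         * the buffered path, then write those pages back and drop
         * them from the page cache so the on-disk result looks like
         * a single contiguous write
         */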
        pos += written;
        count -= written;
        iov_iter_init(&i, iov, nr_segs, count, written);
        written_buffered = __btrfs_buffered_write(file, &i, pos);
        if (written_buffered < 0) {
                err = written_buffered;
                goto out;
        }
        endbyte = pos + written_buffered - 1;
        err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
        if (err)
                goto out;
        written += written_buffered;
        *ppos = pos + written_buffered;
        invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
                                 endbyte >> PAGE_CACHE_SHIFT);
out:
        return written ? written : err;
}
1337
1338static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1339                                    const struct iovec *iov,
1340                                    unsigned long nr_segs, loff_t pos)
1341{
1342        struct file *file = iocb->ki_filp;
1343        struct inode *inode = fdentry(file)->d_inode;
1344        struct btrfs_root *root = BTRFS_I(inode)->root;
1345        loff_t *ppos = &iocb->ki_pos;
1346        u64 start_pos;
1347        ssize_t num_written = 0;
1348        ssize_t err = 0;
1349        size_t count, ocount;
1350
1351        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1352
1353        mutex_lock(&inode->i_mutex);
1354
1355        err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1356        if (err) {
1357                mutex_unlock(&inode->i_mutex);
1358                goto out;
1359        }
1360        count = ocount;
1361
1362        current->backing_dev_info = inode->i_mapping->backing_dev_info;
1363        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1364        if (err) {
1365                mutex_unlock(&inode->i_mutex);
1366                goto out;
1367        }
1368
1369        if (count == 0) {
1370                mutex_unlock(&inode->i_mutex);
1371                goto out;
1372        }
1373
1374        err = file_remove_suid(file);
1375        if (err) {
1376                mutex_unlock(&inode->i_mutex);
1377                goto out;
1378        }
1379
1380        /*
1381         * If BTRFS flips readonly due to some error that should never
1382         * happen (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1383         * we have to stop this write operation to ensure FS
1384         * consistency, even though we opened the file as writable.
1385         */
1386        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
1387                mutex_unlock(&inode->i_mutex);
1388                err = -EROFS;
1389                goto out;
1390        }
1391
1392        err = btrfs_update_time(file);
1393        if (err) {
1394                mutex_unlock(&inode->i_mutex);
1395                goto out;
1396        }
1397        BTRFS_I(inode)->sequence++;
1398
1399        start_pos = round_down(pos, root->sectorsize);
1400        if (start_pos > i_size_read(inode)) {
1401                err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1402                if (err) {
1403                        mutex_unlock(&inode->i_mutex);
1404                        goto out;
1405                }
1406        }
1407
1408        if (unlikely(file->f_flags & O_DIRECT)) {
1409                num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1410                                                   pos, ppos, count, ocount);
1411        } else {
1412                struct iov_iter i;
1413
1414                iov_iter_init(&i, iov, nr_segs, count, num_written);
1415
1416                num_written = __btrfs_buffered_write(file, &i, pos);
1417                if (num_written > 0)
1418                        *ppos = pos + num_written;
1419        }
1420
1421        mutex_unlock(&inode->i_mutex);
1422
1423        /*
1424         * we want to make sure fsync finds this change, but we
1425         * haven't joined the currently running transaction.
1426         *
1427         * Later on, someone is sure to update the inode and get the
1428         * real transid recorded.
1429         *
1430         * We set last_trans now to the fs_info generation + 1,
1431         * this will either be one more than the running transaction
1432         * or the generation used for the next transaction if there isn't
1433         * one running right now.
1434         */
1435        BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1436        if (num_written > 0 || num_written == -EIOCBQUEUED) {
1437                err = generic_write_sync(file, pos, num_written);
1438                if (err < 0 && num_written > 0)
1439                        num_written = err;
1440        }
1441out:
1442        current->backing_dev_info = NULL;
1443        return num_written ? num_written : err;
1444}
1445
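    /*
     * called when the last reference to an open file is dropped
     */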
1446int btrfs_release_file(struct inode *inode, struct file *filp)
1447{
1448        /*
1449         * ordered_data_close is set by setattr when we are about to truncate
1450         * a file from a non-zero size to a zero size.  This tries to
1451         * flush down new bytes that may have been written if the
1452         * application was using truncate to replace a file in place.
1453         */
1454        if (BTRFS_I(inode)->ordered_data_close) {
1455                BTRFS_I(inode)->ordered_data_close = 0;
1456                btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
1457                if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1458                        filemap_flush(inode->i_mapping);
1459        }
1460        if (filp->private_data)
1461                btrfs_ioctl_trans_end(filp);
1462        return 0;
1463}
1464
1465/*
1466 * fsync call for both files and directories.  This logs the inode into
1467 * the tree log instead of forcing full commits whenever possible.
1468 *
1469 * It needs to call filemap_fdatawait so that all ordered extent updates
1470 * in the metadata btree are up to date for copying to the log.
1471 *
1472 * It drops the inode mutex before doing the tree log commit.  This is an
1473 * important optimization for directories because holding the mutex prevents
1474 * new operations on the dir while we write to disk.
1475 */
1476int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1477{
1478        struct dentry *dentry = file->f_path.dentry;
1479        struct inode *inode = dentry->d_inode;
1480        struct btrfs_root *root = BTRFS_I(inode)->root;
1481        int ret = 0;
1482        struct btrfs_trans_handle *trans;
1483
1484        trace_btrfs_sync_file(file, datasync);
1485
1486        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1487        if (ret)
1488                return ret;
1489        mutex_lock(&inode->i_mutex);
1490
1491        /* we wait first, since the writeback may change the inode */
1492        root->log_batch++;
1493        btrfs_wait_ordered_range(inode, 0, (u64)-1);
1494        root->log_batch++;
1495
1496        /*
1497         * check the transaction that last modified this inode
1498         * and see if it's already been committed
1499         */
1500        if (!BTRFS_I(inode)->last_trans) {
1501                mutex_unlock(&inode->i_mutex);
1502                goto out;
1503        }
1504
1505        /*
1506         * if the last transaction that changed this file was before
1507         * the current transaction, we can bail out now without any
1508         * syncing
1509         */
1510        smp_mb();
1511        if (BTRFS_I(inode)->last_trans <=
1512            root->fs_info->last_trans_committed) {
1513                BTRFS_I(inode)->last_trans = 0;
1514                mutex_unlock(&inode->i_mutex);
1515                goto out;
1516        }
1517
1518        /*
1519         * ok, we haven't committed the transaction yet, let's do a commit
1520         */
1521        if (file->private_data)
1522                btrfs_ioctl_trans_end(file);
1523
1524        trans = btrfs_start_transaction(root, 0);
1525        if (IS_ERR(trans)) {
1526                ret = PTR_ERR(trans);
1527                mutex_unlock(&inode->i_mutex);
1528                goto out;
1529        }
1530
1531        ret = btrfs_log_dentry_safe(trans, root, dentry);
1532        if (ret < 0) {
1533                mutex_unlock(&inode->i_mutex);
1534                goto out;
1535        }
1536
1537        /* we've logged all the items and now have a consistent
1538         * version of the file in the log.  It is possible that
1539         * someone will come in and modify the file, but that's
1540         * fine because the log is consistent on disk, and we
1541         * have references to all of the file's extents.
1542         *
1543         * It is possible that someone will come in and log the
1544         * file again, but that will end up using the synchronization
1545         * inside btrfs_sync_log to keep things safe.
1546         */
1547        mutex_unlock(&inode->i_mutex);
1548
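            /*
             * if the tree log code decided no log sync is needed, just end
             * the transaction.  Otherwise sync the log; a return value
             * above zero (or a failed log sync) forces a full transaction
             * commit instead.
             */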
1549        if (ret != BTRFS_NO_LOG_SYNC) {
1550                if (ret > 0) {
1551                        ret = btrfs_commit_transaction(trans, root);
1552                } else {
1553                        ret = btrfs_sync_log(trans, root);
1554                        if (ret == 0)
1555                                ret = btrfs_end_transaction(trans, root);
1556                        else
1557                                ret = btrfs_commit_transaction(trans, root);
1558                }
1559        } else {
1560                ret = btrfs_end_transaction(trans, root);
1561        }
1562out:
1563        return ret > 0 ? -EIO : ret;
1564}
1565
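    /*
     * mmap support: faults are filled from the page cache via
     * filemap_fault(), and btrfs_page_mkwrite() gives btrfs a chance to
     * do its bookkeeping (space reservation and extent state) before a
     * mapped page is made writable.
     */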
1566static const struct vm_operations_struct btrfs_file_vm_ops = {
1567        .fault          = filemap_fault,
1568        .page_mkwrite   = btrfs_page_mkwrite,
1569};
1570
1571static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1572{
1573        struct address_space *mapping = filp->f_mapping;
1574
1575        if (!mapping->a_ops->readpage)
1576                return -ENOEXEC;
1577
1578        file_accessed(filp);
1579        vma->vm_ops = &btrfs_file_vm_ops;
1580        vma->vm_flags |= VM_CAN_NONLINEAR;
1581
1582        return 0;
1583}
1584
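    /*
     * preallocate extents with fallocate(2).  Only the default mode and
     * FALLOC_FL_KEEP_SIZE are supported; for example, a caller might use
     * fallocate(fd, FALLOC_FL_KEEP_SIZE, st.st_size, 1 << 20) to
     * reserve space past EOF without changing i_size.
     */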
1585static long btrfs_fallocate(struct file *file, int mode,
1586                            loff_t offset, loff_t len)
1587{
1588        struct inode *inode = file->f_path.dentry->d_inode;
1589        struct extent_state *cached_state = NULL;
1590        u64 cur_offset;
1591        u64 last_byte;
1592        u64 alloc_start;
1593        u64 alloc_end;
1594        u64 alloc_hint = 0;
1595        u64 locked_end;
1596        u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1597        struct extent_map *em;
1598        int ret;
1599
1600        alloc_start = offset & ~mask;
1601        alloc_end =  (offset + len + mask) & ~mask;
1602
1603        /* We only support the FALLOC_FL_KEEP_SIZE mode */
1604        if (mode & ~FALLOC_FL_KEEP_SIZE)
1605                return -EOPNOTSUPP;
1606
1607        /*
1608         * wait for ordered IO before we have any locks.  We'll loop again
1609         * below with the locks held.
1610         */
1611        btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
1612
1613        mutex_lock(&inode->i_mutex);
1614        ret = inode_newsize_ok(inode, alloc_end);
1615        if (ret)
1616                goto out;
1617
1618        if (alloc_start > inode->i_size) {
1619                ret = btrfs_cont_expand(inode, i_size_read(inode),
1620                                        alloc_start);
1621                if (ret)
1622                        goto out;
1623        }
1624
1625        locked_end = alloc_end - 1;
1626        while (1) {
1627                struct btrfs_ordered_extent *ordered;
1628
1629                /* the extent lock is ordered inside the running
1630                 * transaction
1631                 */
1632                lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
1633                                 locked_end, 0, &cached_state, GFP_NOFS);
1634                ordered = btrfs_lookup_first_ordered_extent(inode,
1635                                                            alloc_end - 1);
1636                if (ordered &&
1637                    ordered->file_offset + ordered->len > alloc_start &&
1638                    ordered->file_offset < alloc_end) {
1639                        btrfs_put_ordered_extent(ordered);
1640                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1641                                             alloc_start, locked_end,
1642                                             &cached_state, GFP_NOFS);
1643                        /*
1644                         * we can't wait on the range with the transaction
1645                         * running or with the extent lock held
1646                         */
1647                        btrfs_wait_ordered_range(inode, alloc_start,
1648                                                 alloc_end - alloc_start);
1649                } else {
1650                        if (ordered)
1651                                btrfs_put_ordered_extent(ordered);
1652                        break;
1653                }
1654        }
1655
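            /*
             * walk the range one extent map at a time.  Holes (and
             * anything past i_size that isn't already preallocated) get
             * space reserved and preallocated; otherwise we may only
             * need to update i_size.
             */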
1656        cur_offset = alloc_start;
1657        while (1) {
1658                u64 actual_end;
1659
1660                em = btrfs_get_extent(inode, NULL, 0, cur_offset,
1661                                      alloc_end - cur_offset, 0);
1662                BUG_ON(IS_ERR_OR_NULL(em));
1663                last_byte = min(extent_map_end(em), alloc_end);
1664                actual_end = min_t(u64, extent_map_end(em), offset + len);
1665                last_byte = (last_byte + mask) & ~mask;
1666
1667                if (em->block_start == EXTENT_MAP_HOLE ||
1668                    (cur_offset >= inode->i_size &&
1669                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
1670
1671                        /*
1672                         * Make sure we have enough space before we do the
1673                         * allocation.
1674                         */
1675                        ret = btrfs_check_data_free_space(inode, last_byte -
1676                                                          cur_offset);
1677                        if (ret) {
1678                                free_extent_map(em);
1679                                break;
1680                        }
1681
1682                        ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
1683                                                        last_byte - cur_offset,
1684                                                        1 << inode->i_blkbits,
1685                                                        offset + len,
1686                                                        &alloc_hint);
1687
1688                        /* Let go of our reservation. */
1689                        btrfs_free_reserved_data_space(inode, last_byte -
1690                                                       cur_offset);
1691                        if (ret < 0) {
1692                                free_extent_map(em);
1693                                break;
1694                        }
1695                } else if (actual_end > inode->i_size &&
1696                           !(mode & FALLOC_FL_KEEP_SIZE)) {
1697                        /*
1698                         * We didn't need to allocate any more space, but we
1699                         * still extended the size of the file so we need to
1700                         * update i_size.
1701                         */
1702                        inode->i_ctime = CURRENT_TIME;
1703                        i_size_write(inode, actual_end);
1704                        btrfs_ordered_update_i_size(inode, actual_end, NULL);
1705                }
1706                free_extent_map(em);
1707
1708                cur_offset = last_byte;
1709                if (cur_offset >= alloc_end) {
1710                        ret = 0;
1711                        break;
1712                }
1713        }
1714        unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
1715                             &cached_state, GFP_NOFS);
1716out:
1717        mutex_unlock(&inode->i_mutex);
1718        return ret;
1719}
1720
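    /*
     * helper for SEEK_DATA/SEEK_HOLE.  Walks the extent maps starting
     * at *offset, stores the resulting file position back in *offset,
     * and returns -ENXIO when no matching data or hole exists before
     * the end of the file.
     */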
1721static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
1722{
1723        struct btrfs_root *root = BTRFS_I(inode)->root;
1724        struct extent_map *em;
1725        struct extent_state *cached_state = NULL;
1726        u64 lockstart = *offset;
1727        u64 lockend = i_size_read(inode);
1728        u64 start = *offset;
1729        u64 orig_start = *offset;
1730        u64 len = i_size_read(inode);
1731        u64 last_end = 0;
1732        int ret = 0;
1733
1734        lockend = max_t(u64, root->sectorsize, lockend);
1735        if (lockend <= lockstart)
1736                lockend = lockstart + root->sectorsize;
1737
1738        len = lockend - lockstart + 1;
1739
1740        len = max_t(u64, len, root->sectorsize);
1741        if (inode->i_size == 0)
1742                return -ENXIO;
1743
1744        lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
1745                         &cached_state, GFP_NOFS);
1746
1747        /*
1748         * Delalloc is such a pain.  If we have a hole and we have pending
1749         * delalloc for a portion of the hole we will get back a hole that
1750         * exists for the entire range since it hasn't actually been written
1751         * yet.  So to take care of this case we need to look for an extent just
1752         * before the position we want in case there is outstanding delalloc
1753         * going on here.
1754         */
1755        if (origin == SEEK_HOLE && start != 0) {
1756                if (start <= root->sectorsize)
1757                        em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
1758                                                     root->sectorsize, 0);
1759                else
1760                        em = btrfs_get_extent_fiemap(inode, NULL, 0,
1761                                                     start - root->sectorsize,
1762                                                     root->sectorsize, 0);
1763                if (IS_ERR(em)) {
1764                        ret = -ENXIO;
1765                        goto out;
1766                }
1767                last_end = em->start + em->len;
1768                if (em->block_start == EXTENT_MAP_DELALLOC)
1769                        last_end = min_t(u64, last_end, inode->i_size);
1770                free_extent_map(em);
1771        }
1772
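            /*
             * now walk forward one extent map at a time: a hole
             * satisfies SEEK_HOLE, anything else satisfies SEEK_DATA,
             * and a vacant map means we have run past the last extent,
             * which is -ENXIO.
             */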
1773        while (1) {
1774                em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
1775                if (IS_ERR(em)) {
1776                        ret = -ENXIO;
1777                        break;
1778                }
1779
1780                if (em->block_start == EXTENT_MAP_HOLE) {
1781                        if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
1782                                if (last_end <= orig_start) {
1783                                        free_extent_map(em);
1784                                        ret = -ENXIO;
1785                                        break;
1786                                }
1787                        }
1788
1789                        if (origin == SEEK_HOLE) {
1790                                *offset = start;
1791                                free_extent_map(em);
1792                                break;
1793                        }
1794                } else {
1795                        if (origin == SEEK_DATA) {
1796                                if (em->block_start == EXTENT_MAP_DELALLOC) {
1797                                        if (start >= inode->i_size) {
1798                                                free_extent_map(em);
1799                                                ret = -ENXIO;
1800                                                break;
1801                                        }
1802                                }
1803
1804                                *offset = start;
1805                                free_extent_map(em);
1806                                break;
1807                        }
1808                }
1809
1810                start = em->start + em->len;
1811                last_end = em->start + em->len;
1812
1813                if (em->block_start == EXTENT_MAP_DELALLOC)
1814                        last_end = min_t(u64, last_end, inode->i_size);
1815
1816                if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
1817                        free_extent_map(em);
1818                        ret = -ENXIO;
1819                        break;
1820                }
1821                free_extent_map(em);
1822                cond_resched();
1823        }
1824        if (!ret)
1825                *offset = min(*offset, inode->i_size);
1826out:
1827        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1828                             &cached_state, GFP_NOFS);
1829        return ret;
1830}
1831
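    /*
     * llseek for btrfs files.  SEEK_CUR and SEEK_END are handled by
     * generic_file_llseek(), SEEK_SET is validated and applied below,
     * and SEEK_DATA/SEEK_HOLE go through find_desired_extent() above.
     *
     * For illustration, userspace can find the first data region of a
     * sparse file with something like:
     *
     *         off_t data = lseek(fd, 0, SEEK_DATA);
     *         off_t hole = lseek(fd, data, SEEK_HOLE);
     */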
1832static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
1833{
1834        struct inode *inode = file->f_mapping->host;
1835        int ret;
1836
1837        mutex_lock(&inode->i_mutex);
1838        switch (origin) {
1839        case SEEK_END:
1840        case SEEK_CUR:
1841                offset = generic_file_llseek(file, offset, origin);
1842                goto out;
1843        case SEEK_DATA:
1844        case SEEK_HOLE:
1845                if (offset >= i_size_read(inode)) {
1846                        mutex_unlock(&inode->i_mutex);
1847                        return -ENXIO;
1848                }
1849
1850                ret = find_desired_extent(inode, &offset, origin);
1851                if (ret) {
1852                        mutex_unlock(&inode->i_mutex);
1853                        return ret;
1854                }
1855        }
1856
1857        if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
1858                offset = -EINVAL;
1859                goto out;
1860        }
1861        if (offset > inode->i_sb->s_maxbytes) {
1862                offset = -EINVAL;
1863                goto out;
1864        }
1865
1866        /* Special lock needed here? */
1867        if (offset != file->f_pos) {
1868                file->f_pos = offset;
1869                file->f_version = 0;
1870        }
1871out:
1872        mutex_unlock(&inode->i_mutex);
1873        return offset;
1874}
1875
1876const struct file_operations btrfs_file_operations = {
1877        .llseek         = btrfs_file_llseek,
1878        .read           = do_sync_read,
1879        .write          = do_sync_write,
1880        .aio_read       = generic_file_aio_read,
1881        .splice_read    = generic_file_splice_read,
1882        .aio_write      = btrfs_file_aio_write,
1883        .mmap           = btrfs_file_mmap,
1884        .open           = generic_file_open,
1885        .release        = btrfs_release_file,
1886        .fsync          = btrfs_sync_file,
1887        .fallocate      = btrfs_fallocate,
1888        .unlocked_ioctl = btrfs_ioctl,
1889#ifdef CONFIG_COMPAT
1890        .compat_ioctl   = btrfs_ioctl,
1891#endif
1892};
1893