linux/fs/btrfs/ordered-data.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

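/*
 * returns file_offset + len for an entry, saturating to (u64)-1 if the
 * addition overflows
 */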
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->len < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or the node already in the
 * tree that overlaps the given offset
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

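        /*
         * no exact match: walk forward while the entries still end at or
         * before file_offset, so prev lands on the closest entry below it
         */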
        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
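        /*
         * prev may still end beyond file_offset; walk backward until we
         * find an entry that ends at or before it
         */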
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
        if (file_offset < entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

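        /* check the node cached by the last lookup before searching */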
        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (offset_in_entry(entry, file_offset))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len, u64 disk_len, int type)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;

        tree = &BTRFS_I(inode)->ordered_tree;
        entry = kzalloc(sizeof(*entry), GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        mutex_lock(&tree->mutex);
        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
        entry->inode = inode;
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);

        /* one ref for the tree */
        atomic_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->root_extent_list);

        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        BUG_ON(node);

        spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &BTRFS_I(inode)->root->fs_info->ordered_extents);
        spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

        mutex_unlock(&tree->mutex);
        return 0;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
int btrfs_add_ordered_sum(struct inode *inode,
                          struct btrfs_ordered_extent *entry,
                          struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        list_add_tail(&sum->list, &entry->list);
        mutex_unlock(&tree->mutex);
        return 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   u64 file_offset, u64 io_size)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;
        int ret;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
        }

        if (io_size > entry->bytes_left) {
                printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
                       (unsigned long long)entry->bytes_left,
                       (unsigned long long)io_size);
        }
        entry->bytes_left -= io_size;
        if (entry->bytes_left == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
        else
                ret = 1;
out:
        mutex_unlock(&tree->mutex);
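        /* test_and_set_bit returns 0 only the first time bytes_left hits
         * zero, so the caller sees 1 exactly once per ordered extent
         */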
        return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        if (atomic_dec_and_test(&entry->refs)) {
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kfree(sum);
                }
                kfree(entry);
        }
        return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but anyone waiting on this extent is woken up.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
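        /* the cached lookup hint may point at the node we just erased */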
        tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);

        spin_lock(&BTRFS_I(inode)->accounting_lock);
        BTRFS_I(inode)->outstanding_extents--;
        spin_unlock(&BTRFS_I(inode)->accounting_lock);
        btrfs_unreserve_metadata_for_delalloc(BTRFS_I(inode)->root,
                                              inode, 1);

        spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);

        /*
         * we have no more ordered extents for this inode and
         * no dirty pages.  We can safely remove it from the
         * list of ordered extents
         */
        if (RB_EMPTY_ROOT(&tree->tree) &&
            !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
                list_del_init(&BTRFS_I(inode)->ordered_operations);
        }
        spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

        mutex_unlock(&tree->mutex);
        wake_up(&entry->wait);
        return 0;
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only)
{
        struct list_head splice;
        struct list_head *cur;
        struct btrfs_ordered_extent *ordered;
        struct inode *inode;

        INIT_LIST_HEAD(&splice);

        spin_lock(&root->fs_info->ordered_extent_lock);
        list_splice_init(&root->fs_info->ordered_extents, &splice);
        while (!list_empty(&splice)) {
                cur = splice.next;
                ordered = list_entry(cur, struct btrfs_ordered_extent,
                                     root_extent_list);
                if (nocow_only &&
                    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
                    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
                        list_move(&ordered->root_extent_list,
                                  &root->fs_info->ordered_extents);
                        cond_resched_lock(&root->fs_info->ordered_extent_lock);
                        continue;
                }

                list_del_init(&ordered->root_extent_list);
                atomic_inc(&ordered->refs);

                /*
                 * the inode may be getting freed (in sys_unlink path).
                 */
                inode = igrab(ordered->inode);

                spin_unlock(&root->fs_info->ordered_extent_lock);

                if (inode) {
                        btrfs_start_ordered_extent(inode, ordered, 1);
                        btrfs_put_ordered_extent(ordered);
                        iput(inode);
                } else {
                        btrfs_put_ordered_extent(ordered);
                }

                spin_lock(&root->fs_info->ordered_extent_lock);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
        return 0;
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here.  One just starts the IO via filemap_flush
 * and the other waits for all the IO.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
        struct btrfs_inode *btrfs_inode;
        struct inode *inode;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);
again:
        list_splice_init(&root->fs_info->ordered_operations, &splice);

        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                   ordered_operations);

                inode = &btrfs_inode->vfs_inode;

                list_del_init(&btrfs_inode->ordered_operations);

                /*
                 * the inode may be getting freed (in sys_unlink path).
                 */
                inode = igrab(inode);

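                /*
                 * in flush-only mode, put the inode back on the global
                 * list so a later waiting pass still finds it
                 */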
                if (!wait && inode) {
                        list_add_tail(&BTRFS_I(inode)->ordered_operations,
                              &root->fs_info->ordered_operations);
                }
                spin_unlock(&root->fs_info->ordered_extent_lock);

                if (inode) {
                        if (wait)
                                btrfs_wait_ordered_range(inode, 0, (u64)-1);
                        else
                                filemap_flush(inode->i_mapping);
                        iput(inode);
                }

                cond_resched();
                spin_lock(&root->fs_info->ordered_extent_lock);
        }
        if (wait && !list_empty(&root->fs_info->ordered_operations))
                goto again;

        spin_unlock(&root->fs_info->ordered_extent_lock);
        mutex_unlock(&root->fs_info->ordered_operations_mutex);

        return 0;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is non-zero, this effectively waits on page writeback for all the
 * pages in the extent, and it waits on the IO completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
                                       struct btrfs_ordered_extent *entry,
                                       int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->len - 1;

        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for pdflush to find them
         */
        filemap_fdatawrite_range(inode->i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        u64 end;
        u64 orig_end;
        u64 wait_end;
        struct btrfs_ordered_extent *ordered;
        int found;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }
        wait_end = orig_end;
again:
        /* start IO across the range first to instantiate any delalloc
         * extents
         */
        filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

        /* The compression code will leave pages locked but return from
         * writepage without setting the page writeback.  Starting again
         * with WB_SYNC_ALL will end up waiting for the IO to actually start.
         */
        filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

        filemap_fdatawait_range(inode->i_mapping, start, orig_end);

        end = orig_end;
        found = 0;
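        /*
         * walk backward from the end of the range, waiting on each
         * ordered extent that overlaps it
         */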
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->len < start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                found++;
                btrfs_start_ordered_extent(inode, ordered, 1);
                end = ordered->file_offset;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
        if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
                           EXTENT_DELALLOC, 0, NULL)) {
                schedule_timeout(1);
                goto again;
        }
        return 0;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset))
                entry = NULL;
        if (entry)
                atomic_inc(&entry->refs);
out:
        mutex_unlock(&tree->mutex);
        return entry;
}

/*
 * lookup and return the ordered extent at 'file_offset', or the closest
 * one before it.  NULL is returned if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        atomic_inc(&entry->refs);
out:
        mutex_unlock(&tree->mutex);
        return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode,
                                struct btrfs_ordered_extent *ordered)
{
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size_test;
        struct rb_node *node;
        struct btrfs_ordered_extent *test;

        mutex_lock(&tree->mutex);
        disk_i_size = BTRFS_I(inode)->disk_i_size;

        /*
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
        if (disk_i_size >= inode->i_size ||
            ordered->file_offset + ordered->len <= disk_i_size) {
                goto out;
        }

        /*
         * we can't update the disk_i_size if there are delalloc bytes
         * between disk_i_size and this ordered extent
         */
        if (test_range_bit(io_tree, disk_i_size,
                           ordered->file_offset + ordered->len - 1,
                           EXTENT_DELALLOC, 0, NULL)) {
                goto out;
        }
        /*
         * walk backward from this ordered extent to disk_i_size.
         * if we find an ordered extent then we can't update disk i_size
         * yet
         */
        node = &ordered->rb_node;
        while (1) {
                node = rb_prev(node);
                if (!node)
                        break;
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (test->file_offset + test->len <= disk_i_size)
                        break;
                if (test->file_offset >= inode->i_size)
                        break;
                if (test->file_offset >= disk_i_size)
                        goto out;
        }
        new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));

        /*
         * at this point, we know we can safely update i_size to at least
         * the offset from this ordered extent.  But, we need to
         * walk forward and see if ios from higher up in the file have
         * finished.
         */
        node = rb_next(&ordered->rb_node);
        i_size_test = 0;
        if (node) {
                /*
                 * do we have an area where IO might have finished
                 * between our ordered extent and the next one.
                 */
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (test->file_offset > entry_end(ordered))
                        i_size_test = test->file_offset;
        } else {
                i_size_test = i_size_read(inode);
        }

        /*
         * i_size_test is the end of a region after this ordered
         * extent where there are no ordered extents.  As long as there
         * are no delalloc bytes in this area, it is safe to update
         * disk_i_size to the end of the region.
         */
        if (i_size_test > entry_end(ordered) &&
            !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
                           EXTENT_DELALLOC, 0, NULL)) {
                new_i_size = min_t(u64, i_size_test, i_size_read(inode));
        }
        BTRFS_I(inode)->disk_i_size = new_i_size;
out:
        mutex_unlock(&tree->mutex);
        return 0;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                           u32 *sum)
{
        struct btrfs_ordered_sum *ordered_sum;
        struct btrfs_sector_sum *sector_sums;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        unsigned long num_sectors;
        unsigned long i;
        u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
        int ret = 1;

        ordered = btrfs_lookup_ordered_extent(inode, offset);
        if (!ordered)
                return 1;

        mutex_lock(&tree->mutex);
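        /*
         * each ordered_sum covers a run of sectors starting at ->bytenr;
         * scan each candidate's per-sector sums for disk_bytenr
         */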
        list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
                if (disk_bytenr >= ordered_sum->bytenr) {
                        num_sectors = ordered_sum->len / sectorsize;
                        sector_sums = ordered_sum->sums;
                        for (i = 0; i < num_sectors; i++) {
                                if (sector_sums[i].bytenr == disk_bytenr) {
                                        *sum = sector_sums[i].sum;
                                        ret = 0;
                                        goto out;
                                }
                        }
                }
        }
out:
        mutex_unlock(&tree->mutex);
        btrfs_put_ordered_extent(ordered);
        return ret;
}

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct inode *inode)
{
        u64 last_mod;

        last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

        /*
         * if this file hasn't been changed since the last transaction
         * commit, we can safely return without doing anything
         */
        if (last_mod < root->fs_info->last_trans_committed)
                return 0;

        /*
         * the transaction is already committing.  Just start the IO and
         * don't bother with all of this list nonsense
         */
        if (trans && root->fs_info->running_transaction->blocked) {
                btrfs_wait_ordered_range(inode, 0, (u64)-1);
                return 0;
        }

        spin_lock(&root->fs_info->ordered_extent_lock);
        if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
                list_add_tail(&BTRFS_I(inode)->ordered_operations,
                              &root->fs_info->ordered_operations);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);

        return 0;
}