linux/fs/ext4/move_extent.c
// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright (c) 2008,2009 NEC Software Tohoku, Ltd.
 * Written by Takashi Sato <t-sato@yk.jp.nec.com>
 *            Akira Fujita <a-fujita@rs.jp.nec.com>
 */

#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "ext4_extents.h"

/**
 * get_ext_path - Find the extent path for the designated logical block number.
 *
 * @inode:      the inode to be searched
 * @lblock:     logical block number to find an extent path for
 * @ppath:      pointer to an extent path pointer (for output)
 *
 * ext4_find_extent() wrapper. Return 0 on success, or a negative error value
 * on failure.
 */
static inline int
get_ext_path(struct inode *inode, ext4_lblk_t lblock,
                struct ext4_ext_path **ppath)
{
        struct ext4_ext_path *path;

        path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
        if (IS_ERR(path))
                return PTR_ERR(path);
        if (path[ext_depth(inode)].p_ext == NULL) {
                ext4_ext_drop_refs(path);
                kfree(path);
                *ppath = NULL;
                return -ENODATA;
        }
        *ppath = path;
        return 0;
}
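
/*
 * A minimal usage sketch for get_ext_path(), mirroring the callers in this
 * file: look up the path, read the leaf extent, then drop the reference and
 * free the path once it is no longer needed.
 *
 *      struct ext4_ext_path *path = NULL;
 *      struct ext4_extent *ext;
 *      int err;
 *
 *      err = get_ext_path(inode, lblock, &path);
 *      if (!err) {
 *              ext = path[ext_depth(inode)].p_ext;
 *              // ... inspect ext ...
 *              ext4_ext_drop_refs(path);
 *              kfree(path);
 *      }
 */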

/**
 * ext4_double_down_write_data_sem - Acquire the write locks of i_data_sem
 *                                   for two inodes
 *
 * @first:      first inode
 * @second:     second inode
 *
 * Acquire the write lock of i_data_sem for both inodes, always in the same
 * (address) order to avoid an ABBA deadlock.
 */
void
ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
{
        if (first < second) {
                down_write(&EXT4_I(first)->i_data_sem);
                down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
        } else {
                down_write(&EXT4_I(second)->i_data_sem);
                down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
        }
}

/**
 * ext4_double_up_write_data_sem - Release the write locks of i_data_sem
 *                                 for two inodes
 *
 * @orig_inode:         original inode, whose i_data_sem is released first
 * @donor_inode:        donor inode, whose i_data_sem is released second
 *
 * Release the write lock of i_data_sem for both inodes (orig and donor).
 */
void
ext4_double_up_write_data_sem(struct inode *orig_inode,
                              struct inode *donor_inode)
{
        up_write(&EXT4_I(orig_inode)->i_data_sem);
        up_write(&EXT4_I(donor_inode)->i_data_sem);
}
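
/*
 * A minimal sketch of how the two helpers above are paired around an
 * extent-tree update, mirroring ext4_move_extents() below. The argument
 * order does not affect correctness: the down helper always locks the
 * lower-addressed inode first, and the release order is irrelevant.
 *
 *      ext4_double_down_write_data_sem(orig_inode, donor_inode);
 *      // ... modify the extent trees of both inodes ...
 *      ext4_double_up_write_data_sem(orig_inode, donor_inode);
 */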

/**
 * mext_check_coverage - Check that all extents in the range have the same type
 *
 * @inode:              inode in question
 * @from:               block offset of inode
 * @count:              block count to be checked
 * @unwritten:          extents expected to be unwritten
 * @err:                pointer to save error value
 *
 * Return 1 if all extents in the range have the expected type, and zero
 * otherwise.
 */
static int
mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
                    int unwritten, int *err)
{
        struct ext4_ext_path *path = NULL;
        struct ext4_extent *ext;
        int ret = 0;
        ext4_lblk_t last = from + count;

        while (from < last) {
                *err = get_ext_path(inode, from, &path);
                if (*err)
                        goto out;
                ext = path[ext_depth(inode)].p_ext;
                if (unwritten != ext4_ext_is_unwritten(ext))
                        goto out;
                from += ext4_ext_get_actual_len(ext);
                ext4_ext_drop_refs(path);
        }
        ret = 1;
out:
        ext4_ext_drop_refs(path);
        kfree(path);
        return ret;
}

/**
 * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
 *
 * @inode1:     first inode
 * @inode2:     second inode
 * @index1:     page index on @inode1
 * @index2:     page index on @inode2
 * @page:       result page array (two locked pages)
 *
 * Grab two locked pages for the inodes, in inode address order.
 */
static int
mext_page_double_lock(struct inode *inode1, struct inode *inode2,
                      pgoff_t index1, pgoff_t index2, struct page *page[2])
{
        struct address_space *mapping[2];
        unsigned fl = AOP_FLAG_NOFS;

        BUG_ON(!inode1 || !inode2);
        if (inode1 < inode2) {
                mapping[0] = inode1->i_mapping;
                mapping[1] = inode2->i_mapping;
        } else {
                pgoff_t tmp = index1;
                index1 = index2;
                index2 = tmp;
                mapping[0] = inode2->i_mapping;
                mapping[1] = inode1->i_mapping;
        }

        page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
        if (!page[0])
                return -ENOMEM;

        page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
        if (!page[1]) {
                unlock_page(page[0]);
                put_page(page[0]);
                return -ENOMEM;
        }
        /*
         * grab_cache_page_write_begin() may not wait on the page's writeback
         * if the BDI does not demand it. But it is reasonable to be very
         * conservative here and explicitly wait on the page's writeback.
         */
        wait_on_page_writeback(page[0]);
        wait_on_page_writeback(page[1]);
        if (inode1 > inode2)
                swap(page[0], page[1]);

        return 0;
}
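
/*
 * A minimal sketch of the expected unlock/release pairing for
 * mext_page_double_lock(), mirroring the unlock_pages label in
 * move_extent_per_page() below:
 *
 *      struct page *pagep[2] = { NULL, NULL };
 *
 *      if (!mext_page_double_lock(inode1, inode2, index1, index2, pagep)) {
 *              // ... work on the two locked, writeback-clean pages ...
 *              unlock_page(pagep[0]);
 *              put_page(pagep[0]);
 *              unlock_page(pagep[1]);
 *              put_page(pagep[1]);
 *      }
 */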

/* Force page buffers uptodate w/o dropping page's lock */
static int
mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        sector_t block;
        struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
        unsigned int blocksize, block_start, block_end;
        int i, err, nr = 0, partial = 0;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        if (PageUptodate(page))
                return 0;

        blocksize = i_blocksize(inode);
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        head = page_buffers(page);
        block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
        for (bh = head, block_start = 0; bh != head || !block_start;
             block++, block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (!buffer_uptodate(bh))
                                partial = 1;
                        continue;
                }
                if (buffer_uptodate(bh))
                        continue;
                if (!buffer_mapped(bh)) {
                        err = ext4_get_block(inode, block, bh, 0);
                        if (err) {
                                SetPageError(page);
                                return err;
                        }
                        if (!buffer_mapped(bh)) {
                                zero_user(page, block_start, blocksize);
                                set_buffer_uptodate(bh);
                                continue;
                        }
                }
                BUG_ON(nr >= MAX_BUF_PER_PAGE);
                arr[nr++] = bh;
        }
        /* No io required */
        if (!nr)
                goto out;

        for (i = 0; i < nr; i++) {
                bh = arr[i];
                if (!bh_uptodate_or_lock(bh)) {
                        err = bh_submit_read(bh);
                        if (err)
                                return err;
                }
        }
out:
        if (!partial)
                SetPageUptodate(page);
        return 0;
}

/**
 * move_extent_per_page - Move extent data per page
 *
 * @o_filp:                     file structure of the original file
 * @donor_inode:                donor inode
 * @orig_page_offset:           page index on the original file
 * @donor_page_offset:          page index on the donor file
 * @data_offset_in_page:        block index where the data swapping starts
 * @block_len_in_page:          the number of blocks to be swapped
 * @unwritten:                  1 if the orig extent is unwritten
 * @err:                        pointer to save the return value
 *
 * Save the data in the original inode blocks and replace the original inode
 * extents with the donor inode extents by calling ext4_swap_extents().
 * Finally, write out the saved data in the new original inode blocks. Return
 * the replaced block count.
 */
static int
move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
                     pgoff_t orig_page_offset, pgoff_t donor_page_offset,
                     int data_offset_in_page,
                     int block_len_in_page, int unwritten, int *err)
{
        struct inode *orig_inode = file_inode(o_filp);
        struct page *pagep[2] = {NULL, NULL};
        handle_t *handle;
        ext4_lblk_t orig_blk_offset, donor_blk_offset;
        unsigned long blocksize = orig_inode->i_sb->s_blocksize;
        unsigned int tmp_data_size, data_size, replaced_size;
        int i, err2, jblocks, retries = 0;
        int replaced_count = 0;
        int from = data_offset_in_page << orig_inode->i_blkbits;
        int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
        struct super_block *sb = orig_inode->i_sb;
        struct buffer_head *bh = NULL;

        /*
         * We need twice the usual number of journal buffers because the
         * inode and donor_inode may each modify different metadata blocks.
         */
again:
        *err = 0;
        jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
        handle = ext4_journal_start(orig_inode, EXT4_HT_MOVE_EXTENTS, jblocks);
        if (IS_ERR(handle)) {
                *err = PTR_ERR(handle);
                return 0;
        }

        orig_blk_offset = orig_page_offset * blocks_per_page +
                data_offset_in_page;

        donor_blk_offset = donor_page_offset * blocks_per_page +
                data_offset_in_page;

        /* Calculate data_size */
        if ((orig_blk_offset + block_len_in_page - 1) ==
            ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
                /* Replace the last block */
                tmp_data_size = orig_inode->i_size & (blocksize - 1);
                /*
                 * If tmp_data_size is zero, the file size is a multiple of
                 * the block size, so use a full block.
                 */
                if (tmp_data_size == 0)
                        tmp_data_size = blocksize;

                data_size = tmp_data_size +
                        ((block_len_in_page - 1) << orig_inode->i_blkbits);
        } else
                data_size = block_len_in_page << orig_inode->i_blkbits;

        replaced_size = data_size;

        *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
                                     donor_page_offset, pagep);
        if (unlikely(*err < 0))
                goto stop_journal;
        /*
         * If the orig extent was unwritten, it can become initialized at any
         * time after i_data_sem is dropped; to serialize against delalloc we
         * must recheck the extents while holding the page locks. If they are
         * still unwritten, no data copy is necessary; just swap the data
         * blocks between orig and donor.
         */
        if (unwritten) {
                ext4_double_down_write_data_sem(orig_inode, donor_inode);
                /* If any extent in the range became initialized we have to
                 * fall back to data copying */
                unwritten = mext_check_coverage(orig_inode, orig_blk_offset,
                                                block_len_in_page, 1, err);
                if (*err)
                        goto drop_data_sem;

                unwritten &= mext_check_coverage(donor_inode, donor_blk_offset,
                                                 block_len_in_page, 1, err);
                if (*err)
                        goto drop_data_sem;

                if (!unwritten) {
                        ext4_double_up_write_data_sem(orig_inode, donor_inode);
                        goto data_copy;
                }
                if ((page_has_private(pagep[0]) &&
                     !try_to_release_page(pagep[0], 0)) ||
                    (page_has_private(pagep[1]) &&
                     !try_to_release_page(pagep[1], 0))) {
                        *err = -EBUSY;
                        goto drop_data_sem;
                }
                replaced_count = ext4_swap_extents(handle, orig_inode,
                                                   donor_inode, orig_blk_offset,
                                                   donor_blk_offset,
                                                   block_len_in_page, 1, err);
        drop_data_sem:
                ext4_double_up_write_data_sem(orig_inode, donor_inode);
                goto unlock_pages;
        }
data_copy:
        *err = mext_page_mkuptodate(pagep[0], from, from + replaced_size);
        if (*err)
                goto unlock_pages;

        /* At this point all buffers in the range are uptodate and the old
         * mapping layout is no longer required; try to drop it now. */
        if ((page_has_private(pagep[0]) && !try_to_release_page(pagep[0], 0)) ||
            (page_has_private(pagep[1]) && !try_to_release_page(pagep[1], 0))) {
                *err = -EBUSY;
                goto unlock_pages;
        }
        ext4_double_down_write_data_sem(orig_inode, donor_inode);
        replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
                                           orig_blk_offset, donor_blk_offset,
                                           block_len_in_page, 1, err);
        ext4_double_up_write_data_sem(orig_inode, donor_inode);
        if (*err) {
                if (replaced_count) {
                        block_len_in_page = replaced_count;
                        replaced_size =
                                block_len_in_page << orig_inode->i_blkbits;
                } else
                        goto unlock_pages;
        }
        /* Perform all the necessary steps similar to write_begin()/write_end(),
         * but keep in mind that i_size will not change */
        if (!page_has_buffers(pagep[0]))
                create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
        bh = page_buffers(pagep[0]);
        for (i = 0; i < data_offset_in_page; i++)
                bh = bh->b_this_page;
        for (i = 0; i < block_len_in_page; i++) {
                *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
                if (*err < 0)
                        break;
                bh = bh->b_this_page;
        }
        if (!*err)
                *err = block_commit_write(pagep[0], from, from + replaced_size);

        if (unlikely(*err < 0))
                goto repair_branches;

        /* Even in the data=writeback case it is reasonable to pin the inode
         * to the transaction, to prevent unexpected data loss */
        *err = ext4_jbd2_inode_add_write(handle, orig_inode);

unlock_pages:
        unlock_page(pagep[0]);
        put_page(pagep[0]);
        unlock_page(pagep[1]);
        put_page(pagep[1]);
stop_journal:
        ext4_journal_stop(handle);
        if (*err == -ENOSPC &&
            ext4_should_retry_alloc(sb, &retries))
                goto again;
        /* The buffer was busy, probably because it is pinned to a journal
         * transaction; forcing a transaction commit may help to free it. */
        if (*err == -EBUSY && retries++ < 4 && EXT4_SB(sb)->s_journal &&
            jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal))
                goto again;
        return replaced_count;

repair_branches:
        /*
         * This should never ever happen!
         * The extents are already swapped, but we were not able to copy the
         * data. Try to swap the extents back to their original places.
         */
        ext4_double_down_write_data_sem(orig_inode, donor_inode);
        replaced_count = ext4_swap_extents(handle, donor_inode, orig_inode,
                                           orig_blk_offset, donor_blk_offset,
                                           block_len_in_page, 0, &err2);
        ext4_double_up_write_data_sem(orig_inode, donor_inode);
        if (replaced_count != block_len_in_page) {
                EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset),
                                       "Unable to copy data block,"
                                       " data will be lost.");
                *err = -EIO;
        }
        replaced_count = 0;
        goto unlock_pages;
}

/**
 * mext_check_arguments - Check whether the move extent can be done
 *
 * @orig_inode:         original inode
 * @donor_inode:        donor inode
 * @orig_start:         logical start offset in block for orig
 * @donor_start:        logical start offset in block for donor
 * @len:                the number of blocks to be moved
 *
 * Check the arguments of ext4_move_extents() to see whether the files can be
 * exchanged with each other.
 * Return 0 on success, or a negative error value on failure.
 */
static int
mext_check_arguments(struct inode *orig_inode,
                     struct inode *donor_inode, __u64 orig_start,
                     __u64 donor_start, __u64 *len)
{
        __u64 orig_eof, donor_eof;
        unsigned int blkbits = orig_inode->i_blkbits;
        unsigned int blocksize = 1 << blkbits;

        orig_eof = (i_size_read(orig_inode) + blocksize - 1) >> blkbits;
        donor_eof = (i_size_read(donor_inode) + blocksize - 1) >> blkbits;

        if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
                ext4_debug("ext4 move extent: suid or sgid is set"
                           " to donor file [ino:orig %lu, donor %lu]\n",
                           orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
                return -EPERM;

        /* Ext4 move extent does not support swapfiles */
        if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
                ext4_debug("ext4 move extent: The argument files should "
                        "not be swapfile [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EBUSY;
        }

        if (ext4_is_quota_file(orig_inode) && ext4_is_quota_file(donor_inode)) {
                ext4_debug("ext4 move extent: The argument files should "
                        "not be quota files [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EBUSY;
        }

        /* Ext4 move extent supports only extent-based files */
        if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
                ext4_debug("ext4 move extent: orig file is not extents "
                        "based file [ino:orig %lu]\n", orig_inode->i_ino);
                return -EOPNOTSUPP;
        } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
                ext4_debug("ext4 move extent: donor file is not extents "
                        "based file [ino:donor %lu]\n", donor_inode->i_ino);
                return -EOPNOTSUPP;
        }

        if ((!orig_inode->i_size) || (!donor_inode->i_size)) {
                ext4_debug("ext4 move extent: File size is 0 byte\n");
                return -EINVAL;
        }

        /* orig and donor must start at the same block offset within a page */
        if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
            (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
                ext4_debug("ext4 move extent: orig and donor's start "
                        "offsets are not aligned [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        if ((orig_start >= EXT_MAX_BLOCKS) ||
            (donor_start >= EXT_MAX_BLOCKS) ||
            (*len > EXT_MAX_BLOCKS) ||
            (donor_start + *len >= EXT_MAX_BLOCKS) ||
            (orig_start + *len >= EXT_MAX_BLOCKS)) {
                ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
                        "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        /* Trim the requested length so it does not run past either EOF;
         * guard against a start beyond EOF, which would otherwise underflow
         * the unsigned subtraction. */
        if (orig_eof <= orig_start)
                *len = 0;
        else if (orig_eof < orig_start + *len - 1)
                *len = orig_eof - orig_start;
        if (donor_eof <= donor_start)
                *len = 0;
        else if (donor_eof < donor_start + *len - 1)
                *len = donor_eof - donor_start;
        if (!*len) {
                ext4_debug("ext4 move extent: len should not be 0 "
                        "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
                        donor_inode->i_ino);
                return -EINVAL;
        }

        return 0;
}
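
/*
 * A worked example of the EOF trimming above, with hypothetical numbers:
 * if the orig file spans 6 blocks (orig_eof = 6), orig_start = 2 and
 * *len = 10, then orig_start + *len - 1 = 11 runs past orig_eof, so *len is
 * trimmed to orig_eof - orig_start = 4. If instead orig_start were 8
 * (beyond EOF), *len would be forced to 0 and the request rejected with
 * -EINVAL.
 */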

/**
 * ext4_move_extents - Exchange the specified range of the original and
 *                     donor files
 *
 * @o_filp:             file structure of the original file
 * @d_filp:             file structure of the donor file
 * @orig_blk:           start offset in block for orig
 * @donor_blk:          start offset in block for donor
 * @len:                the number of blocks to be moved
 * @moved_len:          moved block length
 *
 * This function returns 0 and sets the moved block length in @moved_len on
 * success; otherwise it returns an error value.
 */
int
ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
                  __u64 donor_blk, __u64 len, __u64 *moved_len)
{
        struct inode *orig_inode = file_inode(o_filp);
        struct inode *donor_inode = file_inode(d_filp);
        struct ext4_ext_path *path = NULL;
        int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
        ext4_lblk_t o_end, o_start = orig_blk;
        ext4_lblk_t d_start = donor_blk;
        int ret;

        if (orig_inode->i_sb != donor_inode->i_sb) {
                ext4_debug("ext4 move extent: The argument files "
                        "should be in same FS [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        /* orig and donor should be different inodes */
        if (orig_inode == donor_inode) {
                ext4_debug("ext4 move extent: The argument files should not "
                        "be same inode [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        /* Regular file check */
        if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
                ext4_debug("ext4 move extent: The argument files should be "
                        "regular file [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        /* TODO: it's not obvious how to swap blocks for inodes with full
           journaling enabled */
        if (ext4_should_journal_data(orig_inode) ||
            ext4_should_journal_data(donor_inode)) {
                ext4_msg(orig_inode->i_sb, KERN_ERR,
                         "Online defrag not supported with data journaling");
                return -EOPNOTSUPP;
        }

        if (ext4_encrypted_inode(orig_inode) ||
            ext4_encrypted_inode(donor_inode)) {
                ext4_msg(orig_inode->i_sb, KERN_ERR,
                         "Online defrag not supported for encrypted files");
                return -EOPNOTSUPP;
        }

        /* Protect orig and donor inodes against a truncate */
        lock_two_nondirectories(orig_inode, donor_inode);

        /* Wait for all existing dio workers */
        inode_dio_wait(orig_inode);
        inode_dio_wait(donor_inode);

        /* Protect extent tree against block allocations via delalloc */
        ext4_double_down_write_data_sem(orig_inode, donor_inode);
        /* Check whether move_extent can be done with these arguments */
        ret = mext_check_arguments(orig_inode, donor_inode, orig_blk,
                                   donor_blk, &len);
        if (ret)
                goto out;
        o_end = o_start + len;

        while (o_start < o_end) {
                struct ext4_extent *ex;
                ext4_lblk_t cur_blk, next_blk;
                pgoff_t orig_page_index, donor_page_index;
                int offset_in_page;
                int unwritten, cur_len;

                ret = get_ext_path(orig_inode, o_start, &path);
                if (ret)
                        goto out;
                ex = path[path->p_depth].p_ext;
                next_blk = ext4_ext_next_allocated_block(path);
                cur_blk = le32_to_cpu(ex->ee_block);
                cur_len = ext4_ext_get_actual_len(ex);
                /* Check for a hole before the start pos */
                if (cur_blk + cur_len - 1 < o_start) {
                        if (next_blk == EXT_MAX_BLOCKS) {
                                o_start = o_end;
                                ret = -ENODATA;
                                goto out;
                        }
                        d_start += next_blk - o_start;
                        o_start = next_blk;
                        continue;
                /* Check for a hole after the start pos */
                } else if (cur_blk > o_start) {
                        /* Skip hole */
                        d_start += cur_blk - o_start;
                        o_start = cur_blk;
                        /* Extent inside requested range? */
                        if (cur_blk >= o_end)
                                goto out;
                } else { /* in_range(o_start, o_blk, o_len) */
                        cur_len += cur_blk - o_start;
                }
                unwritten = ext4_ext_is_unwritten(ex);
                if (o_end - o_start < cur_len)
                        cur_len = o_end - o_start;

                orig_page_index = o_start >> (PAGE_SHIFT -
                                              orig_inode->i_blkbits);
                donor_page_index = d_start >> (PAGE_SHIFT -
                                               donor_inode->i_blkbits);
                offset_in_page = o_start % blocks_per_page;
                if (cur_len > blocks_per_page - offset_in_page)
                        cur_len = blocks_per_page - offset_in_page;
                /*
                 * Up the semaphore to avoid the following problems:
                 * a. transaction deadlock among ext4_journal_start,
                 *    ->write_begin via pagefault, and jbd2_journal_commit
                 * b. racing with ->readpage, ->write_begin, and ext4_get_block
                 *    in move_extent_per_page
                 */
                ext4_double_up_write_data_sem(orig_inode, donor_inode);
                /* Swap original branches with new branches */
                move_extent_per_page(o_filp, donor_inode,
                                     orig_page_index, donor_page_index,
                                     offset_in_page, cur_len,
                                     unwritten, &ret);
                ext4_double_down_write_data_sem(orig_inode, donor_inode);
                if (ret < 0)
                        break;
                o_start += cur_len;
                d_start += cur_len;
        }
        *moved_len = o_start - orig_blk;
        if (*moved_len > len)
                *moved_len = len;

out:
        if (*moved_len) {
                ext4_discard_preallocations(orig_inode);
                ext4_discard_preallocations(donor_inode);
        }

        ext4_ext_drop_refs(path);
        kfree(path);
        ext4_double_up_write_data_sem(orig_inode, donor_inode);
        unlock_two_nondirectories(orig_inode, donor_inode);

        return ret;
}

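/*
 * A minimal usage sketch for the entry point above: in this era of the
 * kernel, ext4_move_extents() is reached from the EXT4_IOC_MOVE_EXT ioctl,
 * and a userspace caller (e.g. a defragmenter) would drive it roughly as
 * below. The field names follow struct move_extent in ext4.h; treat the
 * snippet as illustrative rather than authoritative.
 *
 *      struct move_extent me = {
 *              .donor_fd    = donor_fd,    // open donor file descriptor
 *              .orig_start  = 0,           // logical start block in orig
 *              .donor_start = 0,           // logical start block in donor
 *              .len         = nr_blocks,   // number of blocks to move
 *      };
 *
 *      if (ioctl(orig_fd, EXT4_IOC_MOVE_EXT, &me) == 0)
 *              printf("moved %llu blocks\n",
 *                     (unsigned long long)me.moved_len);
 */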