linux/fs/ext4/move_extent.c
   1// SPDX-License-Identifier: LGPL-2.1
   2/*
   3 * Copyright (c) 2008,2009 NEC Software Tohoku, Ltd.
   4 * Written by Takashi Sato <t-sato@yk.jp.nec.com>
   5 *            Akira Fujita <a-fujita@rs.jp.nec.com>
   6 */
   7
   8#include <linux/fs.h>
   9#include <linux/quotaops.h>
  10#include <linux/slab.h>
  11#include "ext4_jbd2.h"
  12#include "ext4.h"
  13#include "ext4_extents.h"
  14
  15/**
  16 * get_ext_path - Find an extent path for the designated logical block number.
  17 *
  18 * @inode:      the inode to be searched
  19 * @lblock:     logical block number to find an extent path for
  20 * @ppath:      pointer to an extent path pointer (for output)
  21 *
  22 * ext4_find_extent() wrapper. Return 0 on success, or a negative error value
  23 * on failure.
  24 */
  25static inline int
  26get_ext_path(struct inode *inode, ext4_lblk_t lblock,
  27                struct ext4_ext_path **ppath)
  28{
  29        struct ext4_ext_path *path;
  30
  31        path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
  32        if (IS_ERR(path))
  33                return PTR_ERR(path);
  34        if (path[ext_depth(inode)].p_ext == NULL) {
  35                ext4_ext_drop_refs(path);
  36                kfree(path);
  37                *ppath = NULL;
  38                return -ENODATA;
  39        }
  40        *ppath = path;
  41        return 0;
  42}
  43
  44/**
  45 * ext4_double_down_write_data_sem - Acquire two inodes' write locks
  46 *                                   of i_data_sem
  47 *
  48 * Acquire the write locks of i_data_sem of both inodes, in inode address order.
  49 */
  50void
  51ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
  52{
  53        if (first < second) {
  54                down_write(&EXT4_I(first)->i_data_sem);
  55                down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
  56        } else {
  57                down_write(&EXT4_I(second)->i_data_sem);
  58                down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
  59
  60        }
  61}
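    /*
     * A note on ordering: taking the two i_data_sem locks in inode-address
     * order gives every caller the same global ordering, so two tasks that
     * operate on the same pair of inodes in opposite directions cannot
     * deadlock ABBA-style. The I_DATA_SEM_OTHER subclass merely tells
     * lockdep that the nested (second) acquisition is intentional.
     */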
  62
  63/**
  64 * ext4_double_up_write_data_sem - Release two inodes' write lock of i_data_sem
  65 *
  66 * @orig_inode:         original inode structure; its lock is released first
  67 * @donor_inode:        donor inode structure; its lock is released second
  68 * Release the write locks of i_data_sem of the two inodes (orig and donor).
  69 */
  70void
  71ext4_double_up_write_data_sem(struct inode *orig_inode,
  72                              struct inode *donor_inode)
  73{
  74        up_write(&EXT4_I(orig_inode)->i_data_sem);
  75        up_write(&EXT4_I(donor_inode)->i_data_sem);
  76}
  77
  78/**
  79 * mext_check_coverage - Check that all extents in range have the same type
  80 *
  81 * @inode:              inode in question
  82 * @from:               starting block offset of the range
  83 * @count:              block count to be checked
  84 * @unwritten:          extents expected to be unwritten
  85 * @err:                pointer to save error value
  86 *
  87 * Return 1 if all extents in range have the expected type, and zero otherwise.
  88 */
  89static int
  90mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
  91                    int unwritten, int *err)
  92{
  93        struct ext4_ext_path *path = NULL;
  94        struct ext4_extent *ext;
  95        int ret = 0;
  96        ext4_lblk_t last = from + count;
  97        while (from < last) {
  98                *err = get_ext_path(inode, from, &path);
  99                if (*err)
 100                        goto out;
 101                ext = path[ext_depth(inode)].p_ext;
 102                if (unwritten != ext4_ext_is_unwritten(ext))
 103                        goto out;
 104                from += ext4_ext_get_actual_len(ext);
 105                ext4_ext_drop_refs(path);
 106        }
 107        ret = 1;
 108out:
 109        ext4_ext_drop_refs(path);
 110        kfree(path);
 111        return ret;
 112}
 113
 114/**
 115 * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
 116 *
 117 * @inode1:     the first inode structure
 118 * @inode2:     the second inode structure
 119 * @index1:     page index on @inode1
 120 * @index2:     page index on @inode2
 121 * @page:       result page vector
 122 *
 123 * Grab two locked pages for the inodes, locking them in inode order
 124 */
 125static int
 126mext_page_double_lock(struct inode *inode1, struct inode *inode2,
 127                      pgoff_t index1, pgoff_t index2, struct page *page[2])
 128{
 129        struct address_space *mapping[2];
 130        unsigned fl = AOP_FLAG_NOFS;
 131
 132        BUG_ON(!inode1 || !inode2);
 133        if (inode1 < inode2) {
 134                mapping[0] = inode1->i_mapping;
 135                mapping[1] = inode2->i_mapping;
 136        } else {
 137                swap(index1, index2);
 138                mapping[0] = inode2->i_mapping;
 139                mapping[1] = inode1->i_mapping;
 140        }
 141
 142        page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
 143        if (!page[0])
 144                return -ENOMEM;
 145
 146        page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
 147        if (!page[1]) {
 148                unlock_page(page[0]);
 149                put_page(page[0]);
 150                return -ENOMEM;
 151        }
 152        /*
 153         * grab_cache_page_write_begin() may not wait on the page's writeback
 154         * if the BDI does not demand it. But it is reasonable to be very
 155         * conservative here and explicitly wait on the page's writeback.
 156         */
 157        wait_on_page_writeback(page[0]);
 158        wait_on_page_writeback(page[1]);
 159        if (inode1 > inode2)
 160                swap(page[0], page[1]);
 161
 162        return 0;
 163}
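    /*
     * As with ext4_double_down_write_data_sem(), the two page locks above are
     * taken in inode-pointer order, so concurrent movers working on the same
     * pair of inodes always lock the pages in the same order and cannot
     * deadlock on each other's page locks.
     */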
 164
 165/* Force page buffers uptodate w/o dropping page's lock */
 166static int
 167mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
 168{
 169        struct inode *inode = page->mapping->host;
 170        sector_t block;
 171        struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
 172        unsigned int blocksize, block_start, block_end;
 173        int i, err,  nr = 0, partial = 0;
 174        BUG_ON(!PageLocked(page));
 175        BUG_ON(PageWriteback(page));
 176
 177        if (PageUptodate(page))
 178                return 0;
 179
 180        blocksize = i_blocksize(inode);
 181        if (!page_has_buffers(page))
 182                create_empty_buffers(page, blocksize, 0);
 183
 184        head = page_buffers(page);
 185        block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
 186        for (bh = head, block_start = 0; bh != head || !block_start;
 187             block++, block_start = block_end, bh = bh->b_this_page) {
 188                block_end = block_start + blocksize;
 189                if (block_end <= from || block_start >= to) {
 190                        if (!buffer_uptodate(bh))
 191                                partial = 1;
 192                        continue;
 193                }
 194                if (buffer_uptodate(bh))
 195                        continue;
 196                if (!buffer_mapped(bh)) {
 197                        err = ext4_get_block(inode, block, bh, 0);
 198                        if (err) {
 199                                SetPageError(page);
 200                                return err;
 201                        }
 202                        if (!buffer_mapped(bh)) {
 203                                zero_user(page, block_start, blocksize);
 204                                set_buffer_uptodate(bh);
 205                                continue;
 206                        }
 207                }
 208                BUG_ON(nr >= MAX_BUF_PER_PAGE);
 209                arr[nr++] = bh;
 210        }
 211        /* No io required */
 212        if (!nr)
 213                goto out;
 214
 215        for (i = 0; i < nr; i++) {
 216                bh = arr[i];
 217                if (!bh_uptodate_or_lock(bh)) {
 218                        err = bh_submit_read(bh);
 219                        if (err)
 220                                return err;
 221                }
 222        }
 223out:
 224        if (!partial)
 225                SetPageUptodate(page);
 226        return 0;
 227}
 228
 229/**
 230 * move_extent_per_page - Move extent data per page
 231 *
 232 * @o_filp:                     file structure of original file
 233 * @donor_inode:                donor inode
 234 * @orig_page_offset:           page index on original file
 235 * @donor_page_offset:          page index on donor file
 236 * @data_offset_in_page:        block index where data swapping starts
 237 * @block_len_in_page:          the number of blocks to be swapped
 238 * @unwritten:                  whether the orig extent is unwritten
 239 * @err:                        pointer to save return value
 240 *
 241 * Save the data in original inode blocks and replace original inode extents
 242 * with donor inode extents by calling ext4_swap_extents().
 243 * Finally, write out the saved data in new original inode blocks. Return
 244 * replaced block count.
 245 */
 246static int
 247move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 248                     pgoff_t orig_page_offset, pgoff_t donor_page_offset,
 249                     int data_offset_in_page,
 250                     int block_len_in_page, int unwritten, int *err)
 251{
 252        struct inode *orig_inode = file_inode(o_filp);
 253        struct page *pagep[2] = {NULL, NULL};
 254        handle_t *handle;
 255        ext4_lblk_t orig_blk_offset, donor_blk_offset;
 256        unsigned long blocksize = orig_inode->i_sb->s_blocksize;
 257        unsigned int tmp_data_size, data_size, replaced_size;
 258        int i, err2, jblocks, retries = 0;
 259        int replaced_count = 0;
 260        int from = data_offset_in_page << orig_inode->i_blkbits;
 261        int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
 262        struct super_block *sb = orig_inode->i_sb;
 263        struct buffer_head *bh = NULL;
 264
 265        /*
 266         * It needs twice the amount of ordinary journal buffers because
 267         * inode and donor_inode may each change different metadata blocks.
 268         */
 269again:
 270        *err = 0;
 271        jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
 272        handle = ext4_journal_start(orig_inode, EXT4_HT_MOVE_EXTENTS, jblocks);
 273        if (IS_ERR(handle)) {
 274                *err = PTR_ERR(handle);
 275                return 0;
 276        }
 277
 278        orig_blk_offset = orig_page_offset * blocks_per_page +
 279                data_offset_in_page;
 280
 281        donor_blk_offset = donor_page_offset * blocks_per_page +
 282                data_offset_in_page;
 283
 284        /* Calculate data_size */
 285        if ((orig_blk_offset + block_len_in_page - 1) ==
 286            ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
 287                /* Replace the last block */
 288                tmp_data_size = orig_inode->i_size & (blocksize - 1);
 289                /*
 290                 * If tmp_data_size equals zero, the file size is a multiple of
 291                 * blocksize, so use a full block instead.
 292                 */
 293                if (tmp_data_size == 0)
 294                        tmp_data_size = blocksize;
 295
 296                data_size = tmp_data_size +
 297                        ((block_len_in_page - 1) << orig_inode->i_blkbits);
 298        } else
 299                data_size = block_len_in_page << orig_inode->i_blkbits;
 300
 301        replaced_size = data_size;
 302
 303        *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
 304                                     donor_page_offset, pagep);
 305        if (unlikely(*err < 0))
 306                goto stop_journal;
 307        /*
 308         * If the orig extent was unwritten it can become initialized
 309         * at any time after i_data_sem was dropped; in order to
 310         * serialize with delalloc we have to recheck the extent while we
 311         * hold the page's lock. If it is still unwritten, a data copy is
 312         * not necessary, just swap the data blocks between orig and donor.
 313         */
 314        if (unwritten) {
 315                ext4_double_down_write_data_sem(orig_inode, donor_inode);
 316                /* If any of the extents in range became initialized we have
 317                 * to fall back to data copying */
 318                unwritten = mext_check_coverage(orig_inode, orig_blk_offset,
 319                                                block_len_in_page, 1, err);
 320                if (*err)
 321                        goto drop_data_sem;
 322
 323                unwritten &= mext_check_coverage(donor_inode, donor_blk_offset,
 324                                                 block_len_in_page, 1, err);
 325                if (*err)
 326                        goto drop_data_sem;
 327
 328                if (!unwritten) {
 329                        ext4_double_up_write_data_sem(orig_inode, donor_inode);
 330                        goto data_copy;
 331                }
 332                if ((page_has_private(pagep[0]) &&
 333                     !try_to_release_page(pagep[0], 0)) ||
 334                    (page_has_private(pagep[1]) &&
 335                     !try_to_release_page(pagep[1], 0))) {
 336                        *err = -EBUSY;
 337                        goto drop_data_sem;
 338                }
 339                replaced_count = ext4_swap_extents(handle, orig_inode,
 340                                                   donor_inode, orig_blk_offset,
 341                                                   donor_blk_offset,
 342                                                   block_len_in_page, 1, err);
 343        drop_data_sem:
 344                ext4_double_up_write_data_sem(orig_inode, donor_inode);
 345                goto unlock_pages;
 346        }
 347data_copy:
 348        *err = mext_page_mkuptodate(pagep[0], from, from + replaced_size);
 349        if (*err)
 350                goto unlock_pages;
 351
 352        /* At this point all buffers in range are uptodate, the old mapping
 353         * layout is no longer required; try to drop it now. */
 354        if ((page_has_private(pagep[0]) && !try_to_release_page(pagep[0], 0)) ||
 355            (page_has_private(pagep[1]) && !try_to_release_page(pagep[1], 0))) {
 356                *err = -EBUSY;
 357                goto unlock_pages;
 358        }
 359        ext4_double_down_write_data_sem(orig_inode, donor_inode);
 360        replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
 361                                           orig_blk_offset, donor_blk_offset,
 362                                           block_len_in_page, 1, err);
 363        ext4_double_up_write_data_sem(orig_inode, donor_inode);
 364        if (*err) {
 365                if (replaced_count) {
 366                        block_len_in_page = replaced_count;
 367                        replaced_size =
 368                                block_len_in_page << orig_inode->i_blkbits;
 369                } else
 370                        goto unlock_pages;
 371        }
 372        /* Perform all necessary steps similar to write_begin()/write_end()
 373         * but keep in mind that i_size will not change */
 374        if (!page_has_buffers(pagep[0]))
 375                create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
 376        bh = page_buffers(pagep[0]);
 377        for (i = 0; i < data_offset_in_page; i++)
 378                bh = bh->b_this_page;
 379        for (i = 0; i < block_len_in_page; i++) {
 380                *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
 381                if (*err < 0)
 382                        break;
 383                bh = bh->b_this_page;
 384        }
 385        if (!*err)
 386                *err = block_commit_write(pagep[0], from, from + replaced_size);
 387
 388        if (unlikely(*err < 0))
 389                goto repair_branches;
 390
 391        /* Even in the case of data=writeback it is reasonable to pin the
 392         * inode to the transaction, to prevent unexpected data loss */
 393        *err = ext4_jbd2_inode_add_write(handle, orig_inode);
 394
 395unlock_pages:
 396        unlock_page(pagep[0]);
 397        put_page(pagep[0]);
 398        unlock_page(pagep[1]);
 399        put_page(pagep[1]);
 400stop_journal:
 401        ext4_journal_stop(handle);
 402        if (*err == -ENOSPC &&
 403            ext4_should_retry_alloc(sb, &retries))
 404                goto again;
 405        /* The buffer was busy, probably because it is pinned to a journal
 406         * transaction; forcing a transaction commit may help to free it. */
 407        if (*err == -EBUSY && retries++ < 4 && EXT4_SB(sb)->s_journal &&
 408            jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal))
 409                goto again;
 410        return replaced_count;
 411
 412repair_branches:
 413        /*
 414         * This should never ever happen!
 415         * Extents are swapped already, but we are not able to copy data.
 416         * Try to swap the extents back to their original places
 417         */
 418        ext4_double_down_write_data_sem(orig_inode, donor_inode);
 419        replaced_count = ext4_swap_extents(handle, donor_inode, orig_inode,
 420                                           orig_blk_offset, donor_blk_offset,
 421                                           block_len_in_page, 0, &err2);
 422        ext4_double_up_write_data_sem(orig_inode, donor_inode);
 423        if (replaced_count != block_len_in_page) {
 424                EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset),
 425                                       "Unable to copy data block,"
 426                                       " data will be lost.");
 427                *err = -EIO;
 428        }
 429        replaced_count = 0;
 430        goto unlock_pages;
 431}
 432
 433/**
 434 * mext_check_arguments - Check whether move extent can be done
 435 *
 436 * @orig_inode:         original inode
 437 * @donor_inode:        donor inode
 438 * @orig_start:         logical start offset in block for orig
 439 * @donor_start:        logical start offset in block for donor
 440 * @len:                the number of blocks to be moved
 441 *
 442 * Check the arguments of ext4_move_extents() to determine whether the files
 443 * can be exchanged with each other.
 444 * Return 0 on success, or a negative error value on failure.
 445 */
 446static int
 447mext_check_arguments(struct inode *orig_inode,
 448                     struct inode *donor_inode, __u64 orig_start,
 449                     __u64 donor_start, __u64 *len)
 450{
 451        __u64 orig_eof, donor_eof;
 452        unsigned int blkbits = orig_inode->i_blkbits;
 453        unsigned int blocksize = 1 << blkbits;
 454
 455        orig_eof = (i_size_read(orig_inode) + blocksize - 1) >> blkbits;
 456        donor_eof = (i_size_read(donor_inode) + blocksize - 1) >> blkbits;
 457
 458
 459        if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
 460                ext4_debug("ext4 move extent: suid or sgid is set"
 461                           " to donor file [ino:orig %lu, donor %lu]\n",
 462                           orig_inode->i_ino, donor_inode->i_ino);
 463                return -EINVAL;
 464        }
 465
 466        if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
 467                return -EPERM;
 468
 469        /* Ext4 move extent does not support swapfiles */
 470        if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
 471                ext4_debug("ext4 move extent: The argument files should "
 472                        "not be swapfile [ino:orig %lu, donor %lu]\n",
 473                        orig_inode->i_ino, donor_inode->i_ino);
 474                return -EBUSY;
 475        }
 476
 477        if (ext4_is_quota_file(orig_inode) && ext4_is_quota_file(donor_inode)) {
 478                ext4_debug("ext4 move extent: The argument files should "
 479                        "not be quota files [ino:orig %lu, donor %lu]\n",
 480                        orig_inode->i_ino, donor_inode->i_ino);
 481                return -EBUSY;
 482        }
 483
 484        /* Ext4 move extent supports only extent-based files */
 485        if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
 486                ext4_debug("ext4 move extent: orig file is not extents "
 487                        "based file [ino:orig %lu]\n", orig_inode->i_ino);
 488                return -EOPNOTSUPP;
 489        } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
 490                ext4_debug("ext4 move extent: donor file is not extents "
 491                        "based file [ino:donor %lu]\n", donor_inode->i_ino);
 492                return -EOPNOTSUPP;
 493        }
 494
 495        if ((!orig_inode->i_size) || (!donor_inode->i_size)) {
 496                ext4_debug("ext4 move extent: File size is 0 byte\n");
 497                return -EINVAL;
 498        }
 499
 500        /* Start block offsets should be the same within a page */
 501        if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
 502            (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
 503                ext4_debug("ext4 move extent: orig and donor's start "
 504                        "offsets are not aligned [ino:orig %lu, donor %lu]\n",
 505                        orig_inode->i_ino, donor_inode->i_ino);
 506                return -EINVAL;
 507        }
 508
 509        if ((orig_start >= EXT_MAX_BLOCKS) ||
 510            (donor_start >= EXT_MAX_BLOCKS) ||
 511            (*len > EXT_MAX_BLOCKS) ||
 512            (donor_start + *len >= EXT_MAX_BLOCKS) ||
 513            (orig_start + *len >= EXT_MAX_BLOCKS))  {
 514                ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
 515                        "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
 516                        orig_inode->i_ino, donor_inode->i_ino);
 517                return -EINVAL;
 518        }
 519        if (orig_eof <= orig_start)
 520                *len = 0;
 521        else if (orig_eof < orig_start + *len - 1)
 522                *len = orig_eof - orig_start;
 523        if (donor_eof <= donor_start)
 524                *len = 0;
 525        else if (donor_eof < donor_start + *len - 1)
 526                *len = donor_eof - donor_start;
 527        if (!*len) {
 528                ext4_debug("ext4 move extent: len should not be 0 "
 529                        "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
 530                        donor_inode->i_ino);
 531                return -EINVAL;
 532        }
 533
 534        return 0;
 535}
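    /*
     * A worked example of the checks above (illustrative numbers only): with
     * 4 KiB pages and 1 KiB blocks there are four blocks per page, so
     * orig_start = 5 and donor_start = 9 both sit at offset 1 within their
     * page and pass the alignment check, while donor_start = 10 would be
     * rejected with -EINVAL. Likewise, if orig_eof is 8 blocks and the caller
     * passes orig_start = 6 with *len = 10, *len is clamped to
     * orig_eof - orig_start = 2 before the move proceeds.
     */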
 536
 537/**
 538 * ext4_move_extents - Exchange the specified range of a file
 539 *
 540 * @o_filp:             file structure of the original file
 541 * @d_filp:             file structure of the donor file
 542 * @orig_blk:           start offset in block for orig
 543 * @donor_blk:          start offset in block for donor
 544 * @len:                the number of blocks to be moved
 545 * @moved_len:          moved block length
 546 *
 547 * This function returns 0 and the moved block length is set in moved_len
 548 * if it succeeds, otherwise it returns an error value.
 549 *
 550 */
 551int
 552ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
 553                  __u64 donor_blk, __u64 len, __u64 *moved_len)
 554{
 555        struct inode *orig_inode = file_inode(o_filp);
 556        struct inode *donor_inode = file_inode(d_filp);
 557        struct ext4_ext_path *path = NULL;
 558        int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
 559        ext4_lblk_t o_end, o_start = orig_blk;
 560        ext4_lblk_t d_start = donor_blk;
 561        int ret;
 562
 563        if (orig_inode->i_sb != donor_inode->i_sb) {
 564                ext4_debug("ext4 move extent: The argument files "
 565                        "should be in same FS [ino:orig %lu, donor %lu]\n",
 566                        orig_inode->i_ino, donor_inode->i_ino);
 567                return -EINVAL;
 568        }
 569
 570        /* orig and donor should be different inodes */
 571        if (orig_inode == donor_inode) {
 572                ext4_debug("ext4 move extent: The argument files should not "
 573                        "be same inode [ino:orig %lu, donor %lu]\n",
 574                        orig_inode->i_ino, donor_inode->i_ino);
 575                return -EINVAL;
 576        }
 577
 578        /* Regular file check */
 579        if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
 580                ext4_debug("ext4 move extent: The argument files should be "
 581                        "regular file [ino:orig %lu, donor %lu]\n",
 582                        orig_inode->i_ino, donor_inode->i_ino);
 583                return -EINVAL;
 584        }
 585
 586        /* TODO: it's not obvious how to swap blocks for inodes with full
 587           journaling enabled */
 588        if (ext4_should_journal_data(orig_inode) ||
 589            ext4_should_journal_data(donor_inode)) {
 590                ext4_msg(orig_inode->i_sb, KERN_ERR,
 591                         "Online defrag not supported with data journaling");
 592                return -EOPNOTSUPP;
 593        }
 594
 595        if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
 596                ext4_msg(orig_inode->i_sb, KERN_ERR,
 597                         "Online defrag not supported for encrypted files");
 598                return -EOPNOTSUPP;
 599        }
 600
 601        /* Protect orig and donor inodes against a truncate */
 602        lock_two_nondirectories(orig_inode, donor_inode);
 603
 604        /* Wait for all existing dio workers */
 605        inode_dio_wait(orig_inode);
 606        inode_dio_wait(donor_inode);
 607
 608        /* Protect extent tree against block allocations via delalloc */
 609        ext4_double_down_write_data_sem(orig_inode, donor_inode);
 610        /* Check the filesystem environment to see whether move_extent can be done */
 611        ret = mext_check_arguments(orig_inode, donor_inode, orig_blk,
 612                                    donor_blk, &len);
 613        if (ret)
 614                goto out;
 615        o_end = o_start + len;
 616
 617        while (o_start < o_end) {
 618                struct ext4_extent *ex;
 619                ext4_lblk_t cur_blk, next_blk;
 620                pgoff_t orig_page_index, donor_page_index;
 621                int offset_in_page;
 622                int unwritten, cur_len;
 623
 624                ret = get_ext_path(orig_inode, o_start, &path);
 625                if (ret)
 626                        goto out;
 627                ex = path[path->p_depth].p_ext;
 628                next_blk = ext4_ext_next_allocated_block(path);
 629                cur_blk = le32_to_cpu(ex->ee_block);
 630                cur_len = ext4_ext_get_actual_len(ex);
 631                /* Check hole before the start pos */
 632                if (cur_blk + cur_len - 1 < o_start) {
 633                        if (next_blk == EXT_MAX_BLOCKS) {
 634                                o_start = o_end;
 635                                ret = -ENODATA;
 636                                goto out;
 637                        }
 638                        d_start += next_blk - o_start;
 639                        o_start = next_blk;
 640                        continue;
 641                /* Check hole after the start pos */
 642                } else if (cur_blk > o_start) {
 643                        /* Skip hole */
 644                        d_start += cur_blk - o_start;
 645                        o_start = cur_blk;
 646                        /* Extent inside requested range? */
 647                        if (cur_blk >= o_end)
 648                                goto out;
 649                } else { /* in_range(o_start, o_blk, o_len) */
 650                        cur_len += cur_blk - o_start;
 651                }
 652                unwritten = ext4_ext_is_unwritten(ex);
 653                if (o_end - o_start < cur_len)
 654                        cur_len = o_end - o_start;
 655
 656                orig_page_index = o_start >> (PAGE_SHIFT -
 657                                               orig_inode->i_blkbits);
 658                donor_page_index = d_start >> (PAGE_SHIFT -
 659                                               donor_inode->i_blkbits);
 660                offset_in_page = o_start % blocks_per_page;
 661                if (cur_len > blocks_per_page - offset_in_page)
 662                        cur_len = blocks_per_page - offset_in_page;
 663                /*
 664                 * Up the semaphore to avoid the following problems:
 665                 * a. transaction deadlock among ext4_journal_start,
 666                 *    ->write_begin via pagefault, and jbd2_journal_commit
 667                 * b. racing with ->readpage, ->write_begin, and ext4_get_block
 668                 *    in move_extent_per_page
 669                 */
 670                ext4_double_up_write_data_sem(orig_inode, donor_inode);
 671                /* Swap original branches with new branches */
 672                move_extent_per_page(o_filp, donor_inode,
 673                                     orig_page_index, donor_page_index,
 674                                     offset_in_page, cur_len,
 675                                     unwritten, &ret);
 676                ext4_double_down_write_data_sem(orig_inode, donor_inode);
 677                if (ret < 0)
 678                        break;
 679                o_start += cur_len;
 680                d_start += cur_len;
 681        }
 682        *moved_len = o_start - orig_blk;
 683        if (*moved_len > len)
 684                *moved_len = len;
 685
 686out:
 687        if (*moved_len) {
 688                ext4_discard_preallocations(orig_inode);
 689                ext4_discard_preallocations(donor_inode);
 690        }
 691
 692        ext4_ext_drop_refs(path);
 693        kfree(path);
 694        ext4_double_up_write_data_sem(orig_inode, donor_inode);
 695        unlock_two_nondirectories(orig_inode, donor_inode);
 696
 697        return ret;
 698}
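
    /*
     * For context, ext4_move_extents() is reached from userspace through the
     * EXT4_IOC_MOVE_EXT ioctl (this is how e4defrag drives it). A minimal
     * sketch of a caller, assuming the struct move_extent layout from
     * fs/ext4/ext4.h (userspace tools carry their own copy of the struct and
     * ioctl number) and hypothetical descriptors orig_fd/donor_fd:
     *
     *        struct move_extent me = {
     *                .donor_fd    = donor_fd,        // donor file descriptor
     *                .orig_start  = 0,               // start block in orig
     *                .donor_start = 0,               // start block in donor
     *                .len         = len_in_blocks,   // number of blocks to move
     *        };
     *        if (ioctl(orig_fd, EXT4_IOC_MOVE_EXT, &me) < 0)
     *                perror("EXT4_IOC_MOVE_EXT");
     *        // on success, me.moved_len holds the moved block count
     */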
 699