linux/fs/ext4/move_extent.c
// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright (c) 2008,2009 NEC Software Tohoku, Ltd.
 * Written by Takashi Sato <t-sato@yk.jp.nec.com>
 *            Akira Fujita <a-fujita@rs.jp.nec.com>
 */

#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "ext4_extents.h"

/**
 * get_ext_path - Find an extent path for the designated logical block number.
 *
 * @inode:      the inode to be searched
 * @lblock:     logical block number to find an extent path for
 * @ppath:      pointer to an extent path pointer (for output)
 *
 * ext4_find_extent wrapper. Return 0 on success, or a negative error value
 * on failure.
 */
static inline int
get_ext_path(struct inode *inode, ext4_lblk_t lblock,
                struct ext4_ext_path **ppath)
{
        struct ext4_ext_path *path;

        path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
        if (IS_ERR(path))
                return PTR_ERR(path);
        if (path[ext_depth(inode)].p_ext == NULL) {
                ext4_ext_drop_refs(path);
                kfree(path);
                *ppath = NULL;
                return -ENODATA;
        }
        *ppath = path;
        return 0;
}

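/*
 * Note: a path obtained through get_ext_path() must be released by the
 * caller with ext4_ext_drop_refs() followed by kfree(), as the users of
 * this helper below do.
 */
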
/**
 * ext4_double_down_write_data_sem - Acquire the write locks of i_data_sem
 *                                   for two inodes
 *
 * @first:      the first inode to be locked
 * @second:     the second inode to be locked
 *
 * Acquire the write lock of i_data_sem for both inodes.
 */
void
ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
{
        if (first < second) {
                down_write(&EXT4_I(first)->i_data_sem);
                down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
        } else {
                down_write(&EXT4_I(second)->i_data_sem);
                down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
        }
}

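/*
 * The "down" helper above always takes the semaphore of the lower-addressed
 * inode first, so any two inodes are locked in one consistent order and an
 * ABBA deadlock between concurrent callers is avoided.  The "up" helper
 * below releases them in argument order; the release order is irrelevant
 * for deadlock avoidance, so it does not need to know which inode was
 * locked first.
 */
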
/**
 * ext4_double_up_write_data_sem - Release two inodes' write locks of i_data_sem
 *
 * @orig_inode:         original inode structure whose lock is released first
 * @donor_inode:        donor inode structure whose lock is released second
 *
 * Release the write lock of i_data_sem for both inodes (orig and donor).
 */
void
ext4_double_up_write_data_sem(struct inode *orig_inode,
                              struct inode *donor_inode)
{
        up_write(&EXT4_I(orig_inode)->i_data_sem);
        up_write(&EXT4_I(donor_inode)->i_data_sem);
}

/**
 * mext_check_coverage - Check that all extents in a range have the same type
 *
 * @inode:              inode in question
 * @from:               block offset of inode
 * @count:              block count to be checked
 * @unwritten:          extents expected to be unwritten
 * @err:                pointer to save error value
 *
 * Return 1 if all extents in the range have the expected type, and zero
 * otherwise.
 */
static int
mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
                    int unwritten, int *err)
{
        struct ext4_ext_path *path = NULL;
        struct ext4_extent *ext;
        int ret = 0;
        ext4_lblk_t last = from + count;

        while (from < last) {
                *err = get_ext_path(inode, from, &path);
                if (*err)
                        goto out;
                ext = path[ext_depth(inode)].p_ext;
                if (unwritten != ext4_ext_is_unwritten(ext))
                        goto out;
                from += ext4_ext_get_actual_len(ext);
                ext4_ext_drop_refs(path);
        }
        ret = 1;
out:
        ext4_ext_drop_refs(path);
        kfree(path);
        return ret;
}

/**
 * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
 *
 * @inode1:     the first inode structure
 * @inode2:     the second inode structure
 * @index1:     page index on @inode1
 * @index2:     page index on @inode2
 * @page:       result page vector
 *
 * Grab two locked pages for the inodes, locking in inode order (lowest
 * address first).  On return, page[0] belongs to @inode1 and page[1] to
 * @inode2.
 */
static int
mext_page_double_lock(struct inode *inode1, struct inode *inode2,
                      pgoff_t index1, pgoff_t index2, struct page *page[2])
{
        struct address_space *mapping[2];
        unsigned fl = AOP_FLAG_NOFS;

        BUG_ON(!inode1 || !inode2);
        if (inode1 < inode2) {
                mapping[0] = inode1->i_mapping;
                mapping[1] = inode2->i_mapping;
        } else {
                swap(index1, index2);
                mapping[0] = inode2->i_mapping;
                mapping[1] = inode1->i_mapping;
        }

        page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
        if (!page[0])
                return -ENOMEM;

        page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
        if (!page[1]) {
                unlock_page(page[0]);
                put_page(page[0]);
                return -ENOMEM;
        }
        /*
         * grab_cache_page_write_begin() may not wait on the page's writeback
         * if the BDI does not demand it.  But it is reasonable to be very
         * conservative here and explicitly wait on the page's writeback.
         */
        wait_on_page_writeback(page[0]);
        wait_on_page_writeback(page[1]);
        if (inode1 > inode2)
                swap(page[0], page[1]);

        return 0;
}

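/*
 * Lock ordering note: move_extent_per_page() calls mext_page_double_lock()
 * before taking i_data_sem, and ext4_move_extents() drops i_data_sem around
 * each call into move_extent_per_page(), so page locks are always acquired
 * outside of i_data_sem in this file.
 */
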
/* Force page buffers uptodate w/o dropping page's lock */
static int
mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        sector_t block;
        struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
        unsigned int blocksize, block_start, block_end;
        int i, err, nr = 0, partial = 0;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        if (PageUptodate(page))
                return 0;

        blocksize = i_blocksize(inode);
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        head = page_buffers(page);
        block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
        for (bh = head, block_start = 0; bh != head || !block_start;
             block++, block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (!buffer_uptodate(bh))
                                partial = 1;
                        continue;
                }
                if (buffer_uptodate(bh))
                        continue;
                if (!buffer_mapped(bh)) {
                        err = ext4_get_block(inode, block, bh, 0);
                        if (err) {
                                SetPageError(page);
                                return err;
                        }
                        if (!buffer_mapped(bh)) {
                                zero_user(page, block_start, blocksize);
                                set_buffer_uptodate(bh);
                                continue;
                        }
                }
                BUG_ON(nr >= MAX_BUF_PER_PAGE);
                arr[nr++] = bh;
        }
        /* No I/O required */
        if (!nr)
                goto out;

        for (i = 0; i < nr; i++) {
                bh = arr[i];
                if (!bh_uptodate_or_lock(bh)) {
                        err = bh_submit_read(bh);
                        if (err)
                                return err;
                }
        }
out:
        if (!partial)
                SetPageUptodate(page);
        return 0;
}

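/*
 * mext_page_mkuptodate() does for the byte range [from, to) roughly what a
 * buffered read path would do: map missing buffers with ext4_get_block(),
 * zero buffers that map to holes, and synchronously read the rest, all
 * while keeping the page locked.
 */
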
/**
 * move_extent_per_page - Move extent data per page
 *
 * @o_filp:                     file structure of original file
 * @donor_inode:                donor inode
 * @orig_page_offset:           page index on original file
 * @donor_page_offset:          page index on donor file
 * @data_offset_in_page:        block index where data swapping starts
 * @block_len_in_page:          the number of blocks to be swapped
 * @unwritten:                  nonzero if the orig extent is unwritten
 * @err:                        pointer to save the error value
 *
 * Save the data in the original inode blocks and replace the original inode
 * extents with the donor inode extents by calling ext4_swap_extents().
 * Finally, write out the saved data in the new original inode blocks.
 * Return the number of replaced blocks; on failure, *err holds a negative
 * error value and the returned count may be smaller than requested.
 */
static int
move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
                     pgoff_t orig_page_offset, pgoff_t donor_page_offset,
                     int data_offset_in_page,
                     int block_len_in_page, int unwritten, int *err)
{
        struct inode *orig_inode = file_inode(o_filp);
        struct page *pagep[2] = {NULL, NULL};
        handle_t *handle;
        ext4_lblk_t orig_blk_offset, donor_blk_offset;
        unsigned long blocksize = orig_inode->i_sb->s_blocksize;
        unsigned int tmp_data_size, data_size, replaced_size;
        int i, err2, jblocks, retries = 0;
        int replaced_count = 0;
        int from = data_offset_in_page << orig_inode->i_blkbits;
        int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
        struct super_block *sb = orig_inode->i_sb;
        struct buffer_head *bh = NULL;

        /*
         * We need twice the number of ordinary journal buffers because
         * inode and donor_inode may each modify different metadata blocks.
         */
again:
        *err = 0;
        jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
        handle = ext4_journal_start(orig_inode, EXT4_HT_MOVE_EXTENTS, jblocks);
        if (IS_ERR(handle)) {
                *err = PTR_ERR(handle);
                return 0;
        }

        orig_blk_offset = orig_page_offset * blocks_per_page +
                data_offset_in_page;

        donor_blk_offset = donor_page_offset * blocks_per_page +
                data_offset_in_page;

        /* Calculate data_size */
        if ((orig_blk_offset + block_len_in_page - 1) ==
            ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
                /* Replace the last block */
                tmp_data_size = orig_inode->i_size & (blocksize - 1);
                /*
                 * If tmp_data_size is zero, i_size is a multiple of the
                 * blocksize, so use a full block instead.
                 */
                if (tmp_data_size == 0)
                        tmp_data_size = blocksize;

                data_size = tmp_data_size +
                        ((block_len_in_page - 1) << orig_inode->i_blkbits);
        } else
                data_size = block_len_in_page << orig_inode->i_blkbits;

        replaced_size = data_size;

        *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
                                     donor_page_offset, pagep);
        if (unlikely(*err < 0))
                goto stop_journal;
        /*
         * If the orig extent was unwritten, it can become initialized at any
         * time after i_data_sem was dropped.  In order to serialize with
         * delalloc we recheck the extents while holding the page locks; if
         * they are still unwritten, no data copy is necessary and we just
         * swap the data blocks between orig and donor.
         */
        if (unwritten) {
                ext4_double_down_write_data_sem(orig_inode, donor_inode);
                /* If any of the extents in range became initialized we have
                 * to fall back to data copying */
                unwritten = mext_check_coverage(orig_inode, orig_blk_offset,
                                                block_len_in_page, 1, err);
                if (*err)
                        goto drop_data_sem;

                unwritten &= mext_check_coverage(donor_inode, donor_blk_offset,
                                                 block_len_in_page, 1, err);
                if (*err)
                        goto drop_data_sem;

                if (!unwritten) {
                        ext4_double_up_write_data_sem(orig_inode, donor_inode);
                        goto data_copy;
                }
                if ((page_has_private(pagep[0]) &&
                     !try_to_release_page(pagep[0], 0)) ||
                    (page_has_private(pagep[1]) &&
                     !try_to_release_page(pagep[1], 0))) {
                        *err = -EBUSY;
                        goto drop_data_sem;
                }
                replaced_count = ext4_swap_extents(handle, orig_inode,
                                                   donor_inode, orig_blk_offset,
                                                   donor_blk_offset,
                                                   block_len_in_page, 1, err);
        drop_data_sem:
                ext4_double_up_write_data_sem(orig_inode, donor_inode);
                goto unlock_pages;
        }
data_copy:
        *err = mext_page_mkuptodate(pagep[0], from, from + replaced_size);
        if (*err)
                goto unlock_pages;

        /* At this point all buffers in range are uptodate and the old mapping
         * layout is no longer required; try to drop it now. */
        if ((page_has_private(pagep[0]) && !try_to_release_page(pagep[0], 0)) ||
            (page_has_private(pagep[1]) && !try_to_release_page(pagep[1], 0))) {
                *err = -EBUSY;
                goto unlock_pages;
        }
        ext4_double_down_write_data_sem(orig_inode, donor_inode);
        replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
                                           orig_blk_offset, donor_blk_offset,
                                           block_len_in_page, 1, err);
        ext4_double_up_write_data_sem(orig_inode, donor_inode);
        if (*err) {
                if (replaced_count) {
                        block_len_in_page = replaced_count;
                        replaced_size =
                                block_len_in_page << orig_inode->i_blkbits;
                } else
                        goto unlock_pages;
        }
        /* Perform all the necessary steps, similar to write_begin()/write_end(),
         * but keeping in mind that i_size will not change */
        if (!page_has_buffers(pagep[0]))
                create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
        bh = page_buffers(pagep[0]);
        for (i = 0; i < data_offset_in_page; i++)
                bh = bh->b_this_page;
        for (i = 0; i < block_len_in_page; i++) {
                *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
                if (*err < 0)
                        break;
                bh = bh->b_this_page;
        }
        if (!*err)
                *err = block_commit_write(pagep[0], from, from + replaced_size);

        if (unlikely(*err < 0))
                goto repair_branches;

        /* Even in case of data=writeback it is reasonable to pin the
         * inode to the transaction, to prevent unexpected data loss */
        *err = ext4_jbd2_inode_add_write(handle, orig_inode,
                        (loff_t)orig_page_offset << PAGE_SHIFT, replaced_size);

unlock_pages:
        unlock_page(pagep[0]);
        put_page(pagep[0]);
        unlock_page(pagep[1]);
        put_page(pagep[1]);
stop_journal:
        ext4_journal_stop(handle);
        if (*err == -ENOSPC &&
            ext4_should_retry_alloc(sb, &retries))
                goto again;
        /* The buffer was busy, probably because it is pinned to a journal
         * transaction; forcing a transaction commit may help to free it. */
        if (*err == -EBUSY && retries++ < 4 && EXT4_SB(sb)->s_journal &&
            jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal))
                goto again;
        return replaced_count;

repair_branches:
        /*
         * This should never ever happen!
         * The extents are already swapped, but we were not able to copy the
         * data.  Try to swap the extents back to their original places.
         */
        ext4_double_down_write_data_sem(orig_inode, donor_inode);
        replaced_count = ext4_swap_extents(handle, donor_inode, orig_inode,
                                           orig_blk_offset, donor_blk_offset,
                                           block_len_in_page, 0, &err2);
        ext4_double_up_write_data_sem(orig_inode, donor_inode);
        if (replaced_count != block_len_in_page) {
                ext4_error_inode_block(orig_inode, (sector_t)(orig_blk_offset),
                                       EIO, "Unable to copy data block,"
                                       " data will be lost.");
                *err = -EIO;
        }
        replaced_count = 0;
        goto unlock_pages;
}

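/*
 * move_extent_per_page() retries from the "again" label with a fresh
 * transaction when the allocation fails with -ENOSPC (as long as
 * ext4_should_retry_alloc() allows it), and up to four times on -EBUSY
 * after forcing a journal commit to release pinned buffers.
 */
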
/**
 * mext_check_arguments - Check whether move extent can be done
 *
 * @orig_inode:         original inode
 * @donor_inode:        donor inode
 * @orig_start:         logical start offset in blocks for orig
 * @donor_start:        logical start offset in blocks for donor
 * @len:                the number of blocks to be moved
 *
 * Check the arguments of ext4_move_extents() to determine whether the files
 * can be exchanged with each other.
 * Return 0 on success, or a negative error value on failure.
 */
static int
mext_check_arguments(struct inode *orig_inode,
                     struct inode *donor_inode, __u64 orig_start,
                     __u64 donor_start, __u64 *len)
{
        __u64 orig_eof, donor_eof;
        unsigned int blkbits = orig_inode->i_blkbits;
        unsigned int blocksize = 1 << blkbits;

        orig_eof = (i_size_read(orig_inode) + blocksize - 1) >> blkbits;
        donor_eof = (i_size_read(donor_inode) + blocksize - 1) >> blkbits;

        if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
                ext4_debug("ext4 move extent: suid or sgid is set"
                           " to donor file [ino:orig %lu, donor %lu]\n",
                           orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
                return -EPERM;

        /* Ext4 move extent does not support swapfiles */
        if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
                ext4_debug("ext4 move extent: The argument files should "
                        "not be swapfile [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EBUSY;
        }

        if (ext4_is_quota_file(orig_inode) && ext4_is_quota_file(donor_inode)) {
                ext4_debug("ext4 move extent: The argument files should "
                        "not be quota files [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EBUSY;
        }

        /* Ext4 move extent supports only extent-based files */
        if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
                ext4_debug("ext4 move extent: orig file is not extents "
                        "based file [ino:orig %lu]\n", orig_inode->i_ino);
                return -EOPNOTSUPP;
        } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
                ext4_debug("ext4 move extent: donor file is not extents "
                        "based file [ino:donor %lu]\n", donor_inode->i_ino);
                return -EOPNOTSUPP;
        }

        if ((!orig_inode->i_size) || (!donor_inode->i_size)) {
                ext4_debug("ext4 move extent: File size is 0 byte\n");
                return -EINVAL;
        }

        /* The start offsets must be at the same block offset within a page */
        if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
            (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
                ext4_debug("ext4 move extent: orig and donor's start "
                        "offsets are not aligned [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        if ((orig_start >= EXT_MAX_BLOCKS) ||
            (donor_start >= EXT_MAX_BLOCKS) ||
            (*len > EXT_MAX_BLOCKS) ||
            (donor_start + *len >= EXT_MAX_BLOCKS) ||
            (orig_start + *len >= EXT_MAX_BLOCKS))  {
                ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
                        "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }
        if (orig_eof <= orig_start)
                *len = 0;
        else if (orig_eof < orig_start + *len - 1)
                *len = orig_eof - orig_start;
        if (donor_eof <= donor_start)
                *len = 0;
        else if (donor_eof < donor_start + *len - 1)
                *len = donor_eof - donor_start;
        if (!*len) {
                ext4_debug("ext4 move extent: len should not be 0 "
                        "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
                        donor_inode->i_ino);
                return -EINVAL;
        }

        return 0;
}

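/*
 * For reference, ext4_move_extents() below is reached from userspace via
 * the EXT4_IOC_MOVE_EXT ioctl on the original file descriptor (see
 * ext4_ioctl()).  A rough, simplified sketch of the caller side; the field
 * values here are only illustrative:
 *
 *      struct move_extent me = {
 *              .donor_fd    = donor_fd,        // fd of the donor file
 *              .orig_start  = 0,               // first logical block in orig
 *              .donor_start = 0,               // first logical block in donor
 *              .len         = nr_blocks,       // number of blocks to move
 *      };
 *      ioctl(orig_fd, EXT4_IOC_MOVE_EXT, &me);
 *      // me.moved_len then holds the number of blocks actually moved.
 */
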
/**
 * ext4_move_extents - Exchange the specified range of a file
 *
 * @o_filp:             file structure of the original file
 * @d_filp:             file structure of the donor file
 * @orig_blk:           start offset in blocks for orig
 * @donor_blk:          start offset in blocks for donor
 * @len:                the number of blocks to be moved
 * @moved_len:          moved block length
 *
 * On success this function returns 0 and the moved block length is set in
 * @moved_len; otherwise it returns a negative error value.
 */
int
ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
                  __u64 donor_blk, __u64 len, __u64 *moved_len)
{
        struct inode *orig_inode = file_inode(o_filp);
        struct inode *donor_inode = file_inode(d_filp);
        struct ext4_ext_path *path = NULL;
        int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
        ext4_lblk_t o_end, o_start = orig_blk;
        ext4_lblk_t d_start = donor_blk;
        int ret;

        if (orig_inode->i_sb != donor_inode->i_sb) {
                ext4_debug("ext4 move extent: The argument files "
                        "should be in same FS [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        /* orig and donor should be different inodes */
        if (orig_inode == donor_inode) {
                ext4_debug("ext4 move extent: The argument files should not "
                        "be same inode [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        /* Regular file check */
        if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
                ext4_debug("ext4 move extent: The argument files should be "
                        "regular file [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        /* TODO: it's not obvious how to swap blocks for inodes with full
           journaling enabled */
        if (ext4_should_journal_data(orig_inode) ||
            ext4_should_journal_data(donor_inode)) {
                ext4_msg(orig_inode->i_sb, KERN_ERR,
                         "Online defrag not supported with data journaling");
                return -EOPNOTSUPP;
        }

        if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
                ext4_msg(orig_inode->i_sb, KERN_ERR,
                         "Online defrag not supported for encrypted files");
                return -EOPNOTSUPP;
        }

        /* Protect orig and donor inodes against a truncate */
        lock_two_nondirectories(orig_inode, donor_inode);

        /* Wait for all existing dio workers */
        inode_dio_wait(orig_inode);
        inode_dio_wait(donor_inode);

        /* Protect extent tree against block allocations via delalloc */
        ext4_double_down_write_data_sem(orig_inode, donor_inode);
        /* Check whether move_extent can be done in this filesystem environment */
        ret = mext_check_arguments(orig_inode, donor_inode, orig_blk,
                                    donor_blk, &len);
        if (ret)
                goto out;
        o_end = o_start + len;

        while (o_start < o_end) {
                struct ext4_extent *ex;
                ext4_lblk_t cur_blk, next_blk;
                pgoff_t orig_page_index, donor_page_index;
                int offset_in_page;
                int unwritten, cur_len;

                ret = get_ext_path(orig_inode, o_start, &path);
                if (ret)
                        goto out;
                ex = path[path->p_depth].p_ext;
                next_blk = ext4_ext_next_allocated_block(path);
                cur_blk = le32_to_cpu(ex->ee_block);
                cur_len = ext4_ext_get_actual_len(ex);
                /* Check for a hole before the start position */
                if (cur_blk + cur_len - 1 < o_start) {
                        if (next_blk == EXT_MAX_BLOCKS) {
                                o_start = o_end;
                                ret = -ENODATA;
                                goto out;
                        }
                        d_start += next_blk - o_start;
                        o_start = next_blk;
                        continue;
                /* Check for a hole after the start position */
                } else if (cur_blk > o_start) {
                        /* Skip the hole */
                        d_start += cur_blk - o_start;
                        o_start = cur_blk;
                        /* Is the extent inside the requested range? */
                        if (cur_blk >= o_end)
                                goto out;
                } else { /* in_range(o_start, o_blk, o_len) */
                        cur_len += cur_blk - o_start;
                }
                unwritten = ext4_ext_is_unwritten(ex);
                if (o_end - o_start < cur_len)
                        cur_len = o_end - o_start;

                orig_page_index = o_start >> (PAGE_SHIFT -
                                               orig_inode->i_blkbits);
                donor_page_index = d_start >> (PAGE_SHIFT -
                                               donor_inode->i_blkbits);
                offset_in_page = o_start % blocks_per_page;
                if (cur_len > blocks_per_page - offset_in_page)
                        cur_len = blocks_per_page - offset_in_page;
                /*
                 * Drop the semaphore to avoid the following problems:
                 * a. transaction deadlock among ext4_journal_start,
                 *    ->write_begin via pagefault, and jbd2_journal_commit
                 * b. racing with ->readpage, ->write_begin, and ext4_get_block
                 *    in move_extent_per_page
                 */
                ext4_double_up_write_data_sem(orig_inode, donor_inode);
                /* Swap original branches with new branches */
                move_extent_per_page(o_filp, donor_inode,
                                     orig_page_index, donor_page_index,
                                     offset_in_page, cur_len,
                                     unwritten, &ret);
                ext4_double_down_write_data_sem(orig_inode, donor_inode);
                if (ret < 0)
                        break;
                o_start += cur_len;
                d_start += cur_len;
        }
        *moved_len = o_start - orig_blk;
        if (*moved_len > len)
                *moved_len = len;

out:
        if (*moved_len) {
                ext4_discard_preallocations(orig_inode);
                ext4_discard_preallocations(donor_inode);
        }

        ext4_ext_drop_refs(path);
        kfree(path);
        ext4_double_up_write_data_sem(orig_inode, donor_inode);
        unlock_two_nondirectories(orig_inode, donor_inode);

        return ret;
}