linux/fs/ext4/move_extent.c
/*
 * Copyright (c) 2008,2009 NEC Software Tohoku, Ltd.
 * Written by Takashi Sato <t-sato@yk.jp.nec.com>
 *            Akira Fujita <a-fujita@rs.jp.nec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "ext4_extents.h"

/**
 * get_ext_path - Find an extent path for the designated logical block number.
 *
 * @inode:      the inode to be searched
 * @lblock:     logical block number to find an extent path for
 * @ppath:      pointer to an extent path pointer (for output)
 *
 * ext4_find_extent wrapper. Return 0 on success, or a negative error value
 * on failure.
 */
static inline int
get_ext_path(struct inode *inode, ext4_lblk_t lblock,
                struct ext4_ext_path **ppath)
{
        struct ext4_ext_path *path;

        path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
        if (IS_ERR(path))
                return PTR_ERR(path);
        if (path[ext_depth(inode)].p_ext == NULL) {
                ext4_ext_drop_refs(path);
                kfree(path);
                *ppath = NULL;
                return -ENODATA;
        }
        *ppath = path;
        return 0;
}

/**
 * ext4_double_down_write_data_sem - Acquire two inodes' write locks
 *                                   of i_data_sem
 *
 * Acquire the write lock of i_data_sem for both inodes.
 */
void
ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
{
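        /*
         * Take the two semaphores in inode-address order so that concurrent
         * callers always nest them the same way; the inner lock uses the
         * I_DATA_SEM_OTHER lockdep subclass to tell lockdep that the double
         * locking is intentional.
         */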
        if (first < second) {
                down_write(&EXT4_I(first)->i_data_sem);
                down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
        } else {
                down_write(&EXT4_I(second)->i_data_sem);
                down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
        }
}

/**
 * ext4_double_up_write_data_sem - Release two inodes' write lock of i_data_sem
 *
 * @orig_inode:         original inode whose lock is released first
 * @donor_inode:        donor inode whose lock is released second
 *
 * Release the write lock of i_data_sem of the two inodes (orig and donor).
 */
void
ext4_double_up_write_data_sem(struct inode *orig_inode,
                              struct inode *donor_inode)
{
        up_write(&EXT4_I(orig_inode)->i_data_sem);
        up_write(&EXT4_I(donor_inode)->i_data_sem);
}

/**
 * mext_check_coverage - Check that all extents in a range have the same type
 *
 * @inode:              inode in question
 * @from:               block offset of inode
 * @count:              block count to be checked
 * @unwritten:          extents expected to be unwritten
 * @err:                pointer to save error value
 *
 * Return 1 if all extents in the range have the expected type, and zero
 * otherwise.
 */
static int
mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
                    int unwritten, int *err)
{
        struct ext4_ext_path *path = NULL;
        struct ext4_extent *ext;
        int ret = 0;
        ext4_lblk_t last = from + count;

        while (from < last) {
                *err = get_ext_path(inode, from, &path);
                if (*err)
                        goto out;
                ext = path[ext_depth(inode)].p_ext;
                if (unwritten != ext4_ext_is_unwritten(ext))
                        goto out;
                from += ext4_ext_get_actual_len(ext);
                ext4_ext_drop_refs(path);
        }
        ret = 1;
out:
        ext4_ext_drop_refs(path);
        kfree(path);
        return ret;
}

/**
 * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
 *
 * @inode1:     the first inode structure
 * @inode2:     the second inode structure
 * @index1:     page index on @inode1
 * @index2:     page index on @inode2
 * @page:       result page vector
 *
 * Grab two locked pages for the inodes, taking them in inode order.
 */
static int
mext_page_double_lock(struct inode *inode1, struct inode *inode2,
                      pgoff_t index1, pgoff_t index2, struct page *page[2])
{
        struct address_space *mapping[2];
        unsigned fl = AOP_FLAG_NOFS;

        BUG_ON(!inode1 || !inode2);
        if (inode1 < inode2) {
                mapping[0] = inode1->i_mapping;
                mapping[1] = inode2->i_mapping;
        } else {
                pgoff_t tmp = index1;
                index1 = index2;
                index2 = tmp;
                mapping[0] = inode2->i_mapping;
                mapping[1] = inode1->i_mapping;
        }

        page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
        if (!page[0])
                return -ENOMEM;

        page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
        if (!page[1]) {
                unlock_page(page[0]);
                put_page(page[0]);
                return -ENOMEM;
        }
        /*
         * grab_cache_page_write_begin() may not wait on the page's writeback
         * if the BDI does not demand it. But it is reasonable to be very
         * conservative here and explicitly wait on the page's writeback.
         */
        wait_on_page_writeback(page[0]);
        wait_on_page_writeback(page[1]);
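        /*
         * The pages were grabbed in mapping (inode-address) order above;
         * swap them back so that page[0] always belongs to inode1, as the
         * caller expects.
         */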
        if (inode1 > inode2)
                swap(page[0], page[1]);

        return 0;
}

/* Force page buffers uptodate without dropping the page's lock */
static int
mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        sector_t block;
        struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
        unsigned int blocksize, block_start, block_end;
        int i, err, nr = 0, partial = 0;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        if (PageUptodate(page))
                return 0;

        blocksize = 1 << inode->i_blkbits;
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        head = page_buffers(page);
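        /* Number of the first filesystem block covered by this page */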
        block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
        for (bh = head, block_start = 0; bh != head || !block_start;
             block++, block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (!buffer_uptodate(bh))
                                partial = 1;
                        continue;
                }
                if (buffer_uptodate(bh))
                        continue;
                if (!buffer_mapped(bh)) {
                        err = ext4_get_block(inode, block, bh, 0);
                        if (err) {
                                SetPageError(page);
                                return err;
                        }
                        if (!buffer_mapped(bh)) {
                                zero_user(page, block_start, blocksize);
                                set_buffer_uptodate(bh);
                                continue;
                        }
                }
                BUG_ON(nr >= MAX_BUF_PER_PAGE);
                arr[nr++] = bh;
        }
        /* No I/O required */
        if (!nr)
                goto out;

        for (i = 0; i < nr; i++) {
                bh = arr[i];
                if (!bh_uptodate_or_lock(bh)) {
                        err = bh_submit_read(bh);
                        if (err)
                                return err;
                }
        }
out:
        if (!partial)
                SetPageUptodate(page);
        return 0;
}

/**
 * move_extent_per_page - Move extent data per page
 *
 * @o_filp:                     file structure of the original file
 * @donor_inode:                donor inode
 * @orig_page_offset:           page index on the original file
 * @donor_page_offset:          page index on the donor file
 * @data_offset_in_page:        block index where data swapping starts
 * @block_len_in_page:          the number of blocks to be swapped
 * @unwritten:                  whether the orig extent is unwritten or not
 * @err:                        pointer to save the return value
 *
 * Save the data in the original inode blocks and replace the original inode
 * extents with the donor inode extents by calling ext4_swap_extents().
 * Finally, write out the saved data in the new original inode blocks. Return
 * the replaced block count.
 */
static int
move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
                     pgoff_t orig_page_offset, pgoff_t donor_page_offset,
                     int data_offset_in_page,
                     int block_len_in_page, int unwritten, int *err)
{
        struct inode *orig_inode = file_inode(o_filp);
        struct page *pagep[2] = {NULL, NULL};
        handle_t *handle;
        ext4_lblk_t orig_blk_offset, donor_blk_offset;
        unsigned long blocksize = orig_inode->i_sb->s_blocksize;
        unsigned int tmp_data_size, data_size, replaced_size;
        int i, err2, jblocks, retries = 0;
        int replaced_count = 0;
        int from = data_offset_in_page << orig_inode->i_blkbits;
        int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
        struct super_block *sb = orig_inode->i_sb;
        struct buffer_head *bh = NULL;

        /*
         * We need twice the usual number of journal buffers because
         * orig_inode and donor_inode may each modify different metadata
         * blocks.
         */
again:
        *err = 0;
        jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
        handle = ext4_journal_start(orig_inode, EXT4_HT_MOVE_EXTENTS, jblocks);
        if (IS_ERR(handle)) {
                *err = PTR_ERR(handle);
                return 0;
        }

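        /* First block of the range inside each file, in filesystem blocks */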
        orig_blk_offset = orig_page_offset * blocks_per_page +
                data_offset_in_page;

        donor_blk_offset = donor_page_offset * blocks_per_page +
                data_offset_in_page;

        /* Calculate data_size */
        if ((orig_blk_offset + block_len_in_page - 1) ==
            ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
                /* Replace the last block */
                tmp_data_size = orig_inode->i_size & (blocksize - 1);
                /*
                 * If tmp_data_size is zero, the file size is a multiple of
                 * the block size, so use a whole block.
                 */
                if (tmp_data_size == 0)
                        tmp_data_size = blocksize;

                data_size = tmp_data_size +
                        ((block_len_in_page - 1) << orig_inode->i_blkbits);
        } else
                data_size = block_len_in_page << orig_inode->i_blkbits;

        replaced_size = data_size;

        *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
                                     donor_page_offset, pagep);
        if (unlikely(*err < 0))
                goto stop_journal;
        /*
         * If the orig extent was unwritten, it can become initialized at any
         * time after i_data_sem is dropped. To serialize with delalloc we
         * must recheck the extent while holding the page's lock; if it is
         * still unwritten, a data copy is not necessary and we just swap the
         * data blocks between orig and donor.
         */
        if (unwritten) {
                ext4_double_down_write_data_sem(orig_inode, donor_inode);
                /* If any of the extents in the range became initialized we
                 * have to fall back to data copying */
                unwritten = mext_check_coverage(orig_inode, orig_blk_offset,
                                                block_len_in_page, 1, err);
                if (*err)
                        goto drop_data_sem;

                unwritten &= mext_check_coverage(donor_inode, donor_blk_offset,
                                                 block_len_in_page, 1, err);
                if (*err)
                        goto drop_data_sem;

                if (!unwritten) {
                        ext4_double_up_write_data_sem(orig_inode, donor_inode);
                        goto data_copy;
                }
                if ((page_has_private(pagep[0]) &&
                     !try_to_release_page(pagep[0], 0)) ||
                    (page_has_private(pagep[1]) &&
                     !try_to_release_page(pagep[1], 0))) {
                        *err = -EBUSY;
                        goto drop_data_sem;
                }
                replaced_count = ext4_swap_extents(handle, orig_inode,
                                                   donor_inode, orig_blk_offset,
                                                   donor_blk_offset,
                                                   block_len_in_page, 1, err);
        drop_data_sem:
                ext4_double_up_write_data_sem(orig_inode, donor_inode);
                goto unlock_pages;
        }
data_copy:
        *err = mext_page_mkuptodate(pagep[0], from, from + replaced_size);
        if (*err)
                goto unlock_pages;

        /* At this point all buffers in the range are uptodate, the old
         * mapping layout is no longer required; try to drop it now. */
        if ((page_has_private(pagep[0]) && !try_to_release_page(pagep[0], 0)) ||
            (page_has_private(pagep[1]) && !try_to_release_page(pagep[1], 0))) {
                *err = -EBUSY;
                goto unlock_pages;
        }
        ext4_double_down_write_data_sem(orig_inode, donor_inode);
        replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
                                           orig_blk_offset, donor_blk_offset,
                                           block_len_in_page, 1, err);
        ext4_double_up_write_data_sem(orig_inode, donor_inode);
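        /*
         * A failed swap may still have exchanged the first replaced_count
         * blocks; write back and report only that part of the range.
         */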
        if (*err) {
                if (replaced_count) {
                        block_len_in_page = replaced_count;
                        replaced_size =
                                block_len_in_page << orig_inode->i_blkbits;
                } else
                        goto unlock_pages;
        }
        /* Perform all the necessary steps similar to write_begin()/write_end()
         * but keep in mind that i_size will not change */
        if (!page_has_buffers(pagep[0]))
                create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
        bh = page_buffers(pagep[0]);
        for (i = 0; i < data_offset_in_page; i++)
                bh = bh->b_this_page;
        for (i = 0; i < block_len_in_page; i++) {
                *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
                if (*err < 0)
                        break;
                bh = bh->b_this_page;
        }
        if (!*err)
                *err = block_commit_write(pagep[0], from, from + replaced_size);

        if (unlikely(*err < 0))
                goto repair_branches;

        /* Even in the data=writeback case it is reasonable to pin the
         * inode to the transaction, to prevent unexpected data loss */
        *err = ext4_jbd2_inode_add_write(handle, orig_inode);

unlock_pages:
        unlock_page(pagep[0]);
        put_page(pagep[0]);
        unlock_page(pagep[1]);
        put_page(pagep[1]);
stop_journal:
        ext4_journal_stop(handle);
        if (*err == -ENOSPC &&
            ext4_should_retry_alloc(sb, &retries))
                goto again;
        /* The buffer was busy, probably because it is pinned to a journal
         * transaction; forcing a transaction commit may help to free it. */
        if (*err == -EBUSY && retries++ < 4 && EXT4_SB(sb)->s_journal &&
            jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal))
                goto again;
        return replaced_count;

repair_branches:
        /*
         * This should never ever happen!
         * Extents are swapped already, but we are not able to copy data.
         * Try to swap the extents back to their original places.
         */
        ext4_double_down_write_data_sem(orig_inode, donor_inode);
        replaced_count = ext4_swap_extents(handle, donor_inode, orig_inode,
                                           orig_blk_offset, donor_blk_offset,
                                           block_len_in_page, 0, &err2);
        ext4_double_up_write_data_sem(orig_inode, donor_inode);
        if (replaced_count != block_len_in_page) {
                EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset),
                                       "Unable to copy data block,"
                                       " data will be lost.");
                *err = -EIO;
        }
        replaced_count = 0;
        goto unlock_pages;
}

/**
 * mext_check_arguments - Check whether move extent can be done
 *
 * @orig_inode:         original inode
 * @donor_inode:        donor inode
 * @orig_start:         logical start offset in block for orig
 * @donor_start:        logical start offset in block for donor
 * @len:                the number of blocks to be moved
 *
 * Check the arguments of ext4_move_extents() to determine whether the files
 * can be exchanged with each other.
 * Return 0 on success, or a negative error value on failure.
 */
static int
mext_check_arguments(struct inode *orig_inode,
                     struct inode *donor_inode, __u64 orig_start,
                     __u64 donor_start, __u64 *len)
{
        __u64 orig_eof, donor_eof;
        unsigned int blkbits = orig_inode->i_blkbits;
        unsigned int blocksize = 1 << blkbits;

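        /* File sizes rounded up to whole blocks */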
        orig_eof = (i_size_read(orig_inode) + blocksize - 1) >> blkbits;
        donor_eof = (i_size_read(donor_inode) + blocksize - 1) >> blkbits;

        if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
                ext4_debug("ext4 move extent: suid or sgid is set"
                           " to donor file [ino:orig %lu, donor %lu]\n",
                           orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
                return -EPERM;

        /* Ext4 move extent does not support swapfiles */
        if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
                ext4_debug("ext4 move extent: The argument files should "
                        "not be swapfile [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EBUSY;
        }

        if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
                ext4_debug("ext4 move extent: The argument files should "
                        "not be quota files [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EBUSY;
        }

        /* Ext4 move extent supports only extent-based files */
        if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
                ext4_debug("ext4 move extent: orig file is not extents "
                        "based file [ino:orig %lu]\n", orig_inode->i_ino);
                return -EOPNOTSUPP;
        } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
                ext4_debug("ext4 move extent: donor file is not extents "
                        "based file [ino:donor %lu]\n", donor_inode->i_ino);
                return -EOPNOTSUPP;
        }

        if ((!orig_inode->i_size) || (!donor_inode->i_size)) {
                ext4_debug("ext4 move extent: File size is 0 byte\n");
                return -EINVAL;
        }

        /* Start offsets must be the same within a page */
        if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
            (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
                ext4_debug("ext4 move extent: orig and donor's start "
                        "offsets are not aligned [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        if ((orig_start >= EXT_MAX_BLOCKS) ||
            (donor_start >= EXT_MAX_BLOCKS) ||
            (*len > EXT_MAX_BLOCKS) ||
            (donor_start + *len >= EXT_MAX_BLOCKS) ||
            (orig_start + *len >= EXT_MAX_BLOCKS)) {
                ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
                        "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }
        if (orig_eof < orig_start + *len - 1)
                *len = orig_eof - orig_start;
        if (donor_eof < donor_start + *len - 1)
                *len = donor_eof - donor_start;
        if (!*len) {
                ext4_debug("ext4 move extent: len should not be 0 "
                        "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
                        donor_inode->i_ino);
                return -EINVAL;
        }

        return 0;
}

/**
 * ext4_move_extents - Exchange the specified range of a file
 *
 * @o_filp:             file structure of the original file
 * @d_filp:             file structure of the donor file
 * @orig_blk:           start offset in block for orig
 * @donor_blk:          start offset in block for donor
 * @len:                the number of blocks to be moved
 * @moved_len:          moved block length
 *
 * This function returns 0 and sets the moved block length in @moved_len
 * on success; otherwise it returns an error value.
 */
int
ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
                  __u64 donor_blk, __u64 len, __u64 *moved_len)
{
        struct inode *orig_inode = file_inode(o_filp);
        struct inode *donor_inode = file_inode(d_filp);
        struct ext4_ext_path *path = NULL;
        int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
        ext4_lblk_t o_end, o_start = orig_blk;
        ext4_lblk_t d_start = donor_blk;
        int ret;

        if (orig_inode->i_sb != donor_inode->i_sb) {
                ext4_debug("ext4 move extent: The argument files "
                        "should be in same FS [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        /* orig and donor should be different inodes */
        if (orig_inode == donor_inode) {
                ext4_debug("ext4 move extent: The argument files should not "
                        "be same inode [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        /* Regular file check */
        if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
                ext4_debug("ext4 move extent: The argument files should be "
                        "regular file [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }

        /* TODO: it's not obvious how to swap blocks for inodes with full
         * journaling enabled */
        if (ext4_should_journal_data(orig_inode) ||
            ext4_should_journal_data(donor_inode)) {
                ext4_msg(orig_inode->i_sb, KERN_ERR,
                         "Online defrag not supported with data journaling");
                return -EOPNOTSUPP;
        }

        if (ext4_encrypted_inode(orig_inode) ||
            ext4_encrypted_inode(donor_inode)) {
                ext4_msg(orig_inode->i_sb, KERN_ERR,
                         "Online defrag not supported for encrypted files");
                return -EOPNOTSUPP;
        }

        /* Protect orig and donor inodes against a truncate */
        lock_two_nondirectories(orig_inode, donor_inode);

        /* Wait for all existing dio workers */
        ext4_inode_block_unlocked_dio(orig_inode);
        ext4_inode_block_unlocked_dio(donor_inode);
        inode_dio_wait(orig_inode);
        inode_dio_wait(donor_inode);

        /* Protect the extent tree against block allocations via delalloc */
        ext4_double_down_write_data_sem(orig_inode, donor_inode);
        /* Check whether move_extent can be done in this filesystem environment */
        ret = mext_check_arguments(orig_inode, donor_inode, orig_blk,
                                    donor_blk, &len);
        if (ret)
                goto out;
        o_end = o_start + len;

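        /*
         * Walk the original file extent by extent; each iteration swaps at
         * most one page worth of blocks via move_extent_per_page().
         */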
        while (o_start < o_end) {
                struct ext4_extent *ex;
                ext4_lblk_t cur_blk, next_blk;
                pgoff_t orig_page_index, donor_page_index;
                int offset_in_page;
                int unwritten, cur_len;

                ret = get_ext_path(orig_inode, o_start, &path);
                if (ret)
                        goto out;
                ex = path[path->p_depth].p_ext;
                next_blk = ext4_ext_next_allocated_block(path);
                cur_blk = le32_to_cpu(ex->ee_block);
                cur_len = ext4_ext_get_actual_len(ex);
                /* Check for a hole before the start position */
                if (cur_blk + cur_len - 1 < o_start) {
                        if (next_blk == EXT_MAX_BLOCKS) {
                                o_start = o_end;
                                ret = -ENODATA;
                                goto out;
                        }
                        d_start += next_blk - o_start;
                        o_start = next_blk;
                        continue;
                /* Check for a hole after the start position */
                } else if (cur_blk > o_start) {
                        /* Skip the hole */
                        d_start += cur_blk - o_start;
                        o_start = cur_blk;
                        /* Is the extent inside the requested range? */
                        if (cur_blk >= o_end)
                                goto out;
                } else { /* in_range(o_start, o_blk, o_len) */
                        cur_len += cur_blk - o_start;
                }
                unwritten = ext4_ext_is_unwritten(ex);
                if (o_end - o_start < cur_len)
                        cur_len = o_end - o_start;

                orig_page_index = o_start >> (PAGE_SHIFT -
                                               orig_inode->i_blkbits);
                donor_page_index = d_start >> (PAGE_SHIFT -
                                               donor_inode->i_blkbits);
                offset_in_page = o_start % blocks_per_page;
                if (cur_len > blocks_per_page - offset_in_page)
                        cur_len = blocks_per_page - offset_in_page;
                /*
                 * Release the semaphore to avoid the following problems:
                 * a. transaction deadlock among ext4_journal_start,
                 *    ->write_begin via pagefault, and jbd2_journal_commit
                 * b. racing with ->readpage, ->write_begin, and ext4_get_block
                 *    in move_extent_per_page
                 */
                ext4_double_up_write_data_sem(orig_inode, donor_inode);
                /* Swap original branches with new branches */
                move_extent_per_page(o_filp, donor_inode,
                                     orig_page_index, donor_page_index,
                                     offset_in_page, cur_len,
                                     unwritten, &ret);
                ext4_double_down_write_data_sem(orig_inode, donor_inode);
                if (ret < 0)
                        break;
                o_start += cur_len;
                d_start += cur_len;
        }
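        /* Never report more blocks than the caller asked for (e.g. when a
         * hole is skipped past the end of the requested range). */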
        *moved_len = o_start - orig_blk;
        if (*moved_len > len)
                *moved_len = len;

out:
        if (*moved_len) {
                ext4_discard_preallocations(orig_inode);
                ext4_discard_preallocations(donor_inode);
        }

        ext4_ext_drop_refs(path);
        kfree(path);
        ext4_double_up_write_data_sem(orig_inode, donor_inode);
        ext4_inode_resume_unlocked_dio(orig_inode);
        ext4_inode_resume_unlocked_dio(donor_inode);
        unlock_two_nondirectories(orig_inode, donor_inode);

        return ret;
}
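/*
 * Note: ext4_move_extents() is reached via the EXT4_IOC_MOVE_EXT ioctl
 * (see ext4_ioctl()), which is the interface used by the e4defrag(8)
 * online defragmentation tool.
 */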