linux/fs/ext4/migrate.c
// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of a run of contiguous blocks that can be
 * represented by a single extent
 */
struct migrate_struct {
        ext4_lblk_t first_block, last_block, curr_block;
        ext4_fsblk_t first_pblock, last_pblock;
};

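/*
 * Flush the extent range accumulated in @lb into the extent tree of
 * the (temporary) inode, topping up the journal credits beforehand so
 * they do not pile up across iterations. Resets lb->first_pblock so
 * the caller starts tracking a fresh range.
 */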
static int finish_range(handle_t *handle, struct inode *inode,
                                struct migrate_struct *lb)
{
        int retval = 0, needed;
        struct ext4_extent newext;
        struct ext4_ext_path *path;

        if (lb->first_pblock == 0)
                return 0;

        /* Add the extent to the temp inode */
        newext.ee_block = cpu_to_le32(lb->first_block);
        newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
        ext4_ext_store_pblock(&newext, lb->first_pblock);
        /* Locking only for convenience since we are operating on the temp inode */
        down_write(&EXT4_I(inode)->i_data_sem);
        path = ext4_find_extent(inode, lb->first_block, NULL, 0);
        if (IS_ERR(path)) {
                retval = PTR_ERR(path);
                path = NULL;
                goto err_out;
        }

        /*
         * Calculate the credits needed to insert this extent. Since we
         * are doing this in a loop we may accumulate extra credits, but
         * below we try not to accumulate too many of them by restarting
         * the journal.
         */
        needed = ext4_ext_calc_credits_for_single_extent(inode,
                    lb->last_block - lb->first_block + 1, path);

        /*
         * Make sure the credits we accumulated are not really high
         */
        if (needed && ext4_handle_has_enough_credits(handle,
                                                EXT4_RESERVE_TRANS_BLOCKS)) {
                up_write((&EXT4_I(inode)->i_data_sem));
                retval = ext4_journal_restart(handle, needed);
                down_write((&EXT4_I(inode)->i_data_sem));
                if (retval)
                        goto err_out;
        } else if (needed) {
                retval = ext4_journal_extend(handle, needed);
                if (retval) {
                        /*
                         * If not able to extend the journal, restart it
                         */
                        up_write((&EXT4_I(inode)->i_data_sem));
                        retval = ext4_journal_restart(handle, needed);
                        down_write((&EXT4_I(inode)->i_data_sem));
                        if (retval)
                                goto err_out;
                }
        }
        retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
        up_write((&EXT4_I(inode)->i_data_sem));
        ext4_ext_drop_refs(path);
        kfree(path);
        lb->first_pblock = 0;
        return retval;
}

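/*
 * Account one physical block at logical block lb->curr_block: either
 * extend the range currently tracked in @lb, or flush that range via
 * finish_range() and start a new one at @pblock.
 */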
static int update_extent_range(handle_t *handle, struct inode *inode,
                               ext4_fsblk_t pblock, struct migrate_struct *lb)
{
        int retval;
        /*
         * See if we can add on to the existing range (if it exists)
         */
        if (lb->first_pblock &&
                (lb->last_pblock+1 == pblock) &&
                (lb->last_block+1 == lb->curr_block)) {
                lb->last_pblock = pblock;
                lb->last_block = lb->curr_block;
                lb->curr_block++;
                return 0;
        }
        /*
         * Start a new range.
         */
        retval = finish_range(handle, inode, lb);
        lb->first_pblock = lb->last_pblock = pblock;
        lb->first_block = lb->last_block = lb->curr_block;
        lb->curr_block++;
        return retval;
}

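/*
 * Walk a single indirect block; each entry maps one data block. A
 * hole (zero entry) just advances the logical block number.
 */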
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
                                   ext4_fsblk_t pblock,
                                   struct migrate_struct *lb)
{
        struct buffer_head *bh;
        __le32 *i_data;
        int i, retval = 0;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        bh = sb_bread(inode->i_sb, pblock);
        if (!bh)
                return -EIO;

        i_data = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (i_data[i]) {
                        retval = update_extent_range(handle, inode,
                                                le32_to_cpu(i_data[i]), lb);
                        if (retval)
                                break;
                } else {
                        lb->curr_block++;
                }
        }
        put_bh(bh);
        return retval;
}

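/*
 * Walk a double indirect block; each entry points to an indirect
 * block, so a hole here skips an indirect block's worth
 * (max_entries) of logical blocks.
 */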
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
                                    ext4_fsblk_t pblock,
                                    struct migrate_struct *lb)
{
        struct buffer_head *bh;
        __le32 *i_data;
        int i, retval = 0;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        bh = sb_bread(inode->i_sb, pblock);
        if (!bh)
                return -EIO;

        i_data = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (i_data[i]) {
                        retval = update_ind_extent_range(handle, inode,
                                                le32_to_cpu(i_data[i]), lb);
                        if (retval)
                                break;
                } else {
                        /* Only update the file block number */
                        lb->curr_block += max_entries;
                }
        }
        put_bh(bh);
        return retval;
}

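/*
 * Walk a triple indirect block; each entry points to a double
 * indirect block, so a hole here skips max_entries * max_entries
 * logical blocks.
 */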
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
                                    ext4_fsblk_t pblock,
                                    struct migrate_struct *lb)
{
        struct buffer_head *bh;
        __le32 *i_data;
        int i, retval = 0;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        bh = sb_bread(inode->i_sb, pblock);
        if (!bh)
                return -EIO;

        i_data = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (i_data[i]) {
                        retval = update_dind_extent_range(handle, inode,
                                                le32_to_cpu(i_data[i]), lb);
                        if (retval)
                                break;
                } else {
                        /* Only update the file block number */
                        lb->curr_block += max_entries * max_entries;
                }
        }
        put_bh(bh);
        return retval;
}

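/*
 * Make sure the handle has enough credits left to free a block
 * (and update quota), extending or restarting the transaction if
 * necessary.
 */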
static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
        int retval = 0, needed;

        if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
                return 0;
        /*
         * We are freeing blocks. During this we touch the
         * superblock, group descriptor and block bitmap,
         * so allocate credits for 3 blocks. We may also
         * update quota (user and group).
         */
        needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

        if (ext4_journal_extend(handle, needed) != 0)
                retval = ext4_journal_restart(handle, needed);

        return retval;
}

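/*
 * Free a double indirect block and the indirect blocks it points to.
 * The data blocks themselves are not freed; they are now owned by the
 * extent tree of the migrated inode.
 */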
static int free_dind_blocks(handle_t *handle,
                                struct inode *inode, __le32 i_data)
{
        int i;
        __le32 *tmp_idata;
        struct buffer_head *bh;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
        if (!bh)
                return -EIO;

        tmp_idata = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (tmp_idata[i]) {
                        extend_credit_for_blkdel(handle, inode);
                        ext4_free_blocks(handle, inode, NULL,
                                         le32_to_cpu(tmp_idata[i]), 1,
                                         EXT4_FREE_BLOCKS_METADATA |
                                         EXT4_FREE_BLOCKS_FORGET);
                }
        }
        put_bh(bh);
        extend_credit_for_blkdel(handle, inode);
        ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
                         EXT4_FREE_BLOCKS_METADATA |
                         EXT4_FREE_BLOCKS_FORGET);
        return 0;
}

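/*
 * Free a triple indirect block together with all double indirect and
 * indirect blocks below it.
 */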
static int free_tind_blocks(handle_t *handle,
                                struct inode *inode, __le32 i_data)
{
        int i, retval = 0;
        __le32 *tmp_idata;
        struct buffer_head *bh;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
        if (!bh)
                return -EIO;

        tmp_idata = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (tmp_idata[i]) {
                        retval = free_dind_blocks(handle,
                                        inode, tmp_idata[i]);
                        if (retval) {
                                put_bh(bh);
                                return retval;
                        }
                }
        }
        put_bh(bh);
        extend_credit_for_blkdel(handle, inode);
        ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
                         EXT4_FREE_BLOCKS_METADATA |
                         EXT4_FREE_BLOCKS_FORGET);
        return 0;
}

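/*
 * Free the old indirect, double indirect and triple indirect metadata
 * blocks; @i_data holds the values saved from the original inode's
 * i_data[EXT4_IND_BLOCK..EXT4_TIND_BLOCK] before the swap.
 */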
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
        int retval;

        /* ei->i_data[EXT4_IND_BLOCK] */
        if (i_data[0]) {
                extend_credit_for_blkdel(handle, inode);
                ext4_free_blocks(handle, inode, NULL,
                                le32_to_cpu(i_data[0]), 1,
                                 EXT4_FREE_BLOCKS_METADATA |
                                 EXT4_FREE_BLOCKS_FORGET);
        }

        /* ei->i_data[EXT4_DIND_BLOCK] */
        if (i_data[1]) {
                retval = free_dind_blocks(handle, inode, i_data[1]);
                if (retval)
                        return retval;
        }

        /* ei->i_data[EXT4_TIND_BLOCK] */
        if (i_data[2]) {
                retval = free_tind_blocks(handle, inode, i_data[2]);
                if (retval)
                        return retval;
        }
        return 0;
}

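/*
 * Swap the extent tree built in tmp_inode into the original inode and
 * free the original inode's indirect metadata blocks. Fails with
 * -EAGAIN if a racing block allocation cleared EXT4_STATE_EXT_MIGRATE
 * while the migration was in progress.
 */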
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
                                                struct inode *tmp_inode)
{
        int retval;
        __le32  i_data[3];
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

        /*
         * One credit accounted for writing the
         * i_data field of the original inode
         */
        retval = ext4_journal_extend(handle, 1);
        if (retval) {
                retval = ext4_journal_restart(handle, 1);
                if (retval)
                        goto err_out;
        }

        i_data[0] = ei->i_data[EXT4_IND_BLOCK];
        i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
        i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

        down_write(&EXT4_I(inode)->i_data_sem);
        /*
         * If EXT4_STATE_EXT_MIGRATE is cleared, a block allocation
         * happened after we started the migrate. We need to
         * fail the migrate.
         */
        if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
                retval = -EAGAIN;
                up_write(&EXT4_I(inode)->i_data_sem);
                goto err_out;
        } else
                ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
        /*
         * We have the extent map built with the tmp inode.
         * Now copy the i_data across.
         */
        ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
        memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

        /*
         * Update i_blocks with the new blocks that got
         * allocated while adding extents for extent index
         * blocks.
         *
         * While converting to extents we need not
         * update the original inode i_blocks for extent blocks
         * via quota APIs. The quota update happened via tmp_inode already.
         */
        spin_lock(&inode->i_lock);
        inode->i_blocks += tmp_inode->i_blocks;
        spin_unlock(&inode->i_lock);
        up_write(&EXT4_I(inode)->i_data_sem);

        /*
         * We mark the inode dirty after, because we decrement the
         * i_blocks when freeing the indirect meta-data blocks
         */
        retval = free_ind_block(handle, inode, i_data);
        ext4_mark_inode_dirty(handle, inode);

err_out:
        return retval;
}

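/*
 * Recursively free the index block referenced by @ix and every
 * lower-level index block hanging off it.
 */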
static int free_ext_idx(handle_t *handle, struct inode *inode,
                                        struct ext4_extent_idx *ix)
{
        int i, retval = 0;
        ext4_fsblk_t block;
        struct buffer_head *bh;
        struct ext4_extent_header *eh;

        block = ext4_idx_pblock(ix);
        bh = sb_bread(inode->i_sb, block);
        if (!bh)
                return -EIO;

        eh = (struct ext4_extent_header *)bh->b_data;
        if (eh->eh_depth != 0) {
                ix = EXT_FIRST_INDEX(eh);
                for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
                        retval = free_ext_idx(handle, inode, ix);
                        if (retval)
                                break;
                }
        }
        put_bh(bh);
        extend_credit_for_blkdel(handle, inode);
        ext4_free_blocks(handle, inode, NULL, block, 1,
                         EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
        return retval;
}

/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
        int i, retval = 0;
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
        struct ext4_extent_idx *ix;

        if (eh->eh_depth == 0)
                /*
                 * No extra blocks allocated for extent meta data
                 */
                return 0;
        ix = EXT_FIRST_INDEX(eh);
        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
                retval = free_ext_idx(handle, inode, ix);
                if (retval)
                        return retval;
        }
        return retval;
}

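/*
 * Convert an indirect-block-mapped inode to use extents; reached via
 * the EXT4_IOC_MIGRATE ioctl. The extent tree is first built in a
 * temporary orphan inode out of the data blocks the file already
 * owns; on success it is swapped into the original inode and the old
 * indirect metadata blocks are freed.
 */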
int ext4_ext_migrate(struct inode *inode)
{
        handle_t *handle;
        int retval = 0, i;
        __le32 *i_data;
        struct ext4_inode_info *ei;
        struct inode *tmp_inode = NULL;
        struct migrate_struct lb;
        unsigned long max_entries;
        __u32 goal;
        uid_t owner[2];

        /*
         * If the filesystem does not support extents, or the inode
         * already is extent-based, error out.
         */
        if (!ext4_has_feature_extents(inode->i_sb) ||
            (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return -EINVAL;

        if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
                /*
                 * don't migrate fast symlink
                 */
                return retval;

        /*
         * Worst case we can touch the allocation bitmaps, a bgd
         * block, and a block to link in the orphan list. We do need
         * to worry about credits for modifying the quota inode.
         */
        handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
                4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

        if (IS_ERR(handle)) {
                retval = PTR_ERR(handle);
                return retval;
        }
        goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
                EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
        owner[0] = i_uid_read(inode);
        owner[1] = i_gid_read(inode);
        tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
                                   S_IFREG, NULL, goal, owner, 0);
        if (IS_ERR(tmp_inode)) {
                retval = PTR_ERR(tmp_inode);
                ext4_journal_stop(handle);
                return retval;
        }
        i_size_write(tmp_inode, i_size_read(inode));
        /*
         * Set the i_nlink to zero so it will be deleted later
         * when we drop the inode reference.
         */
        clear_nlink(tmp_inode);

        ext4_ext_tree_init(handle, tmp_inode);
        ext4_orphan_add(handle, tmp_inode);
        ext4_journal_stop(handle);

        /*
         * start with one credit accounted for
         * superblock modification.
         *
         * For the tmp_inode we already have committed the
         * transaction that created the inode. Later, as and
         * when we add extents, we extend the journal.
         */
        /*
         * Even though we take i_mutex we can still cause block
         * allocation via mmap write to holes. If we have allocated
         * new blocks we fail the migrate.  New block allocation will
         * clear the EXT4_STATE_EXT_MIGRATE flag.  The flag is updated
         * with i_data_sem held to prevent racing with block
         * allocation.
         */
        down_read(&EXT4_I(inode)->i_data_sem);
        ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
        up_read((&EXT4_I(inode)->i_data_sem));

        handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
        if (IS_ERR(handle)) {
                /*
                 * It is impossible to update on-disk structures without
                 * a handle, so just roll back the in-core changes and
                 * leave other work to orphan_list_cleanup().
                 */
                ext4_orphan_del(NULL, tmp_inode);
                retval = PTR_ERR(handle);
                goto out;
        }

        ei = EXT4_I(inode);
        i_data = ei->i_data;
        memset(&lb, 0, sizeof(lb));

        /* 32 bit block address, 4 bytes */
        max_entries = inode->i_sb->s_blocksize >> 2;
        for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
                if (i_data[i]) {
                        retval = update_extent_range(handle, tmp_inode,
                                                le32_to_cpu(i_data[i]), &lb);
                        if (retval)
                                goto err_out;
                } else
                        lb.curr_block++;
        }
        if (i_data[EXT4_IND_BLOCK]) {
                retval = update_ind_extent_range(handle, tmp_inode,
                                le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
                if (retval)
                        goto err_out;
        } else
                lb.curr_block += max_entries;
        if (i_data[EXT4_DIND_BLOCK]) {
                retval = update_dind_extent_range(handle, tmp_inode,
                                le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
                if (retval)
                        goto err_out;
        } else
                lb.curr_block += max_entries * max_entries;
        if (i_data[EXT4_TIND_BLOCK]) {
                retval = update_tind_extent_range(handle, tmp_inode,
                                le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
                if (retval)
                        goto err_out;
        }
        /*
         * Build the last extent
         */
        retval = finish_range(handle, tmp_inode, &lb);
err_out:
        if (retval)
                /*
                 * On failure, delete the extent information built in
                 * the tmp_inode
                 */
                free_ext_block(handle, tmp_inode);
        else {
                retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
                if (retval)
                        /*
                         * If we fail to swap the inode data, free the
                         * extent details of the tmp_inode
                         */
                        free_ext_block(handle, tmp_inode);
        }

        /* We mark the tmp_inode dirty via ext4_ext_tree_init. */
        if (ext4_journal_extend(handle, 1) != 0)
                ext4_journal_restart(handle, 1);

        /*
         * Mark the tmp_inode as of size zero
         */
        i_size_write(tmp_inode, 0);

        /*
         * set the i_blocks count to zero
         * so that the ext4_evict_inode() does the
         * right job
         *
         * We don't need to take the i_lock because
         * the inode is not visible to user space.
         */
        tmp_inode->i_blocks = 0;

        /* Reset the extent details */
        ext4_ext_tree_init(handle, tmp_inode);
        ext4_journal_stop(handle);
out:
        unlock_new_inode(tmp_inode);
        iput(tmp_inode);

        return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array.
 * Only inodes whose data is covered by at most one extent mapping
 * logical blocks below EXT4_NDIR_BLOCKS can be converted.
 */
int ext4_ind_migrate(struct inode *inode)
{
        struct ext4_extent_header       *eh;
        struct ext4_super_block         *es = EXT4_SB(inode->i_sb)->s_es;
        struct ext4_inode_info          *ei = EXT4_I(inode);
        struct ext4_extent              *ex;
        unsigned int                    i, len;
        ext4_lblk_t                     start, end;
        ext4_fsblk_t                    blk;
        handle_t                        *handle;
        int                             ret;

        if (!ext4_has_feature_extents(inode->i_sb) ||
            (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return -EINVAL;

        if (ext4_has_feature_bigalloc(inode->i_sb))
                return -EOPNOTSUPP;

        /*
         * In order to get correct extent info, force all delayed allocation
         * blocks to be allocated; otherwise delayed-allocation blocks would
         * not yet be reflected in the extent tree and could bypass the
         * checks on the extent header below.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                ext4_alloc_da_blocks(inode);

        handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        down_write(&EXT4_I(inode)->i_data_sem);
        ret = ext4_ext_check_inode(inode);
        if (ret)
                goto errout;

        eh = ext_inode_hdr(inode);
        ex  = EXT_FIRST_EXTENT(eh);
        if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
            eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
                ret = -EOPNOTSUPP;
                goto errout;
        }
        if (eh->eh_entries == 0)
                blk = len = start = end = 0;
        else {
                len = le16_to_cpu(ex->ee_len);
                blk = ext4_ext_pblock(ex);
                start = le32_to_cpu(ex->ee_block);
                end = start + len - 1;
                if (end >= EXT4_NDIR_BLOCKS) {
                        ret = -EOPNOTSUPP;
                        goto errout;
                }
        }

        ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
        memset(ei->i_data, 0, sizeof(ei->i_data));
        for (i = start; i <= end; i++)
                ei->i_data[i] = cpu_to_le32(blk++);
        ext4_mark_inode_dirty(handle, inode);
errout:
        ext4_journal_stop(handle);
        up_write(&EXT4_I(inode)->i_data_sem);
        return ret;
}