   1/*
   2 *  linux/fs/ext4/ialloc.c
   3 *
   4 * Copyright (C) 1992, 1993, 1994, 1995
   5 * Remy Card (card@masi.ibp.fr)
   6 * Laboratoire MASI - Institut Blaise Pascal
   7 * Universite Pierre et Marie Curie (Paris VI)
   8 *
   9 *  BSD ufs-inspired inode and directory allocation by
  10 *  Stephen Tweedie (sct@redhat.com), 1993
  11 *  Big-endian to little-endian byte-swapping/bitmaps by
  12 *        David S. Miller (davem@caip.rutgers.edu), 1995
  13 */
  14
  15#include <linux/time.h>
  16#include <linux/fs.h>
  17#include <linux/jbd2.h>
  18#include <linux/stat.h>
  19#include <linux/string.h>
  20#include <linux/quotaops.h>
  21#include <linux/buffer_head.h>
  22#include <linux/random.h>
  23#include <linux/bitops.h>
  24#include <linux/blkdev.h>
  25#include <asm/byteorder.h>
  26
  27#include "ext4.h"
  28#include "ext4_jbd2.h"
  29#include "xattr.h"
  30#include "acl.h"
  31
  32#include <trace/events/ext4.h>
  33
  34/*
   35 * ialloc.c contains the inode allocation and deallocation routines
  36 */
  37
  38/*
   39 * The free inodes are managed by bitmaps.  A file system contains several
   40 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
   41 * block for inodes, N blocks for the inode table and data blocks.
   42 *
   43 * The file system contains group descriptors which are located after the
   44 * super block.  Each descriptor contains the number of the bitmap block and
   45 * the free blocks count for the group.
  46 */
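
/*
 * Rough sketch of the layout described above (illustrative only; the actual
 * locations of the bitmaps and the inode table are given by the group
 * descriptor, and with flex_bg they may live in another group):
 *
 *	[ super ][ group descs ][ block bitmap ][ inode bitmap ][ inode table ][ data ]
 */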
  47
  48/*
  49 * To avoid calling the atomic setbit hundreds or thousands of times, we only
  50 * need to use it within a single byte (to ensure we get endianness right).
  51 * We can use memset for the rest of the bitmap as there are no other users.
  52 */
  53void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
  54{
  55        int i;
  56
  57        if (start_bit >= end_bit)
  58                return;
  59
  60        ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
  61        for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
  62                ext4_set_bit(i, bitmap);
  63        if (i < end_bit)
  64                memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
  65}
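
/*
 * Worked example for ext4_mark_bitmap_end() above (made-up numbers): with
 * start_bit = 2003 and end_bit = 32768 (one 4K bitmap block), the loop sets
 * bits 2003..2007 individually until i reaches the byte boundary at 2008,
 * and memset() then fills the remaining (32768 - 2008) / 8 = 3845 bytes
 * with 0xff:
 *
 *	ext4_mark_bitmap_end(2003, 32768, bh->b_data);
 */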
  66
  67/* Initializes an uninitialized inode bitmap */
  68static unsigned ext4_init_inode_bitmap(struct super_block *sb,
  69                                       struct buffer_head *bh,
  70                                       ext4_group_t block_group,
  71                                       struct ext4_group_desc *gdp)
  72{
  73        J_ASSERT_BH(bh, buffer_locked(bh));
  74
   75        /* If the checksum is bad, mark all blocks and inodes in use to prevent
   76         * allocation, essentially implementing a per-group read-only flag. */
  77        if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
  78                ext4_error(sb, "Checksum bad for group %u", block_group);
  79                ext4_free_group_clusters_set(sb, gdp, 0);
  80                ext4_free_inodes_set(sb, gdp, 0);
  81                ext4_itable_unused_set(sb, gdp, 0);
  82                memset(bh->b_data, 0xff, sb->s_blocksize);
  83                ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
  84                                           EXT4_INODES_PER_GROUP(sb) / 8);
  85                return 0;
  86        }
  87
  88        memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
  89        ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
  90                        bh->b_data);
  91        ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
  92                                   EXT4_INODES_PER_GROUP(sb) / 8);
  93        ext4_group_desc_csum_set(sb, block_group, gdp);
  94
  95        return EXT4_INODES_PER_GROUP(sb);
  96}
  97
  98void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
  99{
 100        if (uptodate) {
 101                set_buffer_uptodate(bh);
 102                set_bitmap_uptodate(bh);
 103        }
 104        unlock_buffer(bh);
 105        put_bh(bh);
 106}
 107
 108/*
  109 * Read the inode allocation bitmap for a given block_group, reading
  110 * it into the buffer cache if it is not already there.
 111 *
 112 * Return buffer_head of bitmap on success or NULL.
 113 */
 114static struct buffer_head *
 115ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 116{
 117        struct ext4_group_desc *desc;
 118        struct buffer_head *bh = NULL;
 119        ext4_fsblk_t bitmap_blk;
 120
 121        desc = ext4_get_group_desc(sb, block_group, NULL);
 122        if (!desc)
 123                return NULL;
 124
 125        bitmap_blk = ext4_inode_bitmap(sb, desc);
 126        bh = sb_getblk(sb, bitmap_blk);
 127        if (unlikely(!bh)) {
 128                ext4_error(sb, "Cannot read inode bitmap - "
 129                            "block_group = %u, inode_bitmap = %llu",
 130                            block_group, bitmap_blk);
 131                return NULL;
 132        }
 133        if (bitmap_uptodate(bh))
 134                goto verify;
 135
 136        lock_buffer(bh);
 137        if (bitmap_uptodate(bh)) {
 138                unlock_buffer(bh);
 139                goto verify;
 140        }
 141
 142        ext4_lock_group(sb, block_group);
 143        if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
 144                ext4_init_inode_bitmap(sb, bh, block_group, desc);
 145                set_bitmap_uptodate(bh);
 146                set_buffer_uptodate(bh);
 147                set_buffer_verified(bh);
 148                ext4_unlock_group(sb, block_group);
 149                unlock_buffer(bh);
 150                return bh;
 151        }
 152        ext4_unlock_group(sb, block_group);
 153
 154        if (buffer_uptodate(bh)) {
 155                /*
  156                 * The group is not uninit, so if the bh is uptodate
  157                 * the bitmap is also uptodate.
 158                 */
 159                set_bitmap_uptodate(bh);
 160                unlock_buffer(bh);
 161                goto verify;
 162        }
 163        /*
 164         * submit the buffer_head for reading
 165         */
 166        trace_ext4_load_inode_bitmap(sb, block_group);
 167        bh->b_end_io = ext4_end_bitmap_read;
 168        get_bh(bh);
 169        submit_bh(READ | REQ_META | REQ_PRIO, bh);
 170        wait_on_buffer(bh);
 171        if (!buffer_uptodate(bh)) {
 172                put_bh(bh);
 173                ext4_error(sb, "Cannot read inode bitmap - "
 174                           "block_group = %u, inode_bitmap = %llu",
 175                           block_group, bitmap_blk);
 176                return NULL;
 177        }
 178
 179verify:
 180        ext4_lock_group(sb, block_group);
 181        if (!buffer_verified(bh) &&
 182            !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
 183                                           EXT4_INODES_PER_GROUP(sb) / 8)) {
 184                ext4_unlock_group(sb, block_group);
 185                put_bh(bh);
 186                ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
 187                           "inode_bitmap = %llu", block_group, bitmap_blk);
 188                return NULL;
 189        }
 190        ext4_unlock_group(sb, block_group);
 191        set_buffer_verified(bh);
 192        return bh;
 193}
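
/*
 * Minimal usage sketch (illustrative; it mirrors the real callers below,
 * ext4_free_inode() and ext4_orphan_get()).  The buffer_head returned by
 * ext4_read_inode_bitmap() must be released with brelse() when the caller
 * is done with it:
 *
 *	struct buffer_head *bh = ext4_read_inode_bitmap(sb, group);
 *	if (!bh)
 *		goto error_return;
 *	busy = ext4_test_bit(bit, bh->b_data);
 *	brelse(bh);
 */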
 194
 195/*
 196 * NOTE! When we get the inode, we're the only people
 197 * that have access to it, and as such there are no
 198 * race conditions we have to worry about. The inode
 199 * is not on the hash-lists, and it cannot be reached
 200 * through the filesystem because the directory entry
 201 * has been deleted earlier.
 202 *
 203 * HOWEVER: we must make sure that we get no aliases,
 204 * which means that we have to call "clear_inode()"
 205 * _before_ we mark the inode not in use in the inode
 206 * bitmaps. Otherwise a newly created file might use
 207 * the same inode number (not actually the same pointer
 208 * though), and then we'd have two inodes sharing the
 209 * same inode number and space on the harddisk.
 210 */
 211void ext4_free_inode(handle_t *handle, struct inode *inode)
 212{
 213        struct super_block *sb = inode->i_sb;
 214        int is_directory;
 215        unsigned long ino;
 216        struct buffer_head *bitmap_bh = NULL;
 217        struct buffer_head *bh2;
 218        ext4_group_t block_group;
 219        unsigned long bit;
 220        struct ext4_group_desc *gdp;
 221        struct ext4_super_block *es;
 222        struct ext4_sb_info *sbi;
 223        int fatal = 0, err, count, cleared;
 224
 225        if (!sb) {
 226                printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
 227                       "nonexistent device\n", __func__, __LINE__);
 228                return;
 229        }
 230        if (atomic_read(&inode->i_count) > 1) {
 231                ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
 232                         __func__, __LINE__, inode->i_ino,
 233                         atomic_read(&inode->i_count));
 234                return;
 235        }
 236        if (inode->i_nlink) {
 237                ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
 238                         __func__, __LINE__, inode->i_ino, inode->i_nlink);
 239                return;
 240        }
 241        sbi = EXT4_SB(sb);
 242
 243        ino = inode->i_ino;
 244        ext4_debug("freeing inode %lu\n", ino);
 245        trace_ext4_free_inode(inode);
 246
 247        /*
 248         * Note: we must free any quota before locking the superblock,
 249         * as writing the quota to disk may need the lock as well.
 250         */
 251        dquot_initialize(inode);
 252        ext4_xattr_delete_inode(handle, inode);
 253        dquot_free_inode(inode);
 254        dquot_drop(inode);
 255
 256        is_directory = S_ISDIR(inode->i_mode);
 257
 258        /* Do this BEFORE marking the inode not in use or returning an error */
 259        ext4_clear_inode(inode);
 260
 261        es = EXT4_SB(sb)->s_es;
 262        if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
 263                ext4_error(sb, "reserved or nonexistent inode %lu", ino);
 264                goto error_return;
 265        }
 266        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
 267        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
 268        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
 269        if (!bitmap_bh)
 270                goto error_return;
 271
 272        BUFFER_TRACE(bitmap_bh, "get_write_access");
 273        fatal = ext4_journal_get_write_access(handle, bitmap_bh);
 274        if (fatal)
 275                goto error_return;
 276
 277        fatal = -ESRCH;
 278        gdp = ext4_get_group_desc(sb, block_group, &bh2);
 279        if (gdp) {
 280                BUFFER_TRACE(bh2, "get_write_access");
 281                fatal = ext4_journal_get_write_access(handle, bh2);
 282        }
 283        ext4_lock_group(sb, block_group);
 284        cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
 285        if (fatal || !cleared) {
 286                ext4_unlock_group(sb, block_group);
 287                goto out;
 288        }
 289
 290        count = ext4_free_inodes_count(sb, gdp) + 1;
 291        ext4_free_inodes_set(sb, gdp, count);
 292        if (is_directory) {
 293                count = ext4_used_dirs_count(sb, gdp) - 1;
 294                ext4_used_dirs_set(sb, gdp, count);
 295                percpu_counter_dec(&sbi->s_dirs_counter);
 296        }
 297        ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
 298                                   EXT4_INODES_PER_GROUP(sb) / 8);
 299        ext4_group_desc_csum_set(sb, block_group, gdp);
 300        ext4_unlock_group(sb, block_group);
 301
 302        percpu_counter_inc(&sbi->s_freeinodes_counter);
 303        if (sbi->s_log_groups_per_flex) {
 304                ext4_group_t f = ext4_flex_group(sbi, block_group);
 305
 306                atomic_inc(&sbi->s_flex_groups[f].free_inodes);
 307                if (is_directory)
 308                        atomic_dec(&sbi->s_flex_groups[f].used_dirs);
 309        }
 310        BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
 311        fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
 312out:
 313        if (cleared) {
 314                BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
 315                err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
 316                if (!fatal)
 317                        fatal = err;
 318        } else
 319                ext4_error(sb, "bit already cleared for inode %lu", ino);
 320
 321error_return:
 322        brelse(bitmap_bh);
 323        ext4_std_error(sb, fatal);
 324}
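
/*
 * Worked example of the ino -> (block_group, bit) mapping used above
 * (illustrative numbers): with EXT4_INODES_PER_GROUP(sb) == 8192, inode
 * 10000 lives at
 *
 *	block_group = (10000 - 1) / 8192 = 1;
 *	bit         = (10000 - 1) % 8192 = 1807;
 *
 * i.e. bit 1807 of block group 1's inode bitmap.  __ext4_new_inode() below
 * performs the inverse computation when it assembles i_ino.
 */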
 325
 326struct orlov_stats {
 327        __u64 free_clusters;
 328        __u32 free_inodes;
 329        __u32 used_dirs;
 330};
 331
 332/*
 333 * Helper function for Orlov's allocator; returns critical information
 334 * for a particular block group or flex_bg.  If flex_size is 1, then g
 335 * is a block group number; otherwise it is flex_bg number.
 336 */
 337static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
 338                            int flex_size, struct orlov_stats *stats)
 339{
 340        struct ext4_group_desc *desc;
 341        struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
 342
 343        if (flex_size > 1) {
 344                stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
 345                stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
 346                stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
 347                return;
 348        }
 349
 350        desc = ext4_get_group_desc(sb, g, NULL);
 351        if (desc) {
 352                stats->free_inodes = ext4_free_inodes_count(sb, desc);
 353                stats->free_clusters = ext4_free_group_clusters(sb, desc);
 354                stats->used_dirs = ext4_used_dirs_count(sb, desc);
 355        } else {
 356                stats->free_inodes = 0;
 357                stats->free_clusters = 0;
 358                stats->used_dirs = 0;
 359        }
 360}
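
/*
 * Small usage sketch for get_orlov_stats() (illustrative; this is how
 * find_group_orlov() below consumes it): skip groups with no free inodes
 * or below-average free clusters.  With flex_bg enabled, g is a flex group
 * number and the counts come from the in-memory s_flex_groups counters;
 * otherwise they come straight from the group descriptor:
 *
 *	struct orlov_stats stats;
 *
 *	get_orlov_stats(sb, g, flex_size, &stats);
 *	if (!stats.free_inodes || stats.free_clusters < avefreec)
 *		continue;
 */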
 361
 362/*
 363 * Orlov's allocator for directories.
 364 *
 365 * We always try to spread first-level directories.
 366 *
  367 * If there are block groups whose free inode and free block counts are
  368 * not worse than average, we return the one with the smallest directory
  369 * count.  Otherwise we simply return a random group.
  370 *
  371 * For other directories the rules are as follows:
  372 *
  373 * It's OK to put a directory into a group unless
  374 * it has too many directories already (max_dirs) or
  375 * it has too few free inodes left (min_inodes) or
  376 * it has too few free clusters left (min_clusters).
  377 * The parent's group is preferred; if it doesn't satisfy these
  378 * conditions we search cyclically through the rest.  If none
  379 * of the groups looks good we just look for a group with more
  380 * free inodes than average (starting at the parent's group).
  381 */
 382
 383static int find_group_orlov(struct super_block *sb, struct inode *parent,
 384                            ext4_group_t *group, umode_t mode,
 385                            const struct qstr *qstr)
 386{
 387        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
 388        struct ext4_sb_info *sbi = EXT4_SB(sb);
 389        ext4_group_t real_ngroups = ext4_get_groups_count(sb);
 390        int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
 391        unsigned int freei, avefreei, grp_free;
 392        ext4_fsblk_t freeb, avefreec;
 393        unsigned int ndirs;
 394        int max_dirs, min_inodes;
 395        ext4_grpblk_t min_clusters;
 396        ext4_group_t i, grp, g, ngroups;
 397        struct ext4_group_desc *desc;
 398        struct orlov_stats stats;
 399        int flex_size = ext4_flex_bg_size(sbi);
 400        struct dx_hash_info hinfo;
 401
 402        ngroups = real_ngroups;
 403        if (flex_size > 1) {
 404                ngroups = (real_ngroups + flex_size - 1) >>
 405                        sbi->s_log_groups_per_flex;
 406                parent_group >>= sbi->s_log_groups_per_flex;
 407        }
 408
 409        freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
 410        avefreei = freei / ngroups;
 411        freeb = EXT4_C2B(sbi,
 412                percpu_counter_read_positive(&sbi->s_freeclusters_counter));
 413        avefreec = freeb;
 414        do_div(avefreec, ngroups);
 415        ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
 416
 417        if (S_ISDIR(mode) &&
 418            ((parent == sb->s_root->d_inode) ||
 419             (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
 420                int best_ndir = inodes_per_group;
 421                int ret = -1;
 422
 423                if (qstr) {
 424                        hinfo.hash_version = DX_HASH_HALF_MD4;
 425                        hinfo.seed = sbi->s_hash_seed;
 426                        ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
 427                        grp = hinfo.hash;
 428                } else
 429                        get_random_bytes(&grp, sizeof(grp));
 430                parent_group = (unsigned)grp % ngroups;
 431                for (i = 0; i < ngroups; i++) {
 432                        g = (parent_group + i) % ngroups;
 433                        get_orlov_stats(sb, g, flex_size, &stats);
 434                        if (!stats.free_inodes)
 435                                continue;
 436                        if (stats.used_dirs >= best_ndir)
 437                                continue;
 438                        if (stats.free_inodes < avefreei)
 439                                continue;
 440                        if (stats.free_clusters < avefreec)
 441                                continue;
 442                        grp = g;
 443                        ret = 0;
 444                        best_ndir = stats.used_dirs;
 445                }
 446                if (ret)
 447                        goto fallback;
 448        found_flex_bg:
 449                if (flex_size == 1) {
 450                        *group = grp;
 451                        return 0;
 452                }
 453
 454                /*
 455                 * We pack inodes at the beginning of the flexgroup's
 456                 * inode tables.  Block allocation decisions will do
 457                 * something similar, although regular files will
 458                 * start at 2nd block group of the flexgroup.  See
 459                 * ext4_ext_find_goal() and ext4_find_near().
 460                 */
 461                grp *= flex_size;
 462                for (i = 0; i < flex_size; i++) {
 463                        if (grp+i >= real_ngroups)
 464                                break;
 465                        desc = ext4_get_group_desc(sb, grp+i, NULL);
 466                        if (desc && ext4_free_inodes_count(sb, desc)) {
 467                                *group = grp+i;
 468                                return 0;
 469                        }
 470                }
 471                goto fallback;
 472        }
 473
 474        max_dirs = ndirs / ngroups + inodes_per_group / 16;
 475        min_inodes = avefreei - inodes_per_group*flex_size / 4;
 476        if (min_inodes < 1)
 477                min_inodes = 1;
 478        min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
 479
 480        /*
 481         * Start looking in the flex group where we last allocated an
 482         * inode for this parent directory
 483         */
 484        if (EXT4_I(parent)->i_last_alloc_group != ~0) {
 485                parent_group = EXT4_I(parent)->i_last_alloc_group;
 486                if (flex_size > 1)
 487                        parent_group >>= sbi->s_log_groups_per_flex;
 488        }
 489
 490        for (i = 0; i < ngroups; i++) {
 491                grp = (parent_group + i) % ngroups;
 492                get_orlov_stats(sb, grp, flex_size, &stats);
 493                if (stats.used_dirs >= max_dirs)
 494                        continue;
 495                if (stats.free_inodes < min_inodes)
 496                        continue;
 497                if (stats.free_clusters < min_clusters)
 498                        continue;
 499                goto found_flex_bg;
 500        }
 501
 502fallback:
 503        ngroups = real_ngroups;
 504        avefreei = freei / ngroups;
 505fallback_retry:
 506        parent_group = EXT4_I(parent)->i_block_group;
 507        for (i = 0; i < ngroups; i++) {
 508                grp = (parent_group + i) % ngroups;
 509                desc = ext4_get_group_desc(sb, grp, NULL);
 510                if (desc) {
 511                        grp_free = ext4_free_inodes_count(sb, desc);
 512                        if (grp_free && grp_free >= avefreei) {
 513                                *group = grp;
 514                                return 0;
 515                        }
 516                }
 517        }
 518
 519        if (avefreei) {
 520                /*
 521                 * The free-inodes counter is approximate, and for really small
 522                 * filesystems the above test can fail to find any blockgroups
 523                 */
 524                avefreei = 0;
 525                goto fallback_retry;
 526        }
 527
 528        return -1;
 529}
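
/*
 * Worked example of the max_dirs/min_inodes/min_clusters thresholds above
 * (made-up numbers, flex_size == 1): with 8192 inodes and 32768 clusters
 * per group, ndirs / ngroups == 50, avefreei == 6000 and avefreec == 20000:
 *
 *	max_dirs     = 50 + 8192 / 16        = 562
 *	min_inodes   = 6000 - 8192 * 1 / 4   = 3952
 *	min_clusters = 20000 - 32768 * 1 / 4 = 11808
 *
 * so a group is acceptable for a (non top-level) directory while it holds
 * fewer than 562 directories and still has at least 3952 free inodes and
 * 11808 free clusters.
 */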
 530
 531static int find_group_other(struct super_block *sb, struct inode *parent,
 532                            ext4_group_t *group, umode_t mode)
 533{
 534        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
 535        ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
 536        struct ext4_group_desc *desc;
 537        int flex_size = ext4_flex_bg_size(EXT4_SB(sb));
 538
 539        /*
  540         * Try to place the inode in the same flex group as its
  541         * parent.  If we can't find space, use the Orlov algorithm to
  542         * find another flex group, and store that information in the
  543         * parent directory's inode information so that we use that flex
  544         * group for future allocations.
 545         */
 546        if (flex_size > 1) {
 547                int retry = 0;
 548
 549        try_again:
 550                parent_group &= ~(flex_size-1);
 551                last = parent_group + flex_size;
 552                if (last > ngroups)
 553                        last = ngroups;
 554                for  (i = parent_group; i < last; i++) {
 555                        desc = ext4_get_group_desc(sb, i, NULL);
 556                        if (desc && ext4_free_inodes_count(sb, desc)) {
 557                                *group = i;
 558                                return 0;
 559                        }
 560                }
 561                if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
 562                        retry = 1;
 563                        parent_group = EXT4_I(parent)->i_last_alloc_group;
 564                        goto try_again;
 565                }
 566                /*
 567                 * If this didn't work, use the Orlov search algorithm
 568                 * to find a new flex group; we pass in the mode to
 569                 * avoid the topdir algorithms.
 570                 */
 571                *group = parent_group + flex_size;
 572                if (*group > ngroups)
 573                        *group = 0;
 574                return find_group_orlov(sb, parent, group, mode, NULL);
 575        }
 576
 577        /*
 578         * Try to place the inode in its parent directory
 579         */
 580        *group = parent_group;
 581        desc = ext4_get_group_desc(sb, *group, NULL);
 582        if (desc && ext4_free_inodes_count(sb, desc) &&
 583            ext4_free_group_clusters(sb, desc))
 584                return 0;
 585
 586        /*
 587         * We're going to place this inode in a different blockgroup from its
 588         * parent.  We want to cause files in a common directory to all land in
 589         * the same blockgroup.  But we want files which are in a different
 590         * directory which shares a blockgroup with our parent to land in a
 591         * different blockgroup.
 592         *
 593         * So add our directory's i_ino into the starting point for the hash.
 594         */
 595        *group = (*group + parent->i_ino) % ngroups;
 596
 597        /*
 598         * Use a quadratic hash to find a group with a free inode and some free
 599         * blocks.
 600         */
 601        for (i = 1; i < ngroups; i <<= 1) {
 602                *group += i;
 603                if (*group >= ngroups)
 604                        *group -= ngroups;
 605                desc = ext4_get_group_desc(sb, *group, NULL);
 606                if (desc && ext4_free_inodes_count(sb, desc) &&
 607                    ext4_free_group_clusters(sb, desc))
 608                        return 0;
 609        }
 610
 611        /*
 612         * That failed: try linear search for a free inode, even if that group
 613         * has no free blocks.
 614         */
 615        *group = parent_group;
 616        for (i = 0; i < ngroups; i++) {
 617                if (++*group >= ngroups)
 618                        *group = 0;
 619                desc = ext4_get_group_desc(sb, *group, NULL);
 620                if (desc && ext4_free_inodes_count(sb, desc))
 621                        return 0;
 622        }
 623
 624        return -1;
 625}
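
/*
 * Illustration of the probe sequence used by find_group_other() above for
 * non-directories when the parent's group is full (made-up numbers): with
 * ngroups == 128, parent group 40 and parent->i_ino == 1000, the starting
 * point is (40 + 1000) % 128 == 16, and the quadratic probe then visits
 * groups 17, 19, 23, 31, 47, 79 and 15 (wrapping modulo 128) before the
 * final fallback to a purely linear scan for any group with a free inode.
 */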
 626
 627/*
 628 * There are two policies for allocating an inode.  If the new inode is
 629 * a directory, then a forward search is made for a block group with both
 630 * free space and a low directory-to-inode ratio; if that fails, then of
 631 * the groups with above-average free space, that group with the fewest
 632 * directories already is chosen.
 633 *
 634 * For other inodes, search forward from the parent directory's block
 635 * group to find a free inode.
 636 */
 637struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 638                               umode_t mode, const struct qstr *qstr,
 639                               __u32 goal, uid_t *owner, int handle_type,
 640                               unsigned int line_no, int nblocks)
 641{
 642        struct super_block *sb;
 643        struct buffer_head *inode_bitmap_bh = NULL;
 644        struct buffer_head *group_desc_bh;
 645        ext4_group_t ngroups, group = 0;
 646        unsigned long ino = 0;
 647        struct inode *inode;
 648        struct ext4_group_desc *gdp = NULL;
 649        struct ext4_inode_info *ei;
 650        struct ext4_sb_info *sbi;
 651        int ret2, err = 0;
 652        struct inode *ret;
 653        ext4_group_t i;
 654        ext4_group_t flex_group;
 655
 656        /* Cannot create files in a deleted directory */
 657        if (!dir || !dir->i_nlink)
 658                return ERR_PTR(-EPERM);
 659
 660        sb = dir->i_sb;
 661        ngroups = ext4_get_groups_count(sb);
 662        trace_ext4_request_inode(dir, mode);
 663        inode = new_inode(sb);
 664        if (!inode)
 665                return ERR_PTR(-ENOMEM);
 666        ei = EXT4_I(inode);
 667        sbi = EXT4_SB(sb);
 668
 669        /*
  670         * Initialize owners and quota early so that we don't have to account
  671         * for the quota initialization worst case in the standard inode
  672         * creation transaction.
 673         */
 674        if (owner) {
 675                inode->i_mode = mode;
 676                i_uid_write(inode, owner[0]);
 677                i_gid_write(inode, owner[1]);
 678        } else if (test_opt(sb, GRPID)) {
 679                inode->i_mode = mode;
 680                inode->i_uid = current_fsuid();
 681                inode->i_gid = dir->i_gid;
 682        } else
 683                inode_init_owner(inode, dir, mode);
 684        dquot_initialize(inode);
 685
 686        if (!goal)
 687                goal = sbi->s_inode_goal;
 688
 689        if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
 690                group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
 691                ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
 692                ret2 = 0;
 693                goto got_group;
 694        }
 695
 696        if (S_ISDIR(mode))
 697                ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
 698        else
 699                ret2 = find_group_other(sb, dir, &group, mode);
 700
 701got_group:
 702        EXT4_I(dir)->i_last_alloc_group = group;
 703        err = -ENOSPC;
 704        if (ret2 == -1)
 705                goto out;
 706
 707        /*
 708         * Normally we will only go through one pass of this loop,
 709         * unless we get unlucky and it turns out the group we selected
 710         * had its last inode grabbed by someone else.
 711         */
 712        for (i = 0; i < ngroups; i++, ino = 0) {
 713                err = -EIO;
 714
 715                gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
 716                if (!gdp)
 717                        goto out;
 718
 719                /*
 720                 * Check free inodes count before loading bitmap.
 721                 */
 722                if (ext4_free_inodes_count(sb, gdp) == 0) {
 723                        if (++group == ngroups)
 724                                group = 0;
 725                        continue;
 726                }
 727
 728                brelse(inode_bitmap_bh);
 729                inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
 730                if (!inode_bitmap_bh)
 731                        goto out;
 732
 733repeat_in_this_group:
 734                ino = ext4_find_next_zero_bit((unsigned long *)
 735                                              inode_bitmap_bh->b_data,
 736                                              EXT4_INODES_PER_GROUP(sb), ino);
 737                if (ino >= EXT4_INODES_PER_GROUP(sb))
 738                        goto next_group;
 739                if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
 740                        ext4_error(sb, "reserved inode found cleared - "
 741                                   "inode=%lu", ino + 1);
 742                        continue;
 743                }
 744                if (!handle) {
 745                        BUG_ON(nblocks <= 0);
 746                        handle = __ext4_journal_start_sb(dir->i_sb, line_no,
 747                                                         handle_type, nblocks,
 748                                                         0);
 749                        if (IS_ERR(handle)) {
 750                                err = PTR_ERR(handle);
 751                                ext4_std_error(sb, err);
 752                                goto out;
 753                        }
 754                }
 755                BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
 756                err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
 757                if (err) {
 758                        ext4_std_error(sb, err);
 759                        goto out;
 760                }
 761                ext4_lock_group(sb, group);
 762                ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
 763                ext4_unlock_group(sb, group);
 764                ino++;          /* the inode bitmap is zero-based */
 765                if (!ret2)
 766                        goto got; /* we grabbed the inode! */
 767                if (ino < EXT4_INODES_PER_GROUP(sb))
 768                        goto repeat_in_this_group;
 769next_group:
 770                if (++group == ngroups)
 771                        group = 0;
 772        }
 773        err = -ENOSPC;
 774        goto out;
 775
 776got:
 777        BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
 778        err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
 779        if (err) {
 780                ext4_std_error(sb, err);
 781                goto out;
 782        }
 783
 784        /* We may have to initialize the block bitmap if it isn't already */
 785        if (ext4_has_group_desc_csum(sb) &&
 786            gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 787                struct buffer_head *block_bitmap_bh;
 788
 789                block_bitmap_bh = ext4_read_block_bitmap(sb, group);
 790                BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
 791                err = ext4_journal_get_write_access(handle, block_bitmap_bh);
 792                if (err) {
 793                        brelse(block_bitmap_bh);
 794                        ext4_std_error(sb, err);
 795                        goto out;
 796                }
 797
 798                BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
 799                err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
 800
 801                /* recheck and clear flag under lock if we still need to */
 802                ext4_lock_group(sb, group);
 803                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 804                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
 805                        ext4_free_group_clusters_set(sb, gdp,
 806                                ext4_free_clusters_after_init(sb, group, gdp));
 807                        ext4_block_bitmap_csum_set(sb, group, gdp,
 808                                                   block_bitmap_bh);
 809                        ext4_group_desc_csum_set(sb, group, gdp);
 810                }
 811                ext4_unlock_group(sb, group);
 812                brelse(block_bitmap_bh);
 813
 814                if (err) {
 815                        ext4_std_error(sb, err);
 816                        goto out;
 817                }
 818        }
 819
 820        BUFFER_TRACE(group_desc_bh, "get_write_access");
 821        err = ext4_journal_get_write_access(handle, group_desc_bh);
 822        if (err) {
 823                ext4_std_error(sb, err);
 824                goto out;
 825        }
 826
 827        /* Update the relevant bg descriptor fields */
 828        if (ext4_has_group_desc_csum(sb)) {
 829                int free;
 830                struct ext4_group_info *grp = ext4_get_group_info(sb, group);
 831
 832                down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
 833                ext4_lock_group(sb, group); /* while we modify the bg desc */
 834                free = EXT4_INODES_PER_GROUP(sb) -
 835                        ext4_itable_unused_count(sb, gdp);
 836                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
 837                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
 838                        free = 0;
 839                }
 840                /*
  841                 * Check the relative inode number against the last used
  842                 * relative inode number in this group.  If it is greater,
  843                 * we need to update the bg_itable_unused count.
 844                 */
 845                if (ino > free)
 846                        ext4_itable_unused_set(sb, gdp,
 847                                        (EXT4_INODES_PER_GROUP(sb) - ino));
 848                up_read(&grp->alloc_sem);
 849        } else {
 850                ext4_lock_group(sb, group);
 851        }
 852
 853        ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
 854        if (S_ISDIR(mode)) {
 855                ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
 856                if (sbi->s_log_groups_per_flex) {
 857                        ext4_group_t f = ext4_flex_group(sbi, group);
 858
 859                        atomic_inc(&sbi->s_flex_groups[f].used_dirs);
 860                }
 861        }
 862        if (ext4_has_group_desc_csum(sb)) {
 863                ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
 864                                           EXT4_INODES_PER_GROUP(sb) / 8);
 865                ext4_group_desc_csum_set(sb, group, gdp);
 866        }
 867        ext4_unlock_group(sb, group);
 868
 869        BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
 870        err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
 871        if (err) {
 872                ext4_std_error(sb, err);
 873                goto out;
 874        }
 875
 876        percpu_counter_dec(&sbi->s_freeinodes_counter);
 877        if (S_ISDIR(mode))
 878                percpu_counter_inc(&sbi->s_dirs_counter);
 879
 880        if (sbi->s_log_groups_per_flex) {
 881                flex_group = ext4_flex_group(sbi, group);
 882                atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
 883        }
 884
 885        inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
 886        /* This is the optimal IO size (for stat), not the fs block size */
 887        inode->i_blocks = 0;
 888        inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
 889                                                       ext4_current_time(inode);
 890
 891        memset(ei->i_data, 0, sizeof(ei->i_data));
 892        ei->i_dir_start_lookup = 0;
 893        ei->i_disksize = 0;
 894
 895        /* Don't inherit extent flag from directory, amongst others. */
 896        ei->i_flags =
 897                ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
 898        ei->i_file_acl = 0;
 899        ei->i_dtime = 0;
 900        ei->i_block_group = group;
 901        ei->i_last_alloc_group = ~0;
 902
 903        ext4_set_inode_flags(inode);
 904        if (IS_DIRSYNC(inode))
 905                ext4_handle_sync(handle);
 906        if (insert_inode_locked(inode) < 0) {
 907                /*
 908                 * Likely a bitmap corruption causing inode to be allocated
 909                 * twice.
 910                 */
 911                err = -EIO;
 912                ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
 913                           inode->i_ino);
 914                goto out;
 915        }
 916        spin_lock(&sbi->s_next_gen_lock);
 917        inode->i_generation = sbi->s_next_generation++;
 918        spin_unlock(&sbi->s_next_gen_lock);
 919
 920        /* Precompute checksum seed for inode metadata */
 921        if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
 922                        EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
 923                __u32 csum;
 924                __le32 inum = cpu_to_le32(inode->i_ino);
 925                __le32 gen = cpu_to_le32(inode->i_generation);
 926                csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
 927                                   sizeof(inum));
 928                ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
 929                                              sizeof(gen));
 930        }
 931
 932        ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
 933        ext4_set_inode_state(inode, EXT4_STATE_NEW);
 934
 935        ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
 936
 937        ei->i_inline_off = 0;
 938        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
 939                ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
 940
 941        ret = inode;
 942        err = dquot_alloc_inode(inode);
 943        if (err)
 944                goto fail_drop;
 945
 946        err = ext4_init_acl(handle, inode, dir);
 947        if (err)
 948                goto fail_free_drop;
 949
 950        err = ext4_init_security(handle, inode, dir, qstr);
 951        if (err)
 952                goto fail_free_drop;
 953
 954        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
  955                /* set extent flag only for directory, file and normal symlink */
 956                if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
 957                        ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
 958                        ext4_ext_tree_init(handle, inode);
 959                }
 960        }
 961
 962        if (ext4_handle_valid(handle)) {
 963                ei->i_sync_tid = handle->h_transaction->t_tid;
 964                ei->i_datasync_tid = handle->h_transaction->t_tid;
 965        }
 966
 967        err = ext4_mark_inode_dirty(handle, inode);
 968        if (err) {
 969                ext4_std_error(sb, err);
 970                goto fail_free_drop;
 971        }
 972
 973        ext4_debug("allocating inode %lu\n", inode->i_ino);
 974        trace_ext4_allocate_inode(inode, dir, mode);
 975        brelse(inode_bitmap_bh);
 976        return ret;
 977
 978fail_free_drop:
 979        dquot_free_inode(inode);
 980fail_drop:
 981        clear_nlink(inode);
 982        unlock_new_inode(inode);
 983out:
 984        dquot_drop(inode);
 985        inode->i_flags |= S_NOQUOTA;
 986        iput(inode);
 987        brelse(inode_bitmap_bh);
 988        return ERR_PTR(err);
 989}
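
/*
 * Note on the checksum seed precomputed in __ext4_new_inode() above (a
 * simplified description, assuming the metadata_csum feature is enabled):
 * the per-inode seed is effectively
 *
 *	crc32c(crc32c(sbi->s_csum_seed, le32(i_ino)), le32(i_generation))
 *
 * which ties later inode metadata checksums to both the inode number and
 * its generation, so a stale copy of the inode found elsewhere on disk
 * will fail verification.
 */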
 990
 991/* Verify that we are loading a valid orphan from disk */
 992struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
 993{
 994        unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
 995        ext4_group_t block_group;
 996        int bit;
 997        struct buffer_head *bitmap_bh;
 998        struct inode *inode = NULL;
 999        long err = -EIO;
1000
1001        /* Error cases - e2fsck has already cleaned up for us */
1002        if (ino > max_ino) {
1003                ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
1004                goto error;
1005        }
1006
1007        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
1008        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
1009        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
1010        if (!bitmap_bh) {
1011                ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
1012                goto error;
1013        }
1014
1015        /* Having the inode bit set should be a 100% indicator that this
1016         * is a valid orphan (no e2fsck run on fs).  Orphans also include
1017         * inodes that were being truncated, so we can't check i_nlink==0.
1018         */
1019        if (!ext4_test_bit(bit, bitmap_bh->b_data))
1020                goto bad_orphan;
1021
1022        inode = ext4_iget(sb, ino);
1023        if (IS_ERR(inode))
1024                goto iget_failed;
1025
1026        /*
 1027         * If the orphan has i_nlink > 0 then it should be able to be
 1028         * truncated; otherwise it won't be removed from the orphan list
 1029         * during processing and an infinite loop will result.
1030         */
1031        if (inode->i_nlink && !ext4_can_truncate(inode))
1032                goto bad_orphan;
1033
1034        if (NEXT_ORPHAN(inode) > max_ino)
1035                goto bad_orphan;
1036        brelse(bitmap_bh);
1037        return inode;
1038
1039iget_failed:
1040        err = PTR_ERR(inode);
1041        inode = NULL;
1042bad_orphan:
1043        ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
1044        printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n",
1045               bit, (unsigned long long)bitmap_bh->b_blocknr,
1046               ext4_test_bit(bit, bitmap_bh->b_data));
1047        printk(KERN_WARNING "inode=%p\n", inode);
1048        if (inode) {
1049                printk(KERN_WARNING "is_bad_inode(inode)=%d\n",
1050                       is_bad_inode(inode));
1051                printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n",
1052                       NEXT_ORPHAN(inode));
1053                printk(KERN_WARNING "max_ino=%lu\n", max_ino);
1054                printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink);
1055                /* Avoid freeing blocks if we got a bad deleted inode */
1056                if (inode->i_nlink == 0)
1057                        inode->i_blocks = 0;
1058                iput(inode);
1059        }
1060        brelse(bitmap_bh);
1061error:
1062        return ERR_PTR(err);
1063}
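
/*
 * Background for ext4_orphan_get() above (a simplified sketch, not a
 * format specification): on-disk orphan inodes form a singly linked list
 * headed by the superblock's s_last_orphan field and chained through each
 * inode's dtime field, which is what NEXT_ORPHAN() reads.  That is why
 * NEXT_ORPHAN(inode) is range-checked against max_ino before the inode is
 * trusted.
 */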
1064
1065unsigned long ext4_count_free_inodes(struct super_block *sb)
1066{
1067        unsigned long desc_count;
1068        struct ext4_group_desc *gdp;
1069        ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1070#ifdef EXT4FS_DEBUG
1071        struct ext4_super_block *es;
1072        unsigned long bitmap_count, x;
1073        struct buffer_head *bitmap_bh = NULL;
1074
1075        es = EXT4_SB(sb)->s_es;
1076        desc_count = 0;
1077        bitmap_count = 0;
1078        gdp = NULL;
1079        for (i = 0; i < ngroups; i++) {
1080                gdp = ext4_get_group_desc(sb, i, NULL);
1081                if (!gdp)
1082                        continue;
1083                desc_count += ext4_free_inodes_count(sb, gdp);
1084                brelse(bitmap_bh);
1085                bitmap_bh = ext4_read_inode_bitmap(sb, i);
1086                if (!bitmap_bh)
1087                        continue;
1088
1089                x = ext4_count_free(bitmap_bh->b_data,
1090                                    EXT4_INODES_PER_GROUP(sb) / 8);
1091                printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
1092                        (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
1093                bitmap_count += x;
1094        }
1095        brelse(bitmap_bh);
1096        printk(KERN_DEBUG "ext4_count_free_inodes: "
1097               "stored = %u, computed = %lu, %lu\n",
1098               le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
1099        return desc_count;
1100#else
1101        desc_count = 0;
1102        for (i = 0; i < ngroups; i++) {
1103                gdp = ext4_get_group_desc(sb, i, NULL);
1104                if (!gdp)
1105                        continue;
1106                desc_count += ext4_free_inodes_count(sb, gdp);
1107                cond_resched();
1108        }
1109        return desc_count;
1110#endif
1111}
1112
1113/* Called at mount-time, super-block is locked */
1114unsigned long ext4_count_dirs(struct super_block * sb)
1115{
1116        unsigned long count = 0;
1117        ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1118
1119        for (i = 0; i < ngroups; i++) {
1120                struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1121                if (!gdp)
1122                        continue;
1123                count += ext4_used_dirs_count(sb, gdp);
1124        }
1125        return count;
1126}
1127
1128/*
 1129 * Zeroes the not-yet-zeroed part of an inode table by writing zeroes
 1130 * through the whole table.  Must be called without any spinlock held.  The
 1131 * only place it is called from on an active filesystem is the ext4lazyinit
 1132 * thread, so we do not need any special locks; however, we have to prevent
 1133 * inode allocation from the current group, so we take the alloc_sem lock to
 1134 * block ext4_new_inode() until we are finished.
1135 */
1136int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
1137                                 int barrier)
1138{
1139        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1140        struct ext4_sb_info *sbi = EXT4_SB(sb);
1141        struct ext4_group_desc *gdp = NULL;
1142        struct buffer_head *group_desc_bh;
1143        handle_t *handle;
1144        ext4_fsblk_t blk;
1145        int num, ret = 0, used_blks = 0;
1146
1147        /* This should not happen, but just to be sure check this */
1148        if (sb->s_flags & MS_RDONLY) {
1149                ret = 1;
1150                goto out;
1151        }
1152
1153        gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
1154        if (!gdp)
1155                goto out;
1156
1157        /*
1158         * We do not need to lock this, because we are the only one
1159         * handling this flag.
1160         */
1161        if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
1162                goto out;
1163
1164        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
1165        if (IS_ERR(handle)) {
1166                ret = PTR_ERR(handle);
1167                goto out;
1168        }
1169
1170        down_write(&grp->alloc_sem);
1171        /*
 1172         * If the inode bitmap was already initialized there may be some
 1173         * used inodes, so we need to skip blocks with used inodes in the
 1174         * inode table.
1175         */
1176        if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
1177                used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
1178                            ext4_itable_unused_count(sb, gdp)),
1179                            sbi->s_inodes_per_block);
1180
1181        if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
1182                ext4_error(sb, "Something is wrong with group %u: "
1183                           "used itable blocks: %d; "
1184                           "itable unused count: %u",
1185                           group, used_blks,
1186                           ext4_itable_unused_count(sb, gdp));
1187                ret = 1;
1188                goto err_out;
1189        }
1190
1191        blk = ext4_inode_table(sb, gdp) + used_blks;
1192        num = sbi->s_itb_per_group - used_blks;
1193
1194        BUFFER_TRACE(group_desc_bh, "get_write_access");
1195        ret = ext4_journal_get_write_access(handle,
1196                                            group_desc_bh);
1197        if (ret)
1198                goto err_out;
1199
1200        /*
1201         * Skip zeroout if the inode table is full. But we set the ZEROED
1202         * flag anyway, because obviously, when it is full it does not need
1203         * further zeroing.
1204         */
1205        if (unlikely(num == 0))
1206                goto skip_zeroout;
1207
1208        ext4_debug("going to zero out inode table in group %d\n",
1209                   group);
1210        ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
1211        if (ret < 0)
1212                goto err_out;
1213        if (barrier)
1214                blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
1215
1216skip_zeroout:
1217        ext4_lock_group(sb, group);
1218        gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
1219        ext4_group_desc_csum_set(sb, group, gdp);
1220        ext4_unlock_group(sb, group);
1221
1222        BUFFER_TRACE(group_desc_bh,
1223                     "call ext4_handle_dirty_metadata");
1224        ret = ext4_handle_dirty_metadata(handle, NULL,
1225                                         group_desc_bh);
1226
1227err_out:
1228        up_write(&grp->alloc_sem);
1229        ext4_journal_stop(handle);
1230out:
1231        return ret;
1232}
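
/*
 * Worked example of the used_blks computation in ext4_init_inode_table()
 * above (made-up numbers): with 8192 inodes per group, 256-byte inodes and
 * 4K blocks there are 16 inodes per inode-table block and
 * sbi->s_itb_per_group == 512.  If bg_itable_unused reports 8000 inodes as
 * still untouched, then
 *
 *	used_blks = DIV_ROUND_UP(8192 - 8000, 16) = 12
 *
 * so zeroing starts at inode-table block 12 and covers the remaining
 * 512 - 12 = 500 blocks.
 */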
1233