linux/fs/ext4/ialloc.c
   1/*
   2 *  linux/fs/ext4/ialloc.c
   3 *
   4 * Copyright (C) 1992, 1993, 1994, 1995
   5 * Remy Card (card@masi.ibp.fr)
   6 * Laboratoire MASI - Institut Blaise Pascal
   7 * Universite Pierre et Marie Curie (Paris VI)
   8 *
   9 *  BSD ufs-inspired inode and directory allocation by
  10 *  Stephen Tweedie (sct@redhat.com), 1993
  11 *  Big-endian to little-endian byte-swapping/bitmaps by
  12 *        David S. Miller (davem@caip.rutgers.edu), 1995
  13 */
  14
  15#include <linux/time.h>
  16#include <linux/fs.h>
  17#include <linux/stat.h>
  18#include <linux/string.h>
  19#include <linux/quotaops.h>
  20#include <linux/buffer_head.h>
  21#include <linux/random.h>
  22#include <linux/bitops.h>
  23#include <linux/blkdev.h>
  24#include <asm/byteorder.h>
  25
  26#include "ext4.h"
  27#include "ext4_jbd2.h"
  28#include "xattr.h"
  29#include "acl.h"
  30
  31#include <trace/events/ext4.h>
  32
  33/*
   34 * ialloc.c contains the inode allocation and deallocation routines
  35 */
  36
  37/*
  38 * The free inodes are managed by bitmaps.  A file system contains several
   39 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
  40 * block for inodes, N blocks for the inode table and data blocks.
  41 *
  42 * The file system contains group descriptors which are located after the
  43 * super block.  Each descriptor contains the number of the bitmap block and
   44 * the free blocks count in the group.
  45 */
  46
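/*
 * Editor's illustration (not from the original source): how an inode
 * number maps onto a block group and a bit inside that group's inode
 * bitmap.  This is the same (ino - 1) arithmetic used later by
 * ext4_free_inode() and ext4_orphan_get(); the helper name is made up
 * and the block is never compiled.
 */
#if 0
static void example_ino_to_group_and_bit(struct super_block *sb,
                                         unsigned long ino)
{
        /* inode numbers start at 1, so bias by one before dividing */
        ext4_group_t group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
        unsigned long bit  = (ino - 1) % EXT4_INODES_PER_GROUP(sb);

        /* e.g. with 8192 inodes per group, inode 8193 is group 1, bit 0 */
        ext4_debug("ino %lu -> group %u, bit %lu\n", ino, group, bit);
}
#endif
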
  47/*
  48 * To avoid calling the atomic setbit hundreds or thousands of times, we only
  49 * need to use it within a single byte (to ensure we get endianness right).
  50 * We can use memset for the rest of the bitmap as there are no other users.
  51 */
  52void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
  53{
  54        int i;
  55
  56        if (start_bit >= end_bit)
  57                return;
  58
  59        ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
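        /*
         * Set the leading bits individually until we reach a byte
         * boundary; the remaining whole bytes are filled in with a
         * single memset() below.
         */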
  60        for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
  61                ext4_set_bit(i, bitmap);
  62        if (i < end_bit)
  63                memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
  64}
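
/*
 * ext4_init_inode_bitmap() below uses this to mark the padding bits
 * between EXT4_INODES_PER_GROUP() and the end of the bitmap block as
 * in use, so that they can never be handed out as inodes.
 */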
  65
  66/* Initializes an uninitialized inode bitmap */
  67static unsigned ext4_init_inode_bitmap(struct super_block *sb,
  68                                       struct buffer_head *bh,
  69                                       ext4_group_t block_group,
  70                                       struct ext4_group_desc *gdp)
  71{
  72        struct ext4_group_info *grp;
  73        struct ext4_sb_info *sbi = EXT4_SB(sb);
  74        J_ASSERT_BH(bh, buffer_locked(bh));
  75
   76        /* If the checksum is bad, mark all blocks and inodes as in use to prevent
  77         * allocation, essentially implementing a per-group read-only flag. */
  78        if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
  79                ext4_error(sb, "Checksum bad for group %u", block_group);
  80                grp = ext4_get_group_info(sb, block_group);
  81                if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
  82                        percpu_counter_sub(&sbi->s_freeclusters_counter,
  83                                           grp->bb_free);
  84                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
  85                if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
  86                        int count;
  87                        count = ext4_free_inodes_count(sb, gdp);
  88                        percpu_counter_sub(&sbi->s_freeinodes_counter,
  89                                           count);
  90                }
  91                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
  92                return 0;
  93        }
  94
  95        memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
  96        ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
  97                        bh->b_data);
  98        ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
  99                                   EXT4_INODES_PER_GROUP(sb) / 8);
 100        ext4_group_desc_csum_set(sb, block_group, gdp);
 101
 102        return EXT4_INODES_PER_GROUP(sb);
 103}
 104
 105void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
 106{
 107        if (uptodate) {
 108                set_buffer_uptodate(bh);
 109                set_bitmap_uptodate(bh);
 110        }
 111        unlock_buffer(bh);
 112        put_bh(bh);
 113}
 114
 115/*
  116 * Read the inode allocation bitmap for a given block_group, initializing
  117 * the bitmap first if the group is still marked uninitialized.
  118 *
  119 * Return the buffer_head of the bitmap on success, or NULL on failure.
 120 */
 121static struct buffer_head *
 122ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 123{
 124        struct ext4_group_desc *desc;
 125        struct buffer_head *bh = NULL;
 126        ext4_fsblk_t bitmap_blk;
 127        struct ext4_group_info *grp;
 128        struct ext4_sb_info *sbi = EXT4_SB(sb);
 129
 130        desc = ext4_get_group_desc(sb, block_group, NULL);
 131        if (!desc)
 132                return NULL;
 133
 134        bitmap_blk = ext4_inode_bitmap(sb, desc);
 135        bh = sb_getblk(sb, bitmap_blk);
 136        if (unlikely(!bh)) {
 137                ext4_error(sb, "Cannot read inode bitmap - "
 138                            "block_group = %u, inode_bitmap = %llu",
 139                            block_group, bitmap_blk);
 140                return NULL;
 141        }
 142        if (bitmap_uptodate(bh))
 143                goto verify;
 144
 145        lock_buffer(bh);
 146        if (bitmap_uptodate(bh)) {
 147                unlock_buffer(bh);
 148                goto verify;
 149        }
 150
 151        ext4_lock_group(sb, block_group);
 152        if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
 153                ext4_init_inode_bitmap(sb, bh, block_group, desc);
 154                set_bitmap_uptodate(bh);
 155                set_buffer_uptodate(bh);
 156                set_buffer_verified(bh);
 157                ext4_unlock_group(sb, block_group);
 158                unlock_buffer(bh);
 159                return bh;
 160        }
 161        ext4_unlock_group(sb, block_group);
 162
 163        if (buffer_uptodate(bh)) {
 164                /*
  165                 * if the group is not marked uninit and bh is uptodate,
  166                 * the bitmap is also uptodate
 167                 */
 168                set_bitmap_uptodate(bh);
 169                unlock_buffer(bh);
 170                goto verify;
 171        }
 172        /*
 173         * submit the buffer_head for reading
 174         */
 175        trace_ext4_load_inode_bitmap(sb, block_group);
 176        bh->b_end_io = ext4_end_bitmap_read;
 177        get_bh(bh);
 178        submit_bh(READ | REQ_META | REQ_PRIO, bh);
 179        wait_on_buffer(bh);
 180        if (!buffer_uptodate(bh)) {
 181                put_bh(bh);
 182                ext4_error(sb, "Cannot read inode bitmap - "
 183                           "block_group = %u, inode_bitmap = %llu",
 184                           block_group, bitmap_blk);
 185                return NULL;
 186        }
 187
 188verify:
 189        ext4_lock_group(sb, block_group);
 190        if (!buffer_verified(bh) &&
 191            !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
 192                                           EXT4_INODES_PER_GROUP(sb) / 8)) {
 193                ext4_unlock_group(sb, block_group);
 194                put_bh(bh);
 195                ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
 196                           "inode_bitmap = %llu", block_group, bitmap_blk);
 197                grp = ext4_get_group_info(sb, block_group);
 198                if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
 199                        int count;
 200                        count = ext4_free_inodes_count(sb, desc);
 201                        percpu_counter_sub(&sbi->s_freeinodes_counter,
 202                                           count);
 203                }
 204                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 205                return NULL;
 206        }
 207        ext4_unlock_group(sb, block_group);
 208        set_buffer_verified(bh);
 209        return bh;
 210}
 211
 212/*
 213 * NOTE! When we get the inode, we're the only people
 214 * that have access to it, and as such there are no
 215 * race conditions we have to worry about. The inode
 216 * is not on the hash-lists, and it cannot be reached
 217 * through the filesystem because the directory entry
 218 * has been deleted earlier.
 219 *
 220 * HOWEVER: we must make sure that we get no aliases,
 221 * which means that we have to call "clear_inode()"
 222 * _before_ we mark the inode not in use in the inode
 223 * bitmaps. Otherwise a newly created file might use
 224 * the same inode number (not actually the same pointer
 225 * though), and then we'd have two inodes sharing the
  226 * same inode number and space on the hard disk.
 227 */
 228void ext4_free_inode(handle_t *handle, struct inode *inode)
 229{
 230        struct super_block *sb = inode->i_sb;
 231        int is_directory;
 232        unsigned long ino;
 233        struct buffer_head *bitmap_bh = NULL;
 234        struct buffer_head *bh2;
 235        ext4_group_t block_group;
 236        unsigned long bit;
 237        struct ext4_group_desc *gdp;
 238        struct ext4_super_block *es;
 239        struct ext4_sb_info *sbi;
 240        int fatal = 0, err, count, cleared;
 241        struct ext4_group_info *grp;
 242
 243        if (!sb) {
 244                printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
 245                       "nonexistent device\n", __func__, __LINE__);
 246                return;
 247        }
 248        if (atomic_read(&inode->i_count) > 1) {
 249                ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
 250                         __func__, __LINE__, inode->i_ino,
 251                         atomic_read(&inode->i_count));
 252                return;
 253        }
 254        if (inode->i_nlink) {
 255                ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
 256                         __func__, __LINE__, inode->i_ino, inode->i_nlink);
 257                return;
 258        }
 259        sbi = EXT4_SB(sb);
 260
 261        ino = inode->i_ino;
 262        ext4_debug("freeing inode %lu\n", ino);
 263        trace_ext4_free_inode(inode);
 264
 265        /*
 266         * Note: we must free any quota before locking the superblock,
 267         * as writing the quota to disk may need the lock as well.
 268         */
 269        dquot_initialize(inode);
 270        ext4_xattr_delete_inode(handle, inode);
 271        dquot_free_inode(inode);
 272        dquot_drop(inode);
 273
 274        is_directory = S_ISDIR(inode->i_mode);
 275
 276        /* Do this BEFORE marking the inode not in use or returning an error */
 277        ext4_clear_inode(inode);
 278
 279        es = EXT4_SB(sb)->s_es;
 280        if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
 281                ext4_error(sb, "reserved or nonexistent inode %lu", ino);
 282                goto error_return;
 283        }
 284        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
 285        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
 286        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
 287        /* Don't bother if the inode bitmap is corrupt. */
 288        grp = ext4_get_group_info(sb, block_group);
 289        if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) || !bitmap_bh)
 290                goto error_return;
 291
 292        BUFFER_TRACE(bitmap_bh, "get_write_access");
 293        fatal = ext4_journal_get_write_access(handle, bitmap_bh);
 294        if (fatal)
 295                goto error_return;
 296
 297        fatal = -ESRCH;
 298        gdp = ext4_get_group_desc(sb, block_group, &bh2);
 299        if (gdp) {
 300                BUFFER_TRACE(bh2, "get_write_access");
 301                fatal = ext4_journal_get_write_access(handle, bh2);
 302        }
 303        ext4_lock_group(sb, block_group);
 304        cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
 305        if (fatal || !cleared) {
 306                ext4_unlock_group(sb, block_group);
 307                goto out;
 308        }
 309
 310        count = ext4_free_inodes_count(sb, gdp) + 1;
 311        ext4_free_inodes_set(sb, gdp, count);
 312        if (is_directory) {
 313                count = ext4_used_dirs_count(sb, gdp) - 1;
 314                ext4_used_dirs_set(sb, gdp, count);
 315                percpu_counter_dec(&sbi->s_dirs_counter);
 316        }
 317        ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
 318                                   EXT4_INODES_PER_GROUP(sb) / 8);
 319        ext4_group_desc_csum_set(sb, block_group, gdp);
 320        ext4_unlock_group(sb, block_group);
 321
 322        percpu_counter_inc(&sbi->s_freeinodes_counter);
 323        if (sbi->s_log_groups_per_flex) {
 324                ext4_group_t f = ext4_flex_group(sbi, block_group);
 325
 326                atomic_inc(&sbi->s_flex_groups[f].free_inodes);
 327                if (is_directory)
 328                        atomic_dec(&sbi->s_flex_groups[f].used_dirs);
 329        }
 330        BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
 331        fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
 332out:
 333        if (cleared) {
 334                BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
 335                err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
 336                if (!fatal)
 337                        fatal = err;
 338        } else {
 339                ext4_error(sb, "bit already cleared for inode %lu", ino);
 340                if (gdp && !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
 341                        int count;
 342                        count = ext4_free_inodes_count(sb, gdp);
 343                        percpu_counter_sub(&sbi->s_freeinodes_counter,
 344                                           count);
 345                }
 346                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 347        }
 348
 349error_return:
 350        brelse(bitmap_bh);
 351        ext4_std_error(sb, fatal);
 352}
 353
 354struct orlov_stats {
 355        __u64 free_clusters;
 356        __u32 free_inodes;
 357        __u32 used_dirs;
 358};
 359
 360/*
 361 * Helper function for Orlov's allocator; returns critical information
 362 * for a particular block group or flex_bg.  If flex_size is 1, then g
  363 * is a block group number; otherwise it is a flex_bg number.
 364 */
 365static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
 366                            int flex_size, struct orlov_stats *stats)
 367{
 368        struct ext4_group_desc *desc;
 369        struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
 370
 371        if (flex_size > 1) {
 372                stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
 373                stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
 374                stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
 375                return;
 376        }
 377
 378        desc = ext4_get_group_desc(sb, g, NULL);
 379        if (desc) {
 380                stats->free_inodes = ext4_free_inodes_count(sb, desc);
 381                stats->free_clusters = ext4_free_group_clusters(sb, desc);
 382                stats->used_dirs = ext4_used_dirs_count(sb, desc);
 383        } else {
 384                stats->free_inodes = 0;
 385                stats->free_clusters = 0;
 386                stats->used_dirs = 0;
 387        }
 388}
 389
 390/*
 391 * Orlov's allocator for directories.
 392 *
 393 * We always try to spread first-level directories.
 394 *
  395 * If there are block groups with both free inode and free block counts
  396 * not worse than average we return the one with the smallest directory
  397 * count.  Otherwise we simply return a random group.
  398 *
  399 * For all other directories the rules are:
  400 *
  401 * It's OK to put a directory into a group unless
  402 * it has too many directories already (max_dirs) or
  403 * it has too few free inodes left (min_inodes) or
  404 * it has too few free clusters left (min_clusters).
  405 * The parent's group is preferred; if it doesn't satisfy these
  406 * conditions we search cyclically through the rest. If none
  407 * of the groups looks good we just look for a group with more
  408 * free inodes than average (starting at the parent's group).
 409 */
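
/*
 * Editor's worked example with made-up numbers (not from the original
 * source): assume 8192 inodes and 32768 clusters per block group,
 * flex_size 16, 100 flex groups and 3000 directories.  The thresholds
 * computed below then come out as
 *
 *   max_dirs     = 3000/100 + 8192/16    = 542
 *   min_inodes   = avefreei - 8192*16/4  = avefreei - 32768
 *   min_clusters = avefreec - 32768*16/4 = avefreec - 131072
 *
 * i.e. a flex group is skipped once it already holds 542 directories or
 * drops too far below the average free inode/cluster counts.
 */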
 410
 411static int find_group_orlov(struct super_block *sb, struct inode *parent,
 412                            ext4_group_t *group, umode_t mode,
 413                            const struct qstr *qstr)
 414{
 415        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
 416        struct ext4_sb_info *sbi = EXT4_SB(sb);
 417        ext4_group_t real_ngroups = ext4_get_groups_count(sb);
 418        int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
 419        unsigned int freei, avefreei, grp_free;
 420        ext4_fsblk_t freeb, avefreec;
 421        unsigned int ndirs;
 422        int max_dirs, min_inodes;
 423        ext4_grpblk_t min_clusters;
 424        ext4_group_t i, grp, g, ngroups;
 425        struct ext4_group_desc *desc;
 426        struct orlov_stats stats;
 427        int flex_size = ext4_flex_bg_size(sbi);
 428        struct dx_hash_info hinfo;
 429
 430        ngroups = real_ngroups;
 431        if (flex_size > 1) {
 432                ngroups = (real_ngroups + flex_size - 1) >>
 433                        sbi->s_log_groups_per_flex;
 434                parent_group >>= sbi->s_log_groups_per_flex;
 435        }
 436
 437        freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
 438        avefreei = freei / ngroups;
 439        freeb = EXT4_C2B(sbi,
 440                percpu_counter_read_positive(&sbi->s_freeclusters_counter));
 441        avefreec = freeb;
 442        do_div(avefreec, ngroups);
 443        ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
 444
 445        if (S_ISDIR(mode) &&
 446            ((parent == d_inode(sb->s_root)) ||
 447             (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
 448                int best_ndir = inodes_per_group;
 449                int ret = -1;
 450
 451                if (qstr) {
 452                        hinfo.hash_version = DX_HASH_HALF_MD4;
 453                        hinfo.seed = sbi->s_hash_seed;
 454                        ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
 455                        grp = hinfo.hash;
 456                } else
 457                        grp = prandom_u32();
 458                parent_group = (unsigned)grp % ngroups;
 459                for (i = 0; i < ngroups; i++) {
 460                        g = (parent_group + i) % ngroups;
 461                        get_orlov_stats(sb, g, flex_size, &stats);
 462                        if (!stats.free_inodes)
 463                                continue;
 464                        if (stats.used_dirs >= best_ndir)
 465                                continue;
 466                        if (stats.free_inodes < avefreei)
 467                                continue;
 468                        if (stats.free_clusters < avefreec)
 469                                continue;
 470                        grp = g;
 471                        ret = 0;
 472                        best_ndir = stats.used_dirs;
 473                }
 474                if (ret)
 475                        goto fallback;
 476        found_flex_bg:
 477                if (flex_size == 1) {
 478                        *group = grp;
 479                        return 0;
 480                }
 481
 482                /*
 483                 * We pack inodes at the beginning of the flexgroup's
 484                 * inode tables.  Block allocation decisions will do
 485                 * something similar, although regular files will
 486                 * start at 2nd block group of the flexgroup.  See
 487                 * ext4_ext_find_goal() and ext4_find_near().
 488                 */
 489                grp *= flex_size;
 490                for (i = 0; i < flex_size; i++) {
 491                        if (grp+i >= real_ngroups)
 492                                break;
 493                        desc = ext4_get_group_desc(sb, grp+i, NULL);
 494                        if (desc && ext4_free_inodes_count(sb, desc)) {
 495                                *group = grp+i;
 496                                return 0;
 497                        }
 498                }
 499                goto fallback;
 500        }
 501
 502        max_dirs = ndirs / ngroups + inodes_per_group / 16;
 503        min_inodes = avefreei - inodes_per_group*flex_size / 4;
 504        if (min_inodes < 1)
 505                min_inodes = 1;
 506        min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
 507
 508        /*
 509         * Start looking in the flex group where we last allocated an
 510         * inode for this parent directory
 511         */
 512        if (EXT4_I(parent)->i_last_alloc_group != ~0) {
 513                parent_group = EXT4_I(parent)->i_last_alloc_group;
 514                if (flex_size > 1)
 515                        parent_group >>= sbi->s_log_groups_per_flex;
 516        }
 517
 518        for (i = 0; i < ngroups; i++) {
 519                grp = (parent_group + i) % ngroups;
 520                get_orlov_stats(sb, grp, flex_size, &stats);
 521                if (stats.used_dirs >= max_dirs)
 522                        continue;
 523                if (stats.free_inodes < min_inodes)
 524                        continue;
 525                if (stats.free_clusters < min_clusters)
 526                        continue;
 527                goto found_flex_bg;
 528        }
 529
 530fallback:
 531        ngroups = real_ngroups;
 532        avefreei = freei / ngroups;
 533fallback_retry:
 534        parent_group = EXT4_I(parent)->i_block_group;
 535        for (i = 0; i < ngroups; i++) {
 536                grp = (parent_group + i) % ngroups;
 537                desc = ext4_get_group_desc(sb, grp, NULL);
 538                if (desc) {
 539                        grp_free = ext4_free_inodes_count(sb, desc);
 540                        if (grp_free && grp_free >= avefreei) {
 541                                *group = grp;
 542                                return 0;
 543                        }
 544                }
 545        }
 546
 547        if (avefreei) {
 548                /*
 549                 * The free-inodes counter is approximate, and for really small
 550                 * filesystems the above test can fail to find any blockgroups
 551                 */
 552                avefreei = 0;
 553                goto fallback_retry;
 554        }
 555
 556        return -1;
 557}
 558
 559static int find_group_other(struct super_block *sb, struct inode *parent,
 560                            ext4_group_t *group, umode_t mode)
 561{
 562        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
 563        ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
 564        struct ext4_group_desc *desc;
 565        int flex_size = ext4_flex_bg_size(EXT4_SB(sb));
 566
 567        /*
  568         * Try to place the inode in the same flex group as its
  569         * parent.  If we can't find space, use the Orlov algorithm to
  570         * find another flex group, and store that information in the
  571         * parent directory's inode information so that future
  572         * allocations use that flex group.
 573         */
 574        if (flex_size > 1) {
 575                int retry = 0;
 576
 577        try_again:
 578                parent_group &= ~(flex_size-1);
 579                last = parent_group + flex_size;
 580                if (last > ngroups)
 581                        last = ngroups;
 582                for  (i = parent_group; i < last; i++) {
 583                        desc = ext4_get_group_desc(sb, i, NULL);
 584                        if (desc && ext4_free_inodes_count(sb, desc)) {
 585                                *group = i;
 586                                return 0;
 587                        }
 588                }
 589                if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
 590                        retry = 1;
 591                        parent_group = EXT4_I(parent)->i_last_alloc_group;
 592                        goto try_again;
 593                }
 594                /*
 595                 * If this didn't work, use the Orlov search algorithm
 596                 * to find a new flex group; we pass in the mode to
 597                 * avoid the topdir algorithms.
 598                 */
 599                *group = parent_group + flex_size;
 600                if (*group > ngroups)
 601                        *group = 0;
 602                return find_group_orlov(sb, parent, group, mode, NULL);
 603        }
 604
 605        /*
 606         * Try to place the inode in its parent directory
 607         */
 608        *group = parent_group;
 609        desc = ext4_get_group_desc(sb, *group, NULL);
 610        if (desc && ext4_free_inodes_count(sb, desc) &&
 611            ext4_free_group_clusters(sb, desc))
 612                return 0;
 613
 614        /*
 615         * We're going to place this inode in a different blockgroup from its
 616         * parent.  We want to cause files in a common directory to all land in
 617         * the same blockgroup.  But we want files which are in a different
 618         * directory which shares a blockgroup with our parent to land in a
 619         * different blockgroup.
 620         *
 621         * So add our directory's i_ino into the starting point for the hash.
 622         */
 623        *group = (*group + parent->i_ino) % ngroups;
 624
 625        /*
 626         * Use a quadratic hash to find a group with a free inode and some free
 627         * blocks.
 628         */
 629        for (i = 1; i < ngroups; i <<= 1) {
 630                *group += i;
 631                if (*group >= ngroups)
 632                        *group -= ngroups;
 633                desc = ext4_get_group_desc(sb, *group, NULL);
 634                if (desc && ext4_free_inodes_count(sb, desc) &&
 635                    ext4_free_group_clusters(sb, desc))
 636                        return 0;
 637        }
 638
 639        /*
 640         * That failed: try linear search for a free inode, even if that group
 641         * has no free blocks.
 642         */
 643        *group = parent_group;
 644        for (i = 0; i < ngroups; i++) {
 645                if (++*group >= ngroups)
 646                        *group = 0;
 647                desc = ext4_get_group_desc(sb, *group, NULL);
 648                if (desc && ext4_free_inodes_count(sb, desc))
 649                        return 0;
 650        }
 651
 652        return -1;
 653}
 654
 655/*
 656 * In no journal mode, if an inode has recently been deleted, we want
 657 * to avoid reusing it until we're reasonably sure the inode table
 658 * block has been written back to disk.  (Yes, these values are
 659 * somewhat arbitrary...)
 660 */
 661#define RECENTCY_MIN    5
 662#define RECENTCY_DIRTY  30
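
/*
 * Editor's example: with the defaults above, an inode whose dtime is 3
 * seconds old is still skipped (3 < RECENTCY_MIN); one deleted 10 seconds
 * ago may be reused if its inode table block is clean, but if that block
 * is still dirty the window stretches to RECENTCY_MIN + RECENTCY_DIRTY =
 * 35 seconds.
 */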
 663
 664static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
 665{
 666        struct ext4_group_desc  *gdp;
 667        struct ext4_inode       *raw_inode;
 668        struct buffer_head      *bh;
 669        unsigned long           dtime, now;
 670        int     inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
 671        int     offset, ret = 0, recentcy = RECENTCY_MIN;
 672
 673        gdp = ext4_get_group_desc(sb, group, NULL);
 674        if (unlikely(!gdp))
 675                return 0;
 676
 677        bh = sb_getblk(sb, ext4_inode_table(sb, gdp) +
 678                       (ino / inodes_per_block));
 679        if (unlikely(!bh) || !buffer_uptodate(bh))
 680                /*
 681                 * If the block is not in the buffer cache, then it
 682                 * must have been written out.
 683                 */
 684                goto out;
 685
 686        offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
 687        raw_inode = (struct ext4_inode *) (bh->b_data + offset);
 688        dtime = le32_to_cpu(raw_inode->i_dtime);
 689        now = get_seconds();
 690        if (buffer_dirty(bh))
 691                recentcy += RECENTCY_DIRTY;
 692
 693        if (dtime && (dtime < now) && (now < dtime + recentcy))
 694                ret = 1;
 695out:
 696        brelse(bh);
 697        return ret;
 698}
 699
 700/*
 701 * There are two policies for allocating an inode.  If the new inode is
 702 * a directory, then a forward search is made for a block group with both
  703 * free space and a low directory-to-inode ratio; if that fails, the
  704 * group with the fewest directories among those with above-average
  705 * free space is chosen.
 706 *
 707 * For other inodes, search forward from the parent directory's block
 708 * group to find a free inode.
 709 */
 710struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 711                               umode_t mode, const struct qstr *qstr,
 712                               __u32 goal, uid_t *owner, int handle_type,
 713                               unsigned int line_no, int nblocks)
 714{
 715        struct super_block *sb;
 716        struct buffer_head *inode_bitmap_bh = NULL;
 717        struct buffer_head *group_desc_bh;
 718        ext4_group_t ngroups, group = 0;
 719        unsigned long ino = 0;
 720        struct inode *inode;
 721        struct ext4_group_desc *gdp = NULL;
 722        struct ext4_inode_info *ei;
 723        struct ext4_sb_info *sbi;
 724        int ret2, err = 0;
 725        struct inode *ret;
 726        ext4_group_t i;
 727        ext4_group_t flex_group;
 728        struct ext4_group_info *grp;
 729
 730        /* Cannot create files in a deleted directory */
 731        if (!dir || !dir->i_nlink)
 732                return ERR_PTR(-EPERM);
 733
 734        sb = dir->i_sb;
 735        ngroups = ext4_get_groups_count(sb);
 736        trace_ext4_request_inode(dir, mode);
 737        inode = new_inode(sb);
 738        if (!inode)
 739                return ERR_PTR(-ENOMEM);
 740        ei = EXT4_I(inode);
 741        sbi = EXT4_SB(sb);
 742
 743        /*
  744         * Initialize owners and quota early so that we don't have to account
  745         * for the quota initialization worst case in the standard inode
  746         * creation transaction
 747         */
 748        if (owner) {
 749                inode->i_mode = mode;
 750                i_uid_write(inode, owner[0]);
 751                i_gid_write(inode, owner[1]);
 752        } else if (test_opt(sb, GRPID)) {
 753                inode->i_mode = mode;
 754                inode->i_uid = current_fsuid();
 755                inode->i_gid = dir->i_gid;
 756        } else
 757                inode_init_owner(inode, dir, mode);
 758        dquot_initialize(inode);
 759
 760        if (!goal)
 761                goal = sbi->s_inode_goal;
 762
 763        if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
 764                group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
 765                ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
 766                ret2 = 0;
 767                goto got_group;
 768        }
 769
 770        if (S_ISDIR(mode))
 771                ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
 772        else
 773                ret2 = find_group_other(sb, dir, &group, mode);
 774
 775got_group:
 776        EXT4_I(dir)->i_last_alloc_group = group;
 777        err = -ENOSPC;
 778        if (ret2 == -1)
 779                goto out;
 780
 781        /*
 782         * Normally we will only go through one pass of this loop,
 783         * unless we get unlucky and it turns out the group we selected
 784         * had its last inode grabbed by someone else.
 785         */
 786        for (i = 0; i < ngroups; i++, ino = 0) {
 787                err = -EIO;
 788
 789                gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
 790                if (!gdp)
 791                        goto out;
 792
 793                /*
 794                 * Check free inodes count before loading bitmap.
 795                 */
 796                if (ext4_free_inodes_count(sb, gdp) == 0) {
 797                        if (++group == ngroups)
 798                                group = 0;
 799                        continue;
 800                }
 801
 802                grp = ext4_get_group_info(sb, group);
 803                /* Skip groups with already-known suspicious inode tables */
 804                if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
 805                        if (++group == ngroups)
 806                                group = 0;
 807                        continue;
 808                }
 809
 810                brelse(inode_bitmap_bh);
 811                inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
 812                /* Skip groups with suspicious inode tables */
 813                if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) || !inode_bitmap_bh) {
 814                        if (++group == ngroups)
 815                                group = 0;
 816                        continue;
 817                }
 818
 819repeat_in_this_group:
 820                ino = ext4_find_next_zero_bit((unsigned long *)
 821                                              inode_bitmap_bh->b_data,
 822                                              EXT4_INODES_PER_GROUP(sb), ino);
 823                if (ino >= EXT4_INODES_PER_GROUP(sb))
 824                        goto next_group;
 825                if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
 826                        ext4_error(sb, "reserved inode found cleared - "
 827                                   "inode=%lu", ino + 1);
 828                        continue;
 829                }
 830                if ((EXT4_SB(sb)->s_journal == NULL) &&
 831                    recently_deleted(sb, group, ino)) {
 832                        ino++;
 833                        goto next_inode;
 834                }
 835                if (!handle) {
 836                        BUG_ON(nblocks <= 0);
 837                        handle = __ext4_journal_start_sb(dir->i_sb, line_no,
 838                                                         handle_type, nblocks,
 839                                                         0);
 840                        if (IS_ERR(handle)) {
 841                                err = PTR_ERR(handle);
 842                                ext4_std_error(sb, err);
 843                                goto out;
 844                        }
 845                }
 846                BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
 847                err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
 848                if (err) {
 849                        ext4_std_error(sb, err);
 850                        goto out;
 851                }
 852                ext4_lock_group(sb, group);
 853                ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
 854                ext4_unlock_group(sb, group);
 855                ino++;          /* the inode bitmap is zero-based */
 856                if (!ret2)
 857                        goto got; /* we grabbed the inode! */
 858next_inode:
 859                if (ino < EXT4_INODES_PER_GROUP(sb))
 860                        goto repeat_in_this_group;
 861next_group:
 862                if (++group == ngroups)
 863                        group = 0;
 864        }
 865        err = -ENOSPC;
 866        goto out;
 867
 868got:
 869        BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
 870        err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
 871        if (err) {
 872                ext4_std_error(sb, err);
 873                goto out;
 874        }
 875
 876        BUFFER_TRACE(group_desc_bh, "get_write_access");
 877        err = ext4_journal_get_write_access(handle, group_desc_bh);
 878        if (err) {
 879                ext4_std_error(sb, err);
 880                goto out;
 881        }
 882
 883        /* We may have to initialize the block bitmap if it isn't already */
 884        if (ext4_has_group_desc_csum(sb) &&
 885            gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 886                struct buffer_head *block_bitmap_bh;
 887
 888                block_bitmap_bh = ext4_read_block_bitmap(sb, group);
 889                if (!block_bitmap_bh) {
 890                        err = -EIO;
 891                        goto out;
 892                }
 893                BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
 894                err = ext4_journal_get_write_access(handle, block_bitmap_bh);
 895                if (err) {
 896                        brelse(block_bitmap_bh);
 897                        ext4_std_error(sb, err);
 898                        goto out;
 899                }
 900
 901                BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
 902                err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
 903
 904                /* recheck and clear flag under lock if we still need to */
 905                ext4_lock_group(sb, group);
 906                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 907                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
 908                        ext4_free_group_clusters_set(sb, gdp,
 909                                ext4_free_clusters_after_init(sb, group, gdp));
 910                        ext4_block_bitmap_csum_set(sb, group, gdp,
 911                                                   block_bitmap_bh);
 912                        ext4_group_desc_csum_set(sb, group, gdp);
 913                }
 914                ext4_unlock_group(sb, group);
 915                brelse(block_bitmap_bh);
 916
 917                if (err) {
 918                        ext4_std_error(sb, err);
 919                        goto out;
 920                }
 921        }
 922
 923        /* Update the relevant bg descriptor fields */
 924        if (ext4_has_group_desc_csum(sb)) {
 925                int free;
 926                struct ext4_group_info *grp = ext4_get_group_info(sb, group);
 927
 928                down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
 929                ext4_lock_group(sb, group); /* while we modify the bg desc */
 930                free = EXT4_INODES_PER_GROUP(sb) -
 931                        ext4_itable_unused_count(sb, gdp);
 932                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
 933                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
 934                        free = 0;
 935                }
 936                /*
 937                 * Check the relative inode number against the last used
  938                 * relative inode number in this group.  If it is greater,
  939                 * we need to update the bg_itable_unused count.
 940                 */
 941                if (ino > free)
 942                        ext4_itable_unused_set(sb, gdp,
 943                                        (EXT4_INODES_PER_GROUP(sb) - ino));
 944                up_read(&grp->alloc_sem);
 945        } else {
 946                ext4_lock_group(sb, group);
 947        }
 948
 949        ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
 950        if (S_ISDIR(mode)) {
 951                ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
 952                if (sbi->s_log_groups_per_flex) {
 953                        ext4_group_t f = ext4_flex_group(sbi, group);
 954
 955                        atomic_inc(&sbi->s_flex_groups[f].used_dirs);
 956                }
 957        }
 958        if (ext4_has_group_desc_csum(sb)) {
 959                ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
 960                                           EXT4_INODES_PER_GROUP(sb) / 8);
 961                ext4_group_desc_csum_set(sb, group, gdp);
 962        }
 963        ext4_unlock_group(sb, group);
 964
 965        BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
 966        err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
 967        if (err) {
 968                ext4_std_error(sb, err);
 969                goto out;
 970        }
 971
 972        percpu_counter_dec(&sbi->s_freeinodes_counter);
 973        if (S_ISDIR(mode))
 974                percpu_counter_inc(&sbi->s_dirs_counter);
 975
 976        if (sbi->s_log_groups_per_flex) {
 977                flex_group = ext4_flex_group(sbi, group);
 978                atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
 979        }
 980
 981        inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
 982        /* This is the optimal IO size (for stat), not the fs block size */
 983        inode->i_blocks = 0;
 984        inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
 985                                                       ext4_current_time(inode);
 986
 987        memset(ei->i_data, 0, sizeof(ei->i_data));
 988        ei->i_dir_start_lookup = 0;
 989        ei->i_disksize = 0;
 990
 991        /* Don't inherit extent flag from directory, amongst others. */
 992        ei->i_flags =
 993                ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
 994        ei->i_file_acl = 0;
 995        ei->i_dtime = 0;
 996        ei->i_block_group = group;
 997        ei->i_last_alloc_group = ~0;
 998
  999        /* If the directory is encrypted, then we should encrypt the inode. */
1000        if ((S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) &&
1001            (ext4_encrypted_inode(dir) ||
1002             DUMMY_ENCRYPTION_ENABLED(sbi)))
1003                ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
1004
1005        ext4_set_inode_flags(inode);
1006        if (IS_DIRSYNC(inode))
1007                ext4_handle_sync(handle);
1008        if (insert_inode_locked(inode) < 0) {
1009                /*
1010                 * Likely a bitmap corruption causing inode to be allocated
1011                 * twice.
1012                 */
1013                err = -EIO;
1014                ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
1015                           inode->i_ino);
1016                goto out;
1017        }
1018        spin_lock(&sbi->s_next_gen_lock);
1019        inode->i_generation = sbi->s_next_generation++;
1020        spin_unlock(&sbi->s_next_gen_lock);
1021
1022        /* Precompute checksum seed for inode metadata */
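        /*
         * The seed is crc32c folded first over the filesystem checksum
         * seed and the inode number, then over the generation, giving
         * each inode a distinct starting value for its metadata
         * checksums.
         */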
1023        if (ext4_has_metadata_csum(sb)) {
1024                __u32 csum;
1025                __le32 inum = cpu_to_le32(inode->i_ino);
1026                __le32 gen = cpu_to_le32(inode->i_generation);
1027                csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
1028                                   sizeof(inum));
1029                ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
1030                                              sizeof(gen));
1031        }
1032
1033        ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
1034        ext4_set_inode_state(inode, EXT4_STATE_NEW);
1035
1036        ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
1037#ifdef CONFIG_EXT4_FS_ENCRYPTION
1038        if ((sbi->s_file_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID) &&
1039            (sbi->s_dir_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID)) {
1040                ei->i_inline_off = 0;
1041                if (EXT4_HAS_INCOMPAT_FEATURE(sb,
1042                        EXT4_FEATURE_INCOMPAT_INLINE_DATA))
1043                        ext4_set_inode_state(inode,
1044                        EXT4_STATE_MAY_INLINE_DATA);
1045        } else {
 1046                /* Inline data and encryption are incompatible.
 1047                 * We turn off inline data since encryption is enabled. */
1048                ei->i_inline_off = 1;
1049                if (EXT4_HAS_INCOMPAT_FEATURE(sb,
1050                        EXT4_FEATURE_INCOMPAT_INLINE_DATA))
1051                        ext4_clear_inode_state(inode,
1052                        EXT4_STATE_MAY_INLINE_DATA);
1053        }
1054#else
1055        ei->i_inline_off = 0;
1056        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
1057                ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
1058#endif
1059        ret = inode;
1060        err = dquot_alloc_inode(inode);
1061        if (err)
1062                goto fail_drop;
1063
1064        err = ext4_init_acl(handle, inode, dir);
1065        if (err)
1066                goto fail_free_drop;
1067
1068        err = ext4_init_security(handle, inode, dir, qstr);
1069        if (err)
1070                goto fail_free_drop;
1071
1072        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
 1073                /* set extent flag only for directories, regular files and normal symlinks */
1074                if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
1075                        ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
1076                        ext4_ext_tree_init(handle, inode);
1077                }
1078        }
1079
1080        if (ext4_handle_valid(handle)) {
1081                ei->i_sync_tid = handle->h_transaction->t_tid;
1082                ei->i_datasync_tid = handle->h_transaction->t_tid;
1083        }
1084
1085        err = ext4_mark_inode_dirty(handle, inode);
1086        if (err) {
1087                ext4_std_error(sb, err);
1088                goto fail_free_drop;
1089        }
1090
1091        ext4_debug("allocating inode %lu\n", inode->i_ino);
1092        trace_ext4_allocate_inode(inode, dir, mode);
1093        brelse(inode_bitmap_bh);
1094        return ret;
1095
1096fail_free_drop:
1097        dquot_free_inode(inode);
1098fail_drop:
1099        clear_nlink(inode);
1100        unlock_new_inode(inode);
1101out:
1102        dquot_drop(inode);
1103        inode->i_flags |= S_NOQUOTA;
1104        iput(inode);
1105        brelse(inode_bitmap_bh);
1106        return ERR_PTR(err);
1107}
1108
1109/* Verify that we are loading a valid orphan from disk */
1110struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
1111{
1112        unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
1113        ext4_group_t block_group;
1114        int bit;
1115        struct buffer_head *bitmap_bh;
1116        struct inode *inode = NULL;
1117        long err = -EIO;
1118
1119        /* Error cases - e2fsck has already cleaned up for us */
1120        if (ino > max_ino) {
1121                ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
1122                goto error;
1123        }
1124
1125        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
1126        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
1127        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
1128        if (!bitmap_bh) {
1129                ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
1130                goto error;
1131        }
1132
1133        /* Having the inode bit set should be a 100% indicator that this
1134         * is a valid orphan (no e2fsck run on fs).  Orphans also include
1135         * inodes that were being truncated, so we can't check i_nlink==0.
1136         */
1137        if (!ext4_test_bit(bit, bitmap_bh->b_data))
1138                goto bad_orphan;
1139
1140        inode = ext4_iget(sb, ino);
1141        if (IS_ERR(inode))
1142                goto iget_failed;
1143
1144        /*
 1145         * If the orphan has i_nlink > 0 then it should be able to be
 1146         * truncated, otherwise it won't be removed from the orphan list
1147         * during processing and an infinite loop will result.
1148         */
1149        if (inode->i_nlink && !ext4_can_truncate(inode))
1150                goto bad_orphan;
1151
1152        if (NEXT_ORPHAN(inode) > max_ino)
1153                goto bad_orphan;
1154        brelse(bitmap_bh);
1155        return inode;
1156
1157iget_failed:
1158        err = PTR_ERR(inode);
1159        inode = NULL;
1160bad_orphan:
1161        ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
1162        printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n",
1163               bit, (unsigned long long)bitmap_bh->b_blocknr,
1164               ext4_test_bit(bit, bitmap_bh->b_data));
1165        printk(KERN_WARNING "inode=%p\n", inode);
1166        if (inode) {
1167                printk(KERN_WARNING "is_bad_inode(inode)=%d\n",
1168                       is_bad_inode(inode));
1169                printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n",
1170                       NEXT_ORPHAN(inode));
1171                printk(KERN_WARNING "max_ino=%lu\n", max_ino);
1172                printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink);
1173                /* Avoid freeing blocks if we got a bad deleted inode */
1174                if (inode->i_nlink == 0)
1175                        inode->i_blocks = 0;
1176                iput(inode);
1177        }
1178        brelse(bitmap_bh);
1179error:
1180        return ERR_PTR(err);
1181}
1182
1183unsigned long ext4_count_free_inodes(struct super_block *sb)
1184{
1185        unsigned long desc_count;
1186        struct ext4_group_desc *gdp;
1187        ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1188#ifdef EXT4FS_DEBUG
1189        struct ext4_super_block *es;
1190        unsigned long bitmap_count, x;
1191        struct buffer_head *bitmap_bh = NULL;
1192
1193        es = EXT4_SB(sb)->s_es;
1194        desc_count = 0;
1195        bitmap_count = 0;
1196        gdp = NULL;
1197        for (i = 0; i < ngroups; i++) {
1198                gdp = ext4_get_group_desc(sb, i, NULL);
1199                if (!gdp)
1200                        continue;
1201                desc_count += ext4_free_inodes_count(sb, gdp);
1202                brelse(bitmap_bh);
1203                bitmap_bh = ext4_read_inode_bitmap(sb, i);
1204                if (!bitmap_bh)
1205                        continue;
1206
1207                x = ext4_count_free(bitmap_bh->b_data,
1208                                    EXT4_INODES_PER_GROUP(sb) / 8);
1209                printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
1210                        (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
1211                bitmap_count += x;
1212        }
1213        brelse(bitmap_bh);
1214        printk(KERN_DEBUG "ext4_count_free_inodes: "
1215               "stored = %u, computed = %lu, %lu\n",
1216               le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
1217        return desc_count;
1218#else
1219        desc_count = 0;
1220        for (i = 0; i < ngroups; i++) {
1221                gdp = ext4_get_group_desc(sb, i, NULL);
1222                if (!gdp)
1223                        continue;
1224                desc_count += ext4_free_inodes_count(sb, gdp);
1225                cond_resched();
1226        }
1227        return desc_count;
1228#endif
1229}
1230
1231/* Called at mount-time, super-block is locked */
1232unsigned long ext4_count_dirs(struct super_block * sb)
1233{
1234        unsigned long count = 0;
1235        ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1236
1237        for (i = 0; i < ngroups; i++) {
1238                struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1239                if (!gdp)
1240                        continue;
1241                count += ext4_used_dirs_count(sb, gdp);
1242        }
1243        return count;
1244}
1245
1246/*
 1247 * Zero out an inode table that has not yet been zeroed, by writing zeroes
 1248 * through the whole table. Must be called without any spinlock held. The
 1249 * only place this is called from on an active filesystem is the ext4lazyinit
 1250 * thread, so we do not need any special locks; however, we have to prevent
 1251 * inode allocation from the current group, so we take the alloc_sem lock to
 1252 * block ext4_new_inode() until we are finished.
1253 */
1254int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
1255                                 int barrier)
1256{
1257        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1258        struct ext4_sb_info *sbi = EXT4_SB(sb);
1259        struct ext4_group_desc *gdp = NULL;
1260        struct buffer_head *group_desc_bh;
1261        handle_t *handle;
1262        ext4_fsblk_t blk;
1263        int num, ret = 0, used_blks = 0;
1264
 1265        /* This should not happen, but check it just to be sure */
1266        if (sb->s_flags & MS_RDONLY) {
1267                ret = 1;
1268                goto out;
1269        }
1270
1271        gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
1272        if (!gdp)
1273                goto out;
1274
1275        /*
1276         * We do not need to lock this, because we are the only one
1277         * handling this flag.
1278         */
1279        if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
1280                goto out;
1281
1282        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
1283        if (IS_ERR(handle)) {
1284                ret = PTR_ERR(handle);
1285                goto out;
1286        }
1287
1288        down_write(&grp->alloc_sem);
1289        /*
 1290         * If the inode bitmap was already initialized there may be some
 1291         * used inodes, so we need to skip the blocks with used inodes in
 1292         * the inode table.
1293         */
1294        if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
1295                used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
1296                            ext4_itable_unused_count(sb, gdp)),
1297                            sbi->s_inodes_per_block);
1298
1299        if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
1300                ext4_error(sb, "Something is wrong with group %u: "
1301                           "used itable blocks: %d; "
1302                           "itable unused count: %u",
1303                           group, used_blks,
1304                           ext4_itable_unused_count(sb, gdp));
1305                ret = 1;
1306                goto err_out;
1307        }
1308
1309        blk = ext4_inode_table(sb, gdp) + used_blks;
1310        num = sbi->s_itb_per_group - used_blks;
1311
1312        BUFFER_TRACE(group_desc_bh, "get_write_access");
1313        ret = ext4_journal_get_write_access(handle,
1314                                            group_desc_bh);
1315        if (ret)
1316                goto err_out;
1317
1318        /*
1319         * Skip zeroout if the inode table is full. But we set the ZEROED
1320         * flag anyway, because obviously, when it is full it does not need
1321         * further zeroing.
1322         */
1323        if (unlikely(num == 0))
1324                goto skip_zeroout;
1325
1326        ext4_debug("going to zero out inode table in group %d\n",
1327                   group);
1328        ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
1329        if (ret < 0)
1330                goto err_out;
1331        if (barrier)
1332                blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
1333
1334skip_zeroout:
1335        ext4_lock_group(sb, group);
1336        gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
1337        ext4_group_desc_csum_set(sb, group, gdp);
1338        ext4_unlock_group(sb, group);
1339
1340        BUFFER_TRACE(group_desc_bh,
1341                     "call ext4_handle_dirty_metadata");
1342        ret = ext4_handle_dirty_metadata(handle, NULL,
1343                                         group_desc_bh);
1344
1345err_out:
1346        up_write(&grp->alloc_sem);
1347        ext4_journal_stop(handle);
1348out:
1349        return ret;
1350}
1351