/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>

#include "group.h"

/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
                unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        ext4_grpblk_t offset;

        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
        offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
        if (offsetp)
                *offsetp = offset;
        if (blockgrpp)
                *blockgrpp = blocknr;
}
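
/*
 * Worked example (illustrative numbers only, assuming a 1KB-block
 * filesystem where s_first_data_block == 1 and
 * EXT4_BLOCKS_PER_GROUP(sb) == 8192):
 *
 *      blocknr = 40000:  40000 - 1 = 39999
 *      39999 / 8192 = group 4, remainder (offset) 7231
 *
 * so block 40000 corresponds to bit 7231 in block group 4's bitmap.
 * do_div() is used because ext4_fsblk_t is 64 bits wide and plain
 * 64-bit division is not available on all 32-bit platforms.
 */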

/*
 * Initializes an uninitialized block bitmap if given, and returns the
 * number of free blocks in the group.
 */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
                                int block_group, struct ext4_group_desc *gdp)
{
        unsigned long start;
        int bit, bit_max;
        unsigned free_blocks, group_blocks;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (bh) {
                J_ASSERT_BH(bh, buffer_locked(bh));

                /* If the checksum is bad, mark all blocks used to prevent
                 * allocation, essentially implementing a per-group
                 * read-only flag. */
                if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
                        ext4_error(sb, __FUNCTION__,
                                   "Checksum bad for group %u\n", block_group);
                        gdp->bg_free_blocks_count = 0;
                        gdp->bg_free_inodes_count = 0;
                        gdp->bg_itable_unused = 0;
                        memset(bh->b_data, 0xff, sb->s_blocksize);
                        return 0;
                }
                memset(bh->b_data, 0, sb->s_blocksize);
        }

        /* Check for superblock and gdt backups in this group */
        bit_max = ext4_bg_has_super(sb, block_group);

        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
            block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
                          sbi->s_desc_per_block) {
                if (bit_max) {
                        bit_max += ext4_bg_num_gdb(sb, block_group);
                        bit_max +=
                                le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
                }
        } else { /* For META_BG_BLOCK_GROUPS */
                int group_rel = (block_group -
                                 le32_to_cpu(sbi->s_es->s_first_meta_bg)) %
                                EXT4_DESC_PER_BLOCK(sb);
                if (group_rel == 0 || group_rel == 1 ||
                    (group_rel == EXT4_DESC_PER_BLOCK(sb) - 1))
                        bit_max += 1;
        }

        if (block_group == sbi->s_groups_count - 1) {
                /*
                 * Even though mke2fs always initializes the first and last
                 * groups, some other tool may have enabled
                 * EXT4_BG_BLOCK_UNINIT, so we need to make sure we calculate
                 * the right free block count for the (possibly short) last
                 * group.
                 */
                group_blocks = ext4_blocks_count(sbi->s_es) -
                        le32_to_cpu(sbi->s_es->s_first_data_block) -
                        (EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count - 1));
        } else {
                group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
        }

        free_blocks = group_blocks - bit_max;

        if (bh) {
                for (bit = 0; bit < bit_max; bit++)
                        ext4_set_bit(bit, bh->b_data);

                start = block_group * EXT4_BLOCKS_PER_GROUP(sb) +
                        le32_to_cpu(sbi->s_es->s_first_data_block);

                /* Set bits for block and inode bitmaps, and inode table */
                ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
                ext4_set_bit(ext4_inode_bitmap(sb, gdp) - start, bh->b_data);
                for (bit = (ext4_inode_table(sb, gdp) - start),
                     bit_max = bit + sbi->s_itb_per_group; bit < bit_max; bit++)
                        ext4_set_bit(bit, bh->b_data);

                /*
                 * Also, if the number of blocks within the group is less
                 * than blocksize * 8 (the size of the bitmap in bits), set
                 * the rest of the block bitmap to 1.
                 */
                mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
        }

        return free_blocks - sbi->s_itb_per_group - 2;
}


/*
 * The free blocks are managed by bitmaps.  A filesystem contains several
 * block groups.  Each group contains one bitmap block for blocks, one
 * bitmap block for inodes, N blocks for the inode table, and the data
 * blocks.
 *
 * The filesystem contains group descriptors which are located after the
 * superblock.  Each descriptor contains the number of the bitmap block and
 * the free block count of the group.  The descriptors are loaded in memory
 * when a filesystem is mounted (see ext4_fill_super).
 */
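
/*
 * For example (illustrative figures only): with a 4KB block size, one
 * bitmap block covers 4096 * 8 = 32768 blocks, so a full group spans
 * 32768 blocks (128MB) and is laid out roughly as
 *
 *      [ superblock backup | group descriptors | block bitmap |
 *        inode bitmap | inode table (N blocks) | data blocks ... ]
 *
 * Groups without a superblock backup (see ext4_bg_has_super()) begin
 * directly with the block bitmap.
 */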

#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:                 super block
 * @block_group:        given block group
 * @bh:                 pointer to the buffer head to store the block
 *                      group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
                                             unsigned int block_group,
                                             struct buffer_head ** bh)
{
        unsigned long group_desc;
        unsigned long offset;
        struct ext4_group_desc * desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (block_group >= sbi->s_groups_count) {
                ext4_error(sb, "ext4_get_group_desc",
                           "block_group >= groups_count - "
                           "block_group = %d, groups_count = %lu",
                           block_group, sbi->s_groups_count);

                return NULL;
        }
        smp_rmb();

        group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
                ext4_error(sb, "ext4_get_group_desc",
                           "Group descriptor not loaded - "
                           "block_group = %d, group_desc = %lu, desc = %lu",
                           block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext4_group_desc *)(
                (__u8 *)sbi->s_group_desc[group_desc]->b_data +
                offset * EXT4_DESC_SIZE(sb));
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc;
}
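
/*
 * Typical usage (sketch only, compiled out; the example_* helper below
 * is hypothetical): look up a group's descriptor and read its free
 * block count.  "gd_bh" receives the buffer head backing the descriptor
 * block, so a caller that modifies the descriptor can journal it.
 */
#if 0
static void example_show_free_blocks(struct super_block *sb,
                                     unsigned int group)
{
        struct buffer_head *gd_bh;
        struct ext4_group_desc *desc;

        desc = ext4_get_group_desc(sb, group, &gd_bh);
        if (!desc)
                return;         /* group out of range or not loaded */
        printk(KERN_DEBUG "group %u: %u free blocks\n", group,
               le16_to_cpu(desc->bg_free_blocks_count));
}
#endif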

/**
 * read_block_bitmap()
 * @sb:                 super block
 * @block_group:        given block group
 *
 * Read the bitmap for a given block_group, reading into the specified
 * slot in the superblock's bitmap cache.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
        struct ext4_group_desc * desc;
        struct buffer_head * bh = NULL;
        ext4_fsblk_t bitmap_blk;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return NULL;
        bitmap_blk = ext4_block_bitmap(sb, desc);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                bh = sb_getblk(sb, bitmap_blk);
                if (bh && !buffer_uptodate(bh)) {
                        lock_buffer(bh);
                        if (!buffer_uptodate(bh)) {
                                ext4_init_block_bitmap(sb, bh, block_group,
                                                       desc);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
                }
        } else {
                bh = sb_bread(sb, bitmap_blk);
        }
        if (!bh)
                ext4_error(sb, __FUNCTION__,
                           "Cannot read block bitmap - "
                           "block_group = %d, block_bitmap = %llu",
                           block_group, bitmap_blk);
        return bh;
}
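
/*
 * Typical usage (sketch only, compiled out; the example_* helper below
 * is hypothetical): fetch the bitmap for a group and test whether a
 * group-relative block is in use.  The caller owns the returned buffer
 * head and must brelse() it.
 */
#if 0
static int example_block_in_use(struct super_block *sb,
                                unsigned int group, ext4_grpblk_t bit)
{
        struct buffer_head *bitmap_bh;
        int used;

        bitmap_bh = read_block_bitmap(sb, group);
        if (!bitmap_bh)
                return -EIO;
        used = ext4_test_bit(bit, bitmap_bh->b_data);
        brelse(bitmap_bh);
        return used;
}
#endif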

/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * We use a red-black tree to represent per-filesystem reservation
 * windows.
 */

/**
 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
 * @root:               root of the per-filesystem reservation rb tree
 * @verbose:            verbose mode
 * @fn:                 function which wishes to dump the reservation map
 *
 * If verbose is turned on, it will print the whole block reservation
 * windows (start, end).  Otherwise, it will only print out the "bad"
 * windows, those windows that overlap with their immediate neighbors.
 */
#if 1
static void __rsv_window_dump(struct rb_root *root, int verbose,
                              const char *fn)
{
        struct rb_node *n;
        struct ext4_reserve_window_node *rsv, *prev;
        int bad;

restart:
        n = rb_first(root);
        bad = 0;
        prev = NULL;

        printk("Block Allocation Reservation Windows Map (%s):\n", fn);
        while (n) {
                rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
                if (verbose)
                        printk("reservation window 0x%p "
                               "start:  %llu, end:  %llu\n",
                               rsv, rsv->rsv_start, rsv->rsv_end);
                if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
                        printk("Bad reservation %p (start >= end)\n",
                               rsv);
                        bad = 1;
                }
                if (prev && prev->rsv_end >= rsv->rsv_start) {
                        printk("Bad reservation %p (prev->end >= start)\n",
                               rsv);
                        bad = 1;
                }
                if (bad) {
                        if (!verbose) {
                                printk("Restarting reservation walk in verbose mode\n");
                                verbose = 1;
                                goto restart;
                        }
                }
                n = rb_next(n);
                prev = rsv;
        }
        printk("Window map complete.\n");
        if (bad)
                BUG();
}
#define rsv_window_dump(root, verbose) \
        __rsv_window_dump((root), (verbose), __FUNCTION__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif

/**
 * goal_in_my_reservation()
 * @rsv:                inode's reservation window
 * @grp_goal:           given goal block relative to the allocation block group
 * @group:              the current allocation block group
 * @sb:                 filesystem super block
 *
 * Test if the given goal block (group relative) is within the file's
 * own block reservation window range.
 *
 * If the reservation window is outside the goal allocation group, return 0.
 * grp_goal (the given goal block) could be -1, which means there is no
 * specific goal block; in that case, always return 1.
 * If the goal block is within the reservation window, return 1;
 * otherwise, return 0.
 */
static int
goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
                        unsigned int group, struct super_block * sb)
{
        ext4_fsblk_t group_first_block, group_last_block;

        group_first_block = ext4_group_first_block_no(sb, group);
        group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

        if ((rsv->_rsv_start > group_last_block) ||
            (rsv->_rsv_end < group_first_block))
                return 0;
        if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
                || (grp_goal + group_first_block > rsv->_rsv_end)))
                return 0;
        return 1;
}
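
/*
 * Worked example (illustrative numbers, for a 1KB-block filesystem
 * where group 2 spans blocks 16385-24576): suppose the file's window
 * is [16400, 16500].  A grp_goal of 20 maps to absolute block 16405,
 * inside the window, so the function returns 1.  A grp_goal of 200
 * maps to block 16585, outside the window, so it returns 0.
 */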

/**
 * search_reserve_window()
 * @root:               root of the reservation tree
 * @goal:               target allocation block
 *
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after
 * the goal.
 */
static struct ext4_reserve_window_node *
search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
{
        struct rb_node *n = root->rb_node;
        struct ext4_reserve_window_node *rsv;

        if (!n)
                return NULL;

        do {
                rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);

                if (goal < rsv->rsv_start)
                        n = n->rb_left;
                else if (goal > rsv->rsv_end)
                        n = n->rb_right;
                else
                        return rsv;
        } while (n);
        /*
         * We've fallen off the end of the tree: the goal wasn't inside
         * any particular node.  OK, the previous node must be to one
         * side of the interval containing the goal.  If it's the RHS,
         * we need to back up one.
         */
        if (rsv->rsv_start > goal) {
                n = rb_prev(&rsv->rsv_node);
                rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
        }
        return rsv;
}

/**
 * ext4_rsv_window_add() -- Insert a window into the block reservation rb tree.
 * @sb:                 super block
 * @rsv:                reservation window to add
 *
 * Must be called with rsv_lock held.
 */
void ext4_rsv_window_add(struct super_block *sb,
                    struct ext4_reserve_window_node *rsv)
{
        struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
        struct rb_node *node = &rsv->rsv_node;
        ext4_fsblk_t start = rsv->rsv_start;

        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct ext4_reserve_window_node *this;

        while (*p) {
                parent = *p;
                this = rb_entry(parent, struct ext4_reserve_window_node,
                                rsv_node);

                if (start < this->rsv_start)
                        p = &(*p)->rb_left;
                else if (start > this->rsv_end)
                        p = &(*p)->rb_right;
                else {
                        rsv_window_dump(root, 1);
                        BUG();
                }
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
}

/**
 * rsv_window_remove() -- unlink a window from the reservation rb tree
 * @sb:                 super block
 * @rsv:                reservation window to remove
 *
 * Mark the block reservation window as not allocated, and unlink it
 * from the filesystem reservation window rb tree.  Must be called with
 * rsv_lock held.
 */
static void rsv_window_remove(struct super_block *sb,
                              struct ext4_reserve_window_node *rsv)
{
        rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
        rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
        rsv->rsv_alloc_hit = 0;
        rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
}

/*
 * rsv_is_empty() -- Check if the reservation window is allocated.
 * @rsv:                given reservation window to check
 *
 * Returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
 */
static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
{
        /* a valid reservation end block could not be 0 */
        return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
}

/**
 * ext4_init_block_alloc_info()
 * @inode:              file inode structure
 *
 * Allocate and initialize the reservation window structure, and
 * finally link the window to the ext4 inode structure.
 *
 * The reservation window structure is only dynamically allocated
 * and linked to the ext4 inode the first time the open file
 * needs a new block.  So, before every ext4_new_block(s) call, for
 * regular files, we should check whether the reservation window
 * structure exists or not; if it does not, this function is called.
 * Failure to do so will result in block reservation being turned off
 * for that open file.
 *
 * This function is called from ext4_get_blocks_handle(); it is also
 * called when setting the reservation window size through ioctl before
 * the file is opened for write (which needs block allocation).
 *
 * The caller must hold truncate_mutex when calling this function.
 */
void ext4_init_block_alloc_info(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_block_alloc_info *block_i;
        struct super_block *sb = inode->i_sb;

        block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
        if (block_i) {
                struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;

                rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
                rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;

                /*
                 * if the filesystem is mounted with NORESERVATION, the goal
                 * reservation window size is set to zero to indicate
                 * that block reservation is off
                 */
                if (!test_opt(sb, RESERVATION))
                        rsv->rsv_goal_size = 0;
                else
                        rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
                rsv->rsv_alloc_hit = 0;
                block_i->last_alloc_logical_block = 0;
                block_i->last_alloc_physical_block = 0;
        }
        ei->i_block_alloc_info = block_i;
}
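
/*
 * The check-then-initialize pattern described above (sketch only,
 * compiled out).  This roughly mirrors what a block-allocating caller
 * such as ext4_get_blocks_handle() does before asking for new blocks:
 */
#if 0
        /* caller holds truncate_mutex */
        if (S_ISREG(inode->i_mode) && !EXT4_I(inode)->i_block_alloc_info)
                ext4_init_block_alloc_info(inode);
#endif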

/**
 * ext4_discard_reservation()
 * @inode:              inode
 *
 * Discard (free) the block reservation window on last file close, on
 * truncate, or at last iput().
 *
 * It is called in three cases:
 *      ext4_release_file(): when the last writer closes the file
 *      ext4_clear_inode(): at the last iput(), when nobody links to this file
 *      ext4_truncate(): when the block indirect map is about to change
 */
void ext4_discard_reservation(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
        struct ext4_reserve_window_node *rsv;
        spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;

        if (!block_i)
                return;

        rsv = &block_i->rsv_window_node;
        if (!rsv_is_empty(&rsv->rsv_window)) {
                spin_lock(rsv_lock);
                if (!rsv_is_empty(&rsv->rsv_window))
                        rsv_window_remove(inode->i_sb, rsv);
                spin_unlock(rsv_lock);
        }
}

/**
 * ext4_free_blocks_sb() -- Free given blocks and update quota
 * @handle:                     handle to this transaction
 * @sb:                         super block
 * @block:                      start physical block to free
 * @count:                      number of blocks to free
 * @pdquot_freed_blocks:        out: number of blocks freed, for the
 *                              caller's quota charge-back
 */
void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
                         ext4_fsblk_t block, unsigned long count,
                         unsigned long *pdquot_freed_blocks)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gd_bh;
        unsigned long block_group;
        ext4_grpblk_t bit;
        unsigned long i;
        unsigned long overflow;
        struct ext4_group_desc * desc;
        struct ext4_super_block * es;
        struct ext4_sb_info *sbi;
        int err = 0, ret;
        ext4_grpblk_t group_freed;

        *pdquot_freed_blocks = 0;
        sbi = EXT4_SB(sb);
        es = sbi->s_es;
        if (block < le32_to_cpu(es->s_first_data_block) ||
            block + count < block ||
            block + count > ext4_blocks_count(es)) {
                ext4_error(sb, "ext4_free_blocks",
                           "Freeing blocks not in datazone - "
                           "block = %llu, count = %lu", block, count);
                goto error_return;
        }

        ext4_debug("freeing block(s) %llu-%llu\n", block, block + count - 1);

do_more:
        overflow = 0;
        ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
        /*
         * Check to see if we are freeing blocks across a group
         * boundary.
         */
        if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
                overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
                count -= overflow;
        }
        brelse(bitmap_bh);
        bitmap_bh = read_block_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;
        desc = ext4_get_group_desc(sb, block_group, &gd_bh);
        if (!desc)
                goto error_return;

        if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
            in_range(ext4_inode_bitmap(sb, desc), block, count) ||
            in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
            in_range(block + count - 1, ext4_inode_table(sb, desc),
                     sbi->s_itb_per_group))
                ext4_error(sb, "ext4_free_blocks",
                           "Freeing blocks in system zones - "
                           "Block = %llu, count = %lu",
                           block, count);

        /*
         * We are about to start releasing blocks in the bitmap,
         * so we need undo access.
         */
        /* @@@ check errors */
        BUFFER_TRACE(bitmap_bh, "getting undo access");
        err = ext4_journal_get_undo_access(handle, bitmap_bh);
        if (err)
                goto error_return;

        /*
         * We are about to modify some metadata.  Call the journal APIs
         * to unshare ->b_data if a currently-committing transaction is
         * using it
         */
        BUFFER_TRACE(gd_bh, "get_write_access");
        err = ext4_journal_get_write_access(handle, gd_bh);
        if (err)
                goto error_return;

        jbd_lock_bh_state(bitmap_bh);

        for (i = 0, group_freed = 0; i < count; i++) {
                /*
                 * An HJ special.  This is expensive...
                 */
#ifdef CONFIG_JBD2_DEBUG
                jbd_unlock_bh_state(bitmap_bh);
                {
                        struct buffer_head *debug_bh;
                        debug_bh = sb_find_get_block(sb, block + i);
                        if (debug_bh) {
                                BUFFER_TRACE(debug_bh, "Deleted!");
                                if (!bh2jh(bitmap_bh)->b_committed_data)
                                        BUFFER_TRACE(debug_bh,
                                                "No committed data in bitmap");
                                BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
                                __brelse(debug_bh);
                        }
                }
                jbd_lock_bh_state(bitmap_bh);
#endif
                if (need_resched()) {
                        jbd_unlock_bh_state(bitmap_bh);
                        cond_resched();
                        jbd_lock_bh_state(bitmap_bh);
                }
                /* @@@ This prevents newly-allocated data from being
                 * freed and then reallocated within the same
                 * transaction.
                 *
                 * Ideally we would want to allow that to happen, but to
                 * do so requires making jbd2_journal_forget() capable of
                 * revoking the queued write of a data block, which
                 * implies blocking on the journal lock.  *forget()
                 * cannot block due to truncate races.
                 *
                 * Eventually we can fix this by making jbd2_journal_forget()
                 * return a status indicating whether or not it was able
                 * to revoke the buffer.  On successful revoke, it is
                 * safe not to set the allocation bit in the committed
                 * bitmap, because we know that there is no outstanding
                 * activity on the buffer any more and so it is safe to
                 * reallocate it.
                 */
                BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
                J_ASSERT_BH(bitmap_bh,
                                bh2jh(bitmap_bh)->b_committed_data != NULL);
                ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
                                bh2jh(bitmap_bh)->b_committed_data);

                /*
                 * We clear the bit in the bitmap after setting the committed
                 * data bit, because this is the reverse order to that which
                 * the allocator uses.
                 */
                BUFFER_TRACE(bitmap_bh, "clear bit");
                if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                                bit + i, bitmap_bh->b_data)) {
                        jbd_unlock_bh_state(bitmap_bh);
                        ext4_error(sb, __FUNCTION__,
                                   "bit already cleared for block %llu",
                                   (ext4_fsblk_t)(block + i));
                        jbd_lock_bh_state(bitmap_bh);
                        BUFFER_TRACE(bitmap_bh, "bit already cleared");
                } else {
                        group_freed++;
                }
        }
        jbd_unlock_bh_state(bitmap_bh);

        spin_lock(sb_bgl_lock(sbi, block_group));
        desc->bg_free_blocks_count =
                cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
                        group_freed);
        desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
        spin_unlock(sb_bgl_lock(sbi, block_group));
        percpu_counter_add(&sbi->s_freeblocks_counter, count);

        /* We dirtied the bitmap block */
        BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
        err = ext4_journal_dirty_metadata(handle, bitmap_bh);

        /* And the group descriptor block */
        BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
        ret = ext4_journal_dirty_metadata(handle, gd_bh);
        if (!err)
                err = ret;
        *pdquot_freed_blocks += group_freed;

        if (overflow && !err) {
                block += count;
                count = overflow;
                goto do_more;
        }
        sb->s_dirt = 1;
error_return:
        brelse(bitmap_bh);
        ext4_std_error(sb, err);
        return;
}
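
/*
 * Worked example for the do_more loop above (illustrative numbers,
 * 8192 blocks per group): freeing count = 100 blocks starting at bit
 * 8150 of a group gives bit + count = 8250 > 8192, so overflow = 58
 * and the first pass frees 42 blocks (bits 8150-8191) in this group.
 * It then jumps back to do_more with block += 42 and count = 58 to
 * free the remainder from the start of the next group.
 */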

/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle:             handle for this transaction
 * @inode:              inode
 * @block:              start physical block to free
 * @count:              number of blocks to free
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
                        ext4_fsblk_t block, unsigned long count)
{
        struct super_block * sb;
        unsigned long dquot_freed_blocks;

        sb = inode->i_sb;
        if (!sb) {
                printk(KERN_ERR "ext4_free_blocks: nonexistent device\n");
                return;
        }
        ext4_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
        if (dquot_freed_blocks)
                DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
        return;
}
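
/*
 * Typical call site (sketch only, compiled out; first_block and
 * nr_blocks are hypothetical locals): the truncate path frees a run of
 * data blocks it has just unmapped, and the blocks are credited back
 * to the owner's quota automatically:
 */
#if 0
        ext4_free_blocks(handle, inode, first_block, nr_blocks);
#endif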

/**
 * ext4_test_allocatable()
 * @nr:                 block number to test (relative to the block group)
 * @bh:                 buffer head containing the bitmap of the block group
 *
 * For ext4 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
{
        int ret;
        struct journal_head *jh = bh2jh(bh);

        if (ext4_test_bit(nr, bh->b_data))
                return 0;

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data)
                ret = 1;
        else
                ret = !ext4_test_bit(nr, jh->b_committed_data);
        jbd_unlock_bh_state(bh);
        return ret;
}

/**
 * bitmap_search_next_usable_block()
 * @start:              the starting block (group relative) of the search
 * @bh:                 buffer head containing the block group bitmap
 * @maxblocks:          the ending block (group relative) of the reservation
 *
 * The bitmap search --- search forward alternately through the actual
 * bitmap on disk and the last-committed copy in the journal, until we
 * find a bit that is free in both bitmaps.
 */
static ext4_grpblk_t
bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
                                        ext4_grpblk_t maxblocks)
{
        ext4_grpblk_t next;
        struct journal_head *jh = bh2jh(bh);

        while (start < maxblocks) {
                next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
                if (next >= maxblocks)
                        return -1;
                if (ext4_test_allocatable(next, bh))
                        return next;
                jbd_lock_bh_state(bh);
                if (jh->b_committed_data)
                        start = ext4_find_next_zero_bit(jh->b_committed_data,
                                                        maxblocks, next);
                jbd_unlock_bh_state(bh);
        }
        return -1;
}

/**
 * find_next_usable_block()
 * @start:              the starting block (group relative) from which to
 *                      search for the next allocatable block in the bitmap
 * @bh:                 buffer head containing the block group bitmap
 * @maxblocks:          the ending block (group relative) for the search
 *
 * Find an allocatable block in a bitmap.  We honor both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static ext4_grpblk_t
find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
                        ext4_grpblk_t maxblocks)
{
        ext4_grpblk_t here, next;
        char *p, *r;

        if (start > 0) {
                /*
                 * The goal was occupied; search forward for a free
                 * block within the next XX blocks.
                 *
                 * end_goal is more or less random, but it has to be
                 * less than EXT4_BLOCKS_PER_GROUP.  Aligning up to the
                 * next 64-bit boundary is simple.
                 */
                ext4_grpblk_t end_goal = (start + 63) & ~63;
                if (end_goal > maxblocks)
                        end_goal = maxblocks;
                here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
                if (here < end_goal && ext4_test_allocatable(here, bh))
                        return here;
                ext4_debug("Bit not found near goal\n");
        }

        here = start;
        if (here < 0)
                here = 0;

        p = ((char *)bh->b_data) + (here >> 3);
        r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
        next = (r - ((char *)bh->b_data)) << 3;

        if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
                return next;

        /*
         * The bitmap search --- search forward alternately through the actual
         * bitmap and the last-committed copy until we find a bit free in
         * both
         */
        here = bitmap_search_next_usable_block(here, bh, maxblocks);
        return here;
}
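
/*
 * Worked example of the three phases above (illustrative numbers):
 * with start = 70 and the goal bit taken, phase one scans bits 70-127
 * (the 64-bit-aligned end_goal is (70 + 63) & ~63 = 128) for a free
 * bit near the goal.  Failing that, phase two memscan()s for the first
 * all-zero byte, i.e. 8 adjacent free blocks, favouring a roomy
 * region.  Failing that too, phase three falls back to
 * bitmap_search_next_usable_block() to take any bit that is free in
 * both the on-disk and last-committed bitmaps.
 */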

/**
 * claim_block()
 * @lock:               the per-group spinlock protecting the bitmap
 * @block:              the free block (group relative) to allocate
 * @bh:                 the buffer head containing the block group bitmap
 *
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
{
        struct journal_head *jh = bh2jh(bh);
        int ret;

        if (ext4_set_bit_atomic(lock, block, bh->b_data))
                return 0;
        jbd_lock_bh_state(bh);
        if (jh->b_committed_data && ext4_test_bit(block, jh->b_committed_data)) {
                ext4_clear_bit_atomic(lock, block, bh->b_data);
                ret = 0;
        } else {
                ret = 1;
        }
        jbd_unlock_bh_state(bh);
        return ret;
}

/**
 * ext4_try_to_allocate()
 * @sb:                 superblock
 * @handle:             handle to this transaction
 * @group:              given allocation block group
 * @bitmap_bh:          buffer head holding the block bitmap
 * @grp_goal:           given target block within the group
 * @count:              target number of blocks to allocate
 * @my_rsv:             reservation window
 *
 * Attempt to allocate blocks within a given range.  Set the range of the
 * allocation first, then find the first free bit(s) in the bitmap (within
 * the range), and finally allocate the blocks by claiming the found free
 * bit(s) as allocated.
 *
 * To set the range of this allocation:
 *      if there is a reservation window, only try to allocate block(s)
 *      from the file's own reservation window;
 *      otherwise, the allocation range starts at the given goal block and
 *      ends at the block group's last block.
 *
 * If we failed to allocate the desired block then we may end up crossing
 * to a new bitmap.  In that case we must release write access to the old
 * one via ext4_journal_release_buffer(), else we'll run out of credits.
 */
static ext4_grpblk_t
ext4_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
                        struct buffer_head *bitmap_bh, ext4_grpblk_t grp_goal,
                        unsigned long *count, struct ext4_reserve_window *my_rsv)
{
        ext4_fsblk_t group_first_block;
        ext4_grpblk_t start, end;
        unsigned long num = 0;

        /* we do allocation within the reservation window if we have a window */
        if (my_rsv) {
                group_first_block = ext4_group_first_block_no(sb, group);
                if (my_rsv->_rsv_start >= group_first_block)
                        start = my_rsv->_rsv_start - group_first_block;
                else
                        /* reservation window crosses group boundary */
                        start = 0;
                end = my_rsv->_rsv_end - group_first_block + 1;
                if (end > EXT4_BLOCKS_PER_GROUP(sb))
                        /* reservation window crosses group boundary */
                        end = EXT4_BLOCKS_PER_GROUP(sb);
                if ((start <= grp_goal) && (grp_goal < end))
                        start = grp_goal;
                else
                        grp_goal = -1;
        } else {
                if (grp_goal > 0)
                        start = grp_goal;
                else
                        start = 0;
                end = EXT4_BLOCKS_PER_GROUP(sb);
        }

        BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));

repeat:
        if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
                grp_goal = find_next_usable_block(start, bitmap_bh, end);
                if (grp_goal < 0)
                        goto fail_access;
                if (!my_rsv) {
                        int i;

                        for (i = 0; i < 7 && grp_goal > start &&
                                        ext4_test_allocatable(grp_goal - 1,
                                                                bitmap_bh);
                                        i++, grp_goal--)
                                ;
                }
        }
        start = grp_goal;

        if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
                grp_goal, bitmap_bh)) {
                /*
                 * The block was allocated by another thread, or it was
                 * allocated and then freed by another thread
                 */
                start++;
                grp_goal++;
                if (start >= end)
                        goto fail_access;
                goto repeat;
        }
        num++;
        grp_goal++;
        while (num < *count && grp_goal < end
                && ext4_test_allocatable(grp_goal, bitmap_bh)
                && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
                                grp_goal, bitmap_bh)) {
                num++;
                grp_goal++;
        }
        *count = num;
        return grp_goal - num;
fail_access:
        *count = num;
        return -1;
}

/**
 *      find_next_reservable_window():
 *              find a reservable space within the given range.
 *              It does not allocate the reservation window for now;
 *              alloc_new_reservation() will do the work later.
 *
 *      @search_head: the head of the searching list;
 *              This is not necessarily the list head of the whole
 *              filesystem.
 *
 *              We have both head and start_block to assist the search
 *              for the reservable space.  The list starts from head,
 *              but we will shift to the place where start_block is,
 *              then start from there, when looking for a reservable space.
 *
 *      @my_rsv: the reservation window to be established
 *
 *      @sb: the super block
 *
 *      @start_block: the first block we consider to start
 *              the real search from
 *
 *      @last_block:
 *              the maximum block number that our goal reservable space
 *              could start from.  This is normally the last block in this
 *              group.  The search ends when we find that the start of the
 *              next possible reservable space is beyond this boundary.
 *              This handles the case of a reservation window crossing the
 *              group boundary.
 *
 *      Basically we search the given range (start_block, last_block),
 *      rather than the whole reservation doubly-linked list, to find a
 *      free region that is of the requested size and has not been
 *      reserved.
 */
static int find_next_reservable_window(
                                struct ext4_reserve_window_node *search_head,
                                struct ext4_reserve_window_node *my_rsv,
                                struct super_block * sb,
                                ext4_fsblk_t start_block,
                                ext4_fsblk_t last_block)
{
        struct rb_node *next;
        struct ext4_reserve_window_node *rsv, *prev;
        ext4_fsblk_t cur;
        int size = my_rsv->rsv_goal_size;

        /* TODO: make the start of the reservation window byte-aligned */
        /* cur = *start_block & ~7;*/
        cur = start_block;
        rsv = search_head;
        if (!rsv)
                return -1;

        while (1) {
                if (cur <= rsv->rsv_end)
                        cur = rsv->rsv_end + 1;

                /* TODO?
                 * in the case we could not find a reservable space of the
                 * expected size, during the re-search, we could remember
                 * the largest reservable space we did find and return
                 * that one.
                 *
                 * For now it will fail if we could not find the reservable
                 * space with the expected size (or more)...
                 */
                if (cur > last_block)
                        return -1;              /* fail */

                prev = rsv;
                next = rb_next(&rsv->rsv_node);

                /*
                 * Reached the last reservation; we can just append to the
                 * previous one.
                 */
                if (!next)
                        break;

                rsv = rb_entry(next, struct ext4_reserve_window_node,
                               rsv_node);
                if (cur + size <= rsv->rsv_start) {
                        /*
                         * Found a reservable space big enough.  We could
                         * have a reservation across the group boundary here
                         */
                        break;
                }
        }
        /*
         * we come here either:
         * when we reach the end of the whole list, and there is empty
         * reservable space after the last entry in the list, in which
         * case we append the new window to the end of the list;
         *
         * or when we find a reservable space in the middle of the list,
         * in which case we append to the reservation window we stopped
         * at.  Either way we succeed.
         */

        if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
                rsv_window_remove(sb, my_rsv);

        /*
         * Let's book the whole available window for now.  We will check the
         * disk bitmap later and then, if there are free blocks then we adjust
         * the window size if it's larger than requested.
         * Otherwise, we will remove this node from the tree the next time
         * find_next_reservable_window() is called.
         */
        my_rsv->rsv_start = cur;
        my_rsv->rsv_end = cur + size - 1;
        my_rsv->rsv_alloc_hit = 0;

        if (prev != my_rsv)
                ext4_rsv_window_add(sb, my_rsv);

        return 0;
}

/**
 *      alloc_new_reservation()--allocate a new reservation window
 *
 *              To make a new reservation, we search part of the filesystem
 *              reservation list (the part that falls inside the group).  We
 *              try to allocate a new reservation window near the allocation
 *              goal, or at the beginning of the group if there is no goal.
 *
 *              We first find a reservable space after the goal, then from
 *              there, we check the bitmap for the first free block after
 *              it.  If there is no free block until the end of the group,
 *              then the whole group is full and we fail.  Otherwise, we
 *              check whether the free block is inside the expected
 *              reservable space; if so, we succeed.
 *              If the first free block is outside the reservable space,
 *              then we start from that free block and search for the next
 *              available space, and so on.
 *
 *      On success, a new reservation is found and inserted into the list.
 *      It contains at least one free block, and it does not overlap with
 *      other reservation windows.
 *
 *      On failure, no reservation window was found in this group.
 *
 *      @my_rsv: the reservation
 *
 *      @grp_goal: the goal (group-relative).  It is where the search for a
 *              free reservable space should start from.
 *              If we have a grp_goal (grp_goal >= 0), then we start from
 *              there; with no grp_goal (grp_goal == -1), we start from the
 *              first block of the group.
 *
 *      @sb: the super block
 *      @group: the group we are trying to allocate in
 *      @bitmap_bh: the block group block bitmap
 *
 */
static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
                ext4_grpblk_t grp_goal, struct super_block *sb,
                unsigned int group, struct buffer_head *bitmap_bh)
{
        struct ext4_reserve_window_node *search_head;
        ext4_fsblk_t group_first_block, group_end_block, start_block;
        ext4_grpblk_t first_free_block;
        struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
        unsigned long size;
        int ret;
        spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

        group_first_block = ext4_group_first_block_no(sb, group);
        group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

        if (grp_goal < 0)
                start_block = group_first_block;
        else
                start_block = grp_goal + group_first_block;

        size = my_rsv->rsv_goal_size;

        if (!rsv_is_empty(&my_rsv->rsv_window)) {
                /*
                 * if the old reservation crosses the group boundary
                 * and if the goal is inside the old reservation window,
                 * we will come here when we just failed to allocate from
                 * the first part of the window.  We still have another part
                 * that belongs to the next group.  In this case, there is
                 * no point in discarding our window and trying to allocate
                 * a new one in this group (which will fail): we should keep
                 * the reservation window and simply move on.
                 *
                 * Maybe we could shift the start block of the reservation
                 * window to the first block of the next group.
                 */

                if ((my_rsv->rsv_start <= group_end_block) &&
                                (my_rsv->rsv_end > group_end_block) &&
                                (start_block >= my_rsv->rsv_start))
                        return -1;

                if ((my_rsv->rsv_alloc_hit >
                     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
                        /*
                         * if the previous allocation hit ratio is
                         * greater than 1/2, then we double the size of
                         * the reservation window the next time,
                         * otherwise we keep the same size window
                         */
                        size = size * 2;
                        if (size > EXT4_MAX_RESERVE_BLOCKS)
                                size = EXT4_MAX_RESERVE_BLOCKS;
                        my_rsv->rsv_goal_size = size;
                }
        }

        spin_lock(rsv_lock);
        /*
         * shift the search start to the window near the goal block
         */
        search_head = search_reserve_window(fs_rsv_root, start_block);

        /*
         * find_next_reservable_window() simply finds a reservable window
         * inside the given range (start_block, group_end_block).
         *
         * To make sure the reservation window has a free bit inside it, we
         * need to check the bitmap after we found a reservable window.
         */
retry:
        ret = find_next_reservable_window(search_head, my_rsv, sb,
                                                start_block, group_end_block);

        if (ret == -1) {
                if (!rsv_is_empty(&my_rsv->rsv_window))
                        rsv_window_remove(sb, my_rsv);
                spin_unlock(rsv_lock);
                return -1;
        }

        /*
         * On success, find_next_reservable_window() returns the
         * reservation window where there is a reservable space after it.
         * Before we reserve this reservable space, we need
         * to make sure there is at least a free block inside this region.
         *
         * Search the block bitmap and the copy of the last-committed
         * bitmap alternately for the first free bit, until we find an
         * allocatable block.  The search starts from the start block of
         * the reservable space we just found.
         */
        spin_unlock(rsv_lock);
        first_free_block = bitmap_search_next_usable_block(
                        my_rsv->rsv_start - group_first_block,
                        bitmap_bh, group_end_block - group_first_block + 1);

        if (first_free_block < 0) {
                /*
                 * no free block left on the bitmap, no point
                 * in reserving the space: return failure.
                 */
                spin_lock(rsv_lock);
                if (!rsv_is_empty(&my_rsv->rsv_window))
                        rsv_window_remove(sb, my_rsv);
                spin_unlock(rsv_lock);
                return -1;              /* failed */
        }

        start_block = first_free_block + group_first_block;
        /*
         * check if the first free block is within the
         * free space we just reserved
         */
        if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
                return 0;               /* success */
        /*
         * if the first free bit we found is out of the reservable space,
         * continue searching for the next reservable space,
         * starting from where the free block is;
         * we also shift the list head to where we stopped last time
         */
        search_head = my_rsv;
        spin_lock(rsv_lock);
        goto retry;
}

/**
 * try_to_extend_reservation()
 * @my_rsv:             given reservation window
 * @sb:                 super block
 * @size:               the delta by which to extend
 *
 * Attempt to expand the reservation window so that it is large enough
 * to hold the required number of free blocks.
 *
 * Since ext4_try_to_allocate() will always allocate blocks within
 * the reservation window range, if the window size is too small,
 * a multiple-block allocation has to stop at the end of the reservation
 * window.  To make this more efficient, given the total number of
 * blocks needed and the current size of the window, we try to
 * expand the reservation window size if necessary on a best-effort
 * basis before ext4_new_blocks() tries to allocate blocks.
 */
static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
                        struct super_block *sb, int size)
{
        struct ext4_reserve_window_node *next_rsv;
        struct rb_node *next;
        spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

        if (!spin_trylock(rsv_lock))
                return;

        next = rb_next(&my_rsv->rsv_node);

        if (!next)
                my_rsv->rsv_end += size;
        else {
                next_rsv = rb_entry(next, struct ext4_reserve_window_node,
                                    rsv_node);

                if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
                        my_rsv->rsv_end += size;
                else
                        my_rsv->rsv_end = next_rsv->rsv_start - 1;
        }
        spin_unlock(rsv_lock);
}
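
/*
 * Worked example (illustrative numbers): if my_rsv ends at block 1000,
 * the next window starts at block 1100, and size = 50, then the gap of
 * 1100 - 1000 - 1 = 99 blocks is big enough and rsv_end becomes 1050.
 * With size = 200 the window can only grow until it meets its
 * neighbour, so rsv_end becomes 1099.
 */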
1325
1326/**
1327 * ext4_try_to_allocate_with_rsv()
1328 * @sb:                 superblock
1329 * @handle:             handle to this transaction
1330 * @group:              given allocation block group
1331 * @bitmap_bh:          bufferhead holds the block bitmap
1332 * @grp_goal:           given target block within the group
1333 * @count:              target number of blocks to allocate
1334 * @my_rsv:             reservation window
1335 * @errp:               pointer to store the error code
1336 *
1337 * This is the main function used to allocate a new block and its reservation
1338 * window.
1339 *
1340 * Each time a new block allocation is needed, we first try to allocate
1341 * from the inode's own reservation.  If it does not have a reservation
1342 * window, then instead of looking for a free bit on the bitmap first and
1343 * only then checking the reservation list for someone else's window, we
1344 * try to allocate a reservation window for the inode starting from the
1345 * goal, and then do the block allocation within that window.
1346 *
1347 * This avoids searching the reservation list again and again when
1348 * somebody is looking for a free block (without a reservation) and
1349 * there are lots of free blocks, but they are all
1350 * being reserved.
1351 *
1352 * We use a red-black tree for the per-filesystem reservation list.
1353 *
1354 */
1355static ext4_grpblk_t
1356ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1357                        unsigned int group, struct buffer_head *bitmap_bh,
1358                        ext4_grpblk_t grp_goal,
1359                        struct ext4_reserve_window_node * my_rsv,
1360                        unsigned long *count, int *errp)
1361{
1362        ext4_fsblk_t group_first_block, group_last_block;
1363        ext4_grpblk_t ret = 0;
1364        int fatal;
1365        unsigned long num = *count;
1366
1367        *errp = 0;
1368
1369        /*
1370         * Make sure we use undo access for the bitmap, because it is critical
1371         * that we do the frozen_data COW on bitmap buffers in all cases even
1372         * if the buffer is in BJ_Forget state in the committing transaction.
1373         */
1374        BUFFER_TRACE(bitmap_bh, "get undo access for new block");
1375        fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
1376        if (fatal) {
1377                *errp = fatal;
1378                return -1;
1379        }
1380
1381        /*
1382         * We don't deal with reservations when the
1383         * filesystem is mounted without reservations,
1384         * or the file is not a regular file,
1385         * or the last attempt to allocate a block with reservations turned on failed
1386         */
1387        if (my_rsv == NULL) {
1388                ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
1389                                                grp_goal, count, NULL);
1390                goto out;
1391        }
1392        /*
1393         * grp_goal is a group-relative block number (if there is a goal):
1394         * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
1395         * group_first_block is a filesystem-wide block number:
1396         * the block number of the first block in this group
1397         */
1398        group_first_block = ext4_group_first_block_no(sb, group);
1399        group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
1400
1401        /*
1402         * Basically we will allocate a new block from the inode's
1403         * reservation window.
1404         *
1405         * We need to allocate a new reservation window, if:
1406         * a) the inode does not have a reservation window; or
1407         * b) the last attempt to allocate a block from the existing
1408         *    reservation failed; or
1409         * c) we come here with a goal and the goal is outside the window
1410         *
1411         * We do not need a new reservation window if we come here at the
1412         * beginning with a goal and the goal is inside the window, or if
1413         * we have no goal but already have a reservation window; in those
1414         * cases we can allocate from the reservation window directly.
1415         */
1416        while (1) {
1417                if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
1418                        !goal_in_my_reservation(&my_rsv->rsv_window,
1419                                                grp_goal, group, sb)) {
1420                        if (my_rsv->rsv_goal_size < *count)
1421                                my_rsv->rsv_goal_size = *count;
1422                        ret = alloc_new_reservation(my_rsv, grp_goal, sb,
1423                                                        group, bitmap_bh);
1424                        if (ret < 0)
1425                                break;                  /* failed */
1426
1427                        if (!goal_in_my_reservation(&my_rsv->rsv_window,
1428                                                        grp_goal, group, sb))
1429                                grp_goal = -1;
1430                } else if (grp_goal >= 0) {
1431                        int curr = my_rsv->rsv_end -
1432                                        (grp_goal + group_first_block) + 1;
1433
1434                        if (curr < *count)
1435                                try_to_extend_reservation(my_rsv, sb,
1436                                                        *count - curr);
1437                }
1438
1439                if ((my_rsv->rsv_start > group_last_block) ||
1440                                (my_rsv->rsv_end < group_first_block)) {
1441                        rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1);
1442                        BUG();
1443                }
1444                ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
1445                                           grp_goal, &num, &my_rsv->rsv_window);
1446                if (ret >= 0) {
1447                        my_rsv->rsv_alloc_hit += num;
1448                        *count = num;
1449                        break;                          /* succeed */
1450                }
1451                num = *count;
1452        }
1453out:
1454        if (ret >= 0) {
1455                BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
1456                                        "bitmap block");
1457                fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
1458                if (fatal) {
1459                        *errp = fatal;
1460                        return -1;
1461                }
1462                return ret;
1463        }
1464
1465        BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
1466        ext4_journal_release_buffer(handle, bitmap_bh);
1467        return ret;
1468}
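
/*
 * Compiled-out sketch of the window decision in the loop above, with
 * hypothetical flag arguments standing in for rsv_is_empty(), a failed
 * previous attempt (ret < 0) and goal_in_my_reservation().
 */
#if 0
static int need_new_window(int window_empty, int last_alloc_failed,
                           int goal_in_window)
{
        return window_empty || last_alloc_failed || !goal_in_window;
}
#endif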
1469
1470/**
1471 * ext4_has_free_blocks()
1472 * @sbi:                in-core super block structure.
1473 *
1474 * Check if filesystem has at least 1 free block available for allocation.
1475 */
1476static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
1477{
1478        ext4_fsblk_t free_blocks, root_blocks;
1479
1480        free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
1481        root_blocks = ext4_r_blocks_count(sbi->s_es);
1482        if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
1483                sbi->s_resuid != current->fsuid &&
1484                (sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid))) {
1485                return 0;
1486        }
1487        return 1;
1488}
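
/*
 * Compiled-out sketch of the policy above: unprivileged callers may not
 * dip into the root-reserved blocks.  `privileged` is a hypothetical
 * stand-in for the CAP_SYS_RESOURCE / resuid / resgid checks.
 */
#if 0
#include <stdio.h>

static int has_free_blocks(unsigned long long free_blocks,
                           unsigned long long root_blocks, int privileged)
{
        if (free_blocks < root_blocks + 1 && !privileged)
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", has_free_blocks(100, 512, 0));   /* 0: reserved only */
        printf("%d\n", has_free_blocks(100, 512, 1));   /* 1: root may go on */
        return 0;
}
#endif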
1489
1490/**
1491 * ext4_should_retry_alloc()
1492 * @sb:                 super block
1493 * @retries:            number of attempts that have been made
1494 *
1495 * ext4_should_retry_alloc() is called when ENOSPC is returned.  If
1496 * it is profitable to retry the operation, this function will wait
1497 * for the current or committing transaction to complete, and then
1498 * return TRUE.
1499 *
1500 * If the total number of retries exceeds three, return FALSE.
1501 */
1502int ext4_should_retry_alloc(struct super_block *sb, int *retries)
1503{
1504        if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
1505                return 0;
1506
1507        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
1508
1509        return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
1510}
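
/*
 * Compiled-out sketch of the intended caller pattern (the helpers are
 * hypothetical stand-ins for an allocation path and for this function):
 * loop on ENOSPC while a forced journal commit may still free blocks.
 */
#if 0
#include <errno.h>
#include <stdio.h>

/* Cap after a few attempts, loosely mirroring the three-retry cap above. */
static int fake_should_retry(int *retries) { return (*retries)++ < 3; }
static int fake_allocate(void) { return -ENOSPC; }

int main(void)
{
        int retries = 0, err;

        do {
                err = fake_allocate();
        } while (err == -ENOSPC && fake_should_retry(&retries));

        printf("gave up with err=%d after %d attempts\n", err, retries);
        return 0;
}
#endif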
1511
1512/**
1513 * ext4_new_blocks() -- core block(s) allocation function
1514 * @handle:             handle to this transaction
1515 * @inode:              file inode
1516 * @goal:               given target block(filesystem wide)
1517 * @count:              target number of blocks to allocate
1518 * @errp:               error code
1519 *
1520 * ext4_new_blocks uses a goal block to assist allocation.  It first tries
1521 * to allocate block(s) from the block group that contains the goal block. If that
1522 * fails, it will try to allocate block(s) from other block groups without
1523 * any specific goal block.
1524 *
1525 */
1526ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
1527                        ext4_fsblk_t goal, unsigned long *count, int *errp)
1528{
1529        struct buffer_head *bitmap_bh = NULL;
1530        struct buffer_head *gdp_bh;
1531        unsigned long group_no;
1532        int goal_group;
1533        ext4_grpblk_t grp_target_blk;   /* blockgroup relative goal block */
1534        ext4_grpblk_t grp_alloc_blk;    /* blockgroup-relative allocated block*/
1535        ext4_fsblk_t ret_block;         /* filesystem-wide allocated block */
1536        int bgi;                        /* blockgroup iteration index */
1537        int fatal = 0, err;
1538        int performed_allocation = 0;
1539        ext4_grpblk_t free_blocks;      /* number of free blocks in a group */
1540        struct super_block *sb;
1541        struct ext4_group_desc *gdp;
1542        struct ext4_super_block *es;
1543        struct ext4_sb_info *sbi;
1544        struct ext4_reserve_window_node *my_rsv = NULL;
1545        struct ext4_block_alloc_info *block_i;
1546        unsigned short windowsz = 0;
1547#ifdef EXT4FS_DEBUG
1548        static int goal_hits, goal_attempts;
1549#endif
1550        unsigned long ngroups;
1551        unsigned long num = *count;
1552
1553        *errp = -ENOSPC;
1554        sb = inode->i_sb;
1555        if (!sb) {
1556                printk("ext4_new_block: nonexistent device\n");
1557                return 0;
1558        }
1559
1560        /*
1561         * Check quota for allocation of this block.
1562         */
1563        if (DQUOT_ALLOC_BLOCK(inode, num)) {
1564                *errp = -EDQUOT;
1565                return 0;
1566        }
1567
1568        sbi = EXT4_SB(sb);
1569        es = EXT4_SB(sb)->s_es;
1570        ext4_debug("goal=%llu.\n", goal);
1571        /*
1572         * Allocate a block from the reservation only when the
1573         * filesystem is mounted with reservations (the default, -o reservation),
1574         * it's a regular file, and
1575         * the desired window size is greater than 0 (one could use the ioctl
1576         * command EXT4_IOC_SETRSVSZ to set the window size to 0 to turn off
1577         * reservations on that particular file)
1578         */
1579        block_i = EXT4_I(inode)->i_block_alloc_info;
1580        if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
1581                my_rsv = &block_i->rsv_window_node;
1582
1583        if (!ext4_has_free_blocks(sbi)) {
1584                *errp = -ENOSPC;
1585                goto out;
1586        }
1587
1588        /*
1589         * First, test whether the goal block is free.
1590         */
1591        if (goal < le32_to_cpu(es->s_first_data_block) ||
1592            goal >= ext4_blocks_count(es))
1593                goal = le32_to_cpu(es->s_first_data_block);
1594        ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk);
1595        goal_group = group_no;
1596retry_alloc:
1597        gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
1598        if (!gdp)
1599                goto io_error;
1600
1601        free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1602        /*
1603         * if there are not enough free blocks to make a new reservation,
1604         * turn off reservations for this allocation
1605         */
1606        if (my_rsv && (free_blocks < windowsz)
1607                && (rsv_is_empty(&my_rsv->rsv_window)))
1608                my_rsv = NULL;
1609
1610        if (free_blocks > 0) {
1611                bitmap_bh = read_block_bitmap(sb, group_no);
1612                if (!bitmap_bh)
1613                        goto io_error;
1614                grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
1615                                        group_no, bitmap_bh, grp_target_blk,
1616                                        my_rsv, &num, &fatal);
1617                if (fatal)
1618                        goto out;
1619                if (grp_alloc_blk >= 0)
1620                        goto allocated;
1621        }
1622
1623        ngroups = EXT4_SB(sb)->s_groups_count;
1624        smp_rmb();
1625
1626        /*
1627         * Now search the rest of the groups.  We assume that
1628         * group_no and gdp correctly point to the last group visited.
1629         */
1630        for (bgi = 0; bgi < ngroups; bgi++) {
1631                group_no++;
1632                if (group_no >= ngroups)
1633                        group_no = 0;
1634                gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
1635                if (!gdp)
1636                        goto io_error;
1637                free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1638                /*
1639                 * skip this group if the number of
1640                 * free blocks is no more than half of the
1641                 * reservation window size.
1642                 */
1643                if (free_blocks <= (windowsz/2))
1644                        continue;
1645
1646                brelse(bitmap_bh);
1647                bitmap_bh = read_block_bitmap(sb, group_no);
1648                if (!bitmap_bh)
1649                        goto io_error;
1650                /*
1651                 * try to allocate block(s) from this group, without a goal (-1).
1652                 */
1653                grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
1654                                        group_no, bitmap_bh, -1, my_rsv,
1655                                        &num, &fatal);
1656                if (fatal)
1657                        goto out;
1658                if (grp_alloc_blk >= 0)
1659                        goto allocated;
1660        }
1661        /*
1662         * We may have hit a bogus ENOSPC error earlier because the
1663         * filesystem was "full" of reservations while free blocks
1664         * were in fact still available on disk.
1665         * In this case, we just forget about the reservations and
1666         * do the block allocation as if there were no reservations.
1667         */
1668        if (my_rsv) {
1669                my_rsv = NULL;
1670                windowsz = 0;
1671                group_no = goal_group;
1672                goto retry_alloc;
1673        }
1674        /* No space left on the device */
1675        *errp = -ENOSPC;
1676        goto out;
1677
1678allocated:
1679
1680        ext4_debug("using block group %lu(%d)\n",
1681                        group_no, le16_to_cpu(gdp->bg_free_blocks_count));
1682
1683        BUFFER_TRACE(gdp_bh, "get_write_access");
1684        fatal = ext4_journal_get_write_access(handle, gdp_bh);
1685        if (fatal)
1686                goto out;
1687
1688        ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no);
1689
1690        if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) ||
1691            in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) ||
1692            in_range(ret_block, ext4_inode_table(sb, gdp),
1693                     EXT4_SB(sb)->s_itb_per_group) ||
1694            in_range(ret_block + num - 1, ext4_inode_table(sb, gdp),
1695                     EXT4_SB(sb)->s_itb_per_group))
1696                ext4_error(sb, "ext4_new_block",
1697                            "Allocating block in system zone - "
1698                            "blocks from %llu, length %lu",
1699                             ret_block, num);
1700
1701        performed_allocation = 1;
1702
1703#ifdef CONFIG_JBD2_DEBUG
1704        {
1705                struct buffer_head *debug_bh;
1706
1707                /* Record bitmap buffer state in the newly allocated block */
1708                debug_bh = sb_find_get_block(sb, ret_block);
1709                if (debug_bh) {
1710                        BUFFER_TRACE(debug_bh, "state when allocated");
1711                        BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
1712                        brelse(debug_bh);
1713                }
1714        }
1715        jbd_lock_bh_state(bitmap_bh);
1716        spin_lock(sb_bgl_lock(sbi, group_no));
1717        if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
1718                int i;
1719
1720                for (i = 0; i < num; i++) {
1721                        if (ext4_test_bit(grp_alloc_blk+i,
1722                                        bh2jh(bitmap_bh)->b_committed_data)) {
1723                                printk("%s: block was unexpectedly set in "
1724                                        "b_committed_data\n", __FUNCTION__);
1725                        }
1726                }
1727        }
1728        ext4_debug("found bit %d\n", grp_alloc_blk);
1729        spin_unlock(sb_bgl_lock(sbi, group_no));
1730        jbd_unlock_bh_state(bitmap_bh);
1731#endif
1732
1733        if (ret_block + num - 1 >= ext4_blocks_count(es)) {
1734                ext4_error(sb, "ext4_new_block",
1735                            "block(%llu) >= blocks count(%llu) - "
1736                            "block_group = %lu, es == %p ", ret_block,
1737                        ext4_blocks_count(es), group_no, es);
1738                goto out;
1739        }
1740
1741        /*
1742         * It is up to the caller to add the new buffer to a journal
1743         * list of some description.  We don't know in advance whether
1744         * the caller wants to use it as metadata or data.
1745         */
1746        ext4_debug("allocating block %llu. Goal hits %d of %d.\n",
1747                        ret_block, goal_hits, goal_attempts);
1748
1749        spin_lock(sb_bgl_lock(sbi, group_no));
1750        if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
1751                gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
1752        gdp->bg_free_blocks_count =
1753                        cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
1754        gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
1755        spin_unlock(sb_bgl_lock(sbi, group_no));
1756        percpu_counter_sub(&sbi->s_freeblocks_counter, num);
1757
1758        BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
1759        err = ext4_journal_dirty_metadata(handle, gdp_bh);
1760        if (!fatal)
1761                fatal = err;
1762
1763        sb->s_dirt = 1;
1764        if (fatal)
1765                goto out;
1766
1767        *errp = 0;
1768        brelse(bitmap_bh);
1769        DQUOT_FREE_BLOCK(inode, *count-num);
1770        *count = num;
1771        return ret_block;
1772
1773io_error:
1774        *errp = -EIO;
1775out:
1776        if (fatal) {
1777                *errp = fatal;
1778                ext4_std_error(sb, fatal);
1779        }
1780        /*
1781         * Undo the block allocation
1782         */
1783        if (!performed_allocation)
1784                DQUOT_FREE_BLOCK(inode, *count);
1785        brelse(bitmap_bh);
1786        return 0;
1787}
1788
1789ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode,
1790                        ext4_fsblk_t goal, int *errp)
1791{
1792        unsigned long count = 1;
1793
1794        return ext4_new_blocks(handle, inode, goal, &count, errp);
1795}
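
/*
 * Compiled-out usage sketch for the two allocators above.  With
 * ext4_new_blocks(), *count is in/out: the caller asks for up to `count`
 * blocks and learns how many contiguous blocks were actually granted.
 * The wrapper function below is hypothetical.
 */
#if 0
static ext4_fsblk_t alloc_up_to(handle_t *handle, struct inode *inode,
                                ext4_fsblk_t goal, unsigned long *count,
                                int *errp)
{
        ext4_fsblk_t first = ext4_new_blocks(handle, inode, goal,
                                             count, errp);

        /*
         * On success *count may have shrunk: the caller owns the
         * contiguous range [first, first + *count - 1].  On failure the
         * return value is 0 and *errp holds the error (e.g. -ENOSPC,
         * -EDQUOT, -EIO).
         */
        return first;
}
#endif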
1796
1797/**
1798 * ext4_count_free_blocks() -- count filesystem free blocks
1799 * @sb:         superblock
1800 *
1801 * Adds up the number of free blocks from each block group.
1802 */
1803ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
1804{
1805        ext4_fsblk_t desc_count;
1806        struct ext4_group_desc *gdp;
1807        int i;
1808        unsigned long ngroups = EXT4_SB(sb)->s_groups_count;
1809#ifdef EXT4FS_DEBUG
1810        struct ext4_super_block *es;
1811        ext4_fsblk_t bitmap_count;
1812        unsigned long x;
1813        struct buffer_head *bitmap_bh = NULL;
1814
1815        es = EXT4_SB(sb)->s_es;
1816        desc_count = 0;
1817        bitmap_count = 0;
1818        gdp = NULL;
1819
1820        smp_rmb();
1821        for (i = 0; i < ngroups; i++) {
1822                gdp = ext4_get_group_desc(sb, i, NULL);
1823                if (!gdp)
1824                        continue;
1825                desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1826                brelse(bitmap_bh);
1827                bitmap_bh = read_block_bitmap(sb, i);
1828                if (bitmap_bh == NULL)
1829                        continue;
1830
1831                x = ext4_count_free(bitmap_bh, sb->s_blocksize);
1832                printk("group %d: stored = %d, counted = %lu\n",
1833                        i, le16_to_cpu(gdp->bg_free_blocks_count), x);
1834                bitmap_count += x;
1835        }
1836        brelse(bitmap_bh);
1837        printk("ext4_count_free_blocks: stored = %llu"
1838                ", computed = %llu, %llu\n",
1839               EXT4_FREE_BLOCKS_COUNT(es),
1840                desc_count, bitmap_count);
1841        return bitmap_count;
1842#else
1843        desc_count = 0;
1844        smp_rmb();
1845        for (i = 0; i < ngroups; i++) {
1846                gdp = ext4_get_group_desc(sb, i, NULL);
1847                if (!gdp)
1848                        continue;
1849                desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1850        }
1851
1852        return desc_count;
1853#endif
1854}
1855
1856static inline int test_root(int a, int b)
1857{
1858        int num = b;
1859
1860        while (a > num)
1861                num *= b;
1862        return num == a;
1863}
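
/*
 * Compiled-out sketch demonstrating test_root(): it answers whether a is
 * an integer power of b (b, b^2, b^3, ...).  Note that 1 == b^0 is not
 * matched; ext4_group_sparse() below handles groups 0 and 1 separately.
 */
#if 0
#include <stdio.h>

static int is_power_of(int a, int b)
{
        int num = b;

        while (a > num)
                num *= b;
        return num == a;
}

int main(void)
{
        printf("%d %d %d\n", is_power_of(49, 7),        /* 1: 7^2 */
               is_power_of(50, 7),                      /* 0 */
               is_power_of(125, 5));                    /* 1: 5^3 */
        return 0;
}
#endif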
1864
1865static int ext4_group_sparse(int group)
1866{
1867        if (group <= 1)
1868                return 1;
1869        if (!(group & 1))
1870                return 0;
1871        return (test_root(group, 7) || test_root(group, 5) ||
1872                test_root(group, 3));
1873}
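
/*
 * Compiled-out sketch: with SPARSE_SUPER, only groups 0, 1 and the powers
 * of 3, 5 and 7 carry superblock/descriptor backups.  This enumerates
 * them with a user-space copy of the two predicates above.
 */
#if 0
#include <stdio.h>

static int is_power_of(int a, int b)
{
        int num = b;

        while (a > num)
                num *= b;
        return num == a;
}

static int group_sparse(int group)
{
        if (group <= 1)
                return 1;
        if (!(group & 1))
                return 0;
        return is_power_of(group, 7) || is_power_of(group, 5) ||
               is_power_of(group, 3);
}

int main(void)
{
        int g;

        for (g = 0; g < 60; g++)        /* prints: 0 1 3 5 7 9 25 27 49 */
                if (group_sparse(g))
                        printf("%d ", g);
        printf("\n");
        return 0;
}
#endif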
1874
1875/**
1876 *      ext4_bg_has_super - number of blocks used by the superblock in group
1877 *      @sb: superblock for filesystem
1878 *      @group: group number to check
1879 *
1880 *      Return the number of blocks used by the superblock (primary or backup)
1881 *      in this group.  Currently this will be only 0 or 1.
1882 */
1883int ext4_bg_has_super(struct super_block *sb, int group)
1884{
1885        if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
1886                                EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1887                        !ext4_group_sparse(group))
1888                return 0;
1889        return 1;
1890}
1891
1892static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, int group)
1893{
1894        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
1895        unsigned long first = metagroup * EXT4_DESC_PER_BLOCK(sb);
1896        unsigned long last = first + EXT4_DESC_PER_BLOCK(sb) - 1;
1897
1898        if (group == first || group == first + 1 || group == last)
1899                return 1;
1900        return 0;
1901}
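
/*
 * Compiled-out sketch of the META_BG layout above: within each metagroup
 * of EXT4_DESC_PER_BLOCK(sb) groups, only the first, second and last
 * group carry a copy of that metagroup's descriptor block.  The constant
 * below is a hypothetical example value (4K blocks with 32-byte
 * descriptors would give 128).
 */
#if 0
#include <stdio.h>

#define DESC_PER_BLOCK 128UL

static int gdb_blocks_in_group(unsigned long group)
{
        unsigned long first = (group / DESC_PER_BLOCK) * DESC_PER_BLOCK;
        unsigned long last = first + DESC_PER_BLOCK - 1;

        return group == first || group == first + 1 || group == last;
}

int main(void)
{
        /* Second metagroup: groups 128, 129 and 255 hold the copies. */
        printf("%d %d %d %d\n",
               gdb_blocks_in_group(128), gdb_blocks_in_group(129),
               gdb_blocks_in_group(200), gdb_blocks_in_group(255));
        return 0;
}
#endif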
1902
1903static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, int group)
1904{
1905        if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
1906                                EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1907                        !ext4_group_sparse(group))
1908                return 0;
1909        return EXT4_SB(sb)->s_gdb_count;
1910}
1911
1912/**
1913 *      ext4_bg_num_gdb - number of blocks used by the group table in group
1914 *      @sb: superblock for filesystem
1915 *      @group: group number to check
1916 *
1917 *      Return the number of blocks used by the group descriptor table
1918 *      (primary or backup) in this group.  In the future there may be a
1919 *      different number of descriptor blocks in each group.
1920 */
1921unsigned long ext4_bg_num_gdb(struct super_block *sb, int group)
1922{
1923        unsigned long first_meta_bg =
1924                        le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
1925        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
1926
1927        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
1928                        metagroup < first_meta_bg)
1929                return ext4_bg_num_gdb_nometa(sb, group);
1930
1931        return ext4_bg_num_gdb_meta(sb, group);
1932
1933}
1934