linux/fs/fat/fatent.c
/*
 * Copyright (C) 2004, OGAWA Hirofumi
 * Released under GPL v2.
 */

#include <linux/blkdev.h>
#include "fat.h"

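/*
 * Per-variant method table: FAT12, FAT16 and FAT32 differ in entry size and
 * layout, so locating, mapping, reading, writing and advancing a FAT entry
 * is done through these hooks.
 */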
struct fatent_operations {
        void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
        void (*ent_set_ptr)(struct fat_entry *, int);
        int (*ent_bread)(struct super_block *, struct fat_entry *,
                         int, sector_t);
        int (*ent_get)(struct fat_entry *);
        void (*ent_put)(struct fat_entry *, int);
        int (*ent_next)(struct fat_entry *);
};

static DEFINE_SPINLOCK(fat12_entry_lock);

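/*
 * A FAT12 entry is 12 bits (1.5 bytes), so its byte offset in the FAT is
 * entry + entry/2; the entry may straddle a block boundary.
 */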
static void fat12_ent_blocknr(struct super_block *sb, int entry,
                              int *offset, sector_t *blocknr)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        int bytes = entry + (entry >> 1);
        WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
        *offset = bytes & (sb->s_blocksize - 1);
        *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

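/*
 * FAT16/FAT32 entries are 2 or 4 bytes; fatent_shift converts an entry
 * number into a byte offset, which never crosses a block boundary.
 */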
static void fat_ent_blocknr(struct super_block *sb, int entry,
                            int *offset, sector_t *blocknr)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        int bytes = (entry << sbi->fatent_shift);
        WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
        *offset = bytes & (sb->s_blocksize - 1);
        *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

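/*
 * Point ent12_p[0]/[1] at the two bytes holding this FAT12 entry; when the
 * entry straddles a block boundary the second byte lives in bhs[1].
 */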
static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
        struct buffer_head **bhs = fatent->bhs;
        if (fatent->nr_bhs == 1) {
                WARN_ON(offset >= (bhs[0]->b_size - 1));
                fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
                fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
        } else {
                WARN_ON(offset != (bhs[0]->b_size - 1));
                fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
                fatent->u.ent12_p[1] = bhs[1]->b_data;
        }
}

static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
        WARN_ON(offset & (2 - 1));
        fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}

static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
        WARN_ON(offset & (4 - 1));
        fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}

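/*
 * Read the FAT block(s) backing a FAT12 entry.  An entry whose second byte
 * falls into the following block needs two buffer_heads.
 */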
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
                           int offset, sector_t blocknr)
{
        struct buffer_head **bhs = fatent->bhs;

        WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
        fatent->fat_inode = MSDOS_SB(sb)->fat_inode;

        bhs[0] = sb_bread(sb, blocknr);
        if (!bhs[0])
                goto err;

        if ((offset + 1) < sb->s_blocksize)
                fatent->nr_bhs = 1;
        else {
                /* This entry straddles a block boundary; it needs the next block too */
                blocknr++;
                bhs[1] = sb_bread(sb, blocknr);
                if (!bhs[1])
                        goto err_brelse;
                fatent->nr_bhs = 2;
        }
        fat12_ent_set_ptr(fatent, offset);
        return 0;

err_brelse:
        brelse(bhs[0]);
err:
        fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
        return -EIO;
}

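/*
 * Read the single FAT block backing a FAT16/FAT32 entry; those entries
 * never cross a block boundary.
 */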
static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
                         int offset, sector_t blocknr)
{
        const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

        WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
        fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
        fatent->bhs[0] = sb_bread(sb, blocknr);
        if (!fatent->bhs[0]) {
                fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
                       (llu)blocknr);
                return -EIO;
        }
        fatent->nr_bhs = 1;
        ops->ent_set_ptr(fatent, offset);
        return 0;
}

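/*
 * FAT12 packs two 12-bit entries into three bytes; fat12_entry_lock
 * serializes access to the byte shared by neighbouring entries.
 */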
static int fat12_ent_get(struct fat_entry *fatent)
{
        u8 **ent12_p = fatent->u.ent12_p;
        int next;

        spin_lock(&fat12_entry_lock);
        if (fatent->entry & 1)
                next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
        else
                next = (*ent12_p[1] << 8) | *ent12_p[0];
        spin_unlock(&fat12_entry_lock);

        next &= 0x0fff;
        if (next >= BAD_FAT12)
                next = FAT_ENT_EOF;
        return next;
}

static int fat16_ent_get(struct fat_entry *fatent)
{
        int next = le16_to_cpu(*fatent->u.ent16_p);
        WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
        if (next >= BAD_FAT16)
                next = FAT_ENT_EOF;
        return next;
}

static int fat32_ent_get(struct fat_entry *fatent)
{
        int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
        WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
        if (next >= BAD_FAT32)
                next = FAT_ENT_EOF;
        return next;
}

static void fat12_ent_put(struct fat_entry *fatent, int new)
{
        u8 **ent12_p = fatent->u.ent12_p;

        if (new == FAT_ENT_EOF)
                new = EOF_FAT12;

        spin_lock(&fat12_entry_lock);
        if (fatent->entry & 1) {
                *ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
                *ent12_p[1] = new >> 4;
        } else {
                *ent12_p[0] = new & 0xff;
                *ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
        }
        spin_unlock(&fat12_entry_lock);

        mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
        if (fatent->nr_bhs == 2)
                mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}

static void fat16_ent_put(struct fat_entry *fatent, int new)
{
        if (new == FAT_ENT_EOF)
                new = EOF_FAT16;

        *fatent->u.ent16_p = cpu_to_le16(new);
        mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

static void fat32_ent_put(struct fat_entry *fatent, int new)
{
        WARN_ON(new & 0xf0000000);
        new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
        *fatent->u.ent32_p = cpu_to_le32(new);
        mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

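/*
 * Advance to the next FAT12 entry within the block(s) already mapped;
 * returns 0 once the next entry is outside those blocks and a fresh bread
 * is required.
 */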
static int fat12_ent_next(struct fat_entry *fatent)
{
        u8 **ent12_p = fatent->u.ent12_p;
        struct buffer_head **bhs = fatent->bhs;
        u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

        fatent->entry++;
        if (fatent->nr_bhs == 1) {
                WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
                                                        (bhs[0]->b_size - 2)));
                WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
                                                        (bhs[0]->b_size - 1)));
                if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
                        ent12_p[0] = nextp - 1;
                        ent12_p[1] = nextp;
                        return 1;
                }
        } else {
                WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
                                                        (bhs[0]->b_size - 1)));
                WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
                ent12_p[0] = nextp - 1;
                ent12_p[1] = nextp;
                brelse(bhs[0]);
                bhs[0] = bhs[1];
                fatent->nr_bhs = 1;
                return 1;
        }
        ent12_p[0] = NULL;
        ent12_p[1] = NULL;
        return 0;
}

static int fat16_ent_next(struct fat_entry *fatent)
{
        const struct buffer_head *bh = fatent->bhs[0];
        fatent->entry++;
        if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
                fatent->u.ent16_p++;
                return 1;
        }
        fatent->u.ent16_p = NULL;
        return 0;
}

static int fat32_ent_next(struct fat_entry *fatent)
{
        const struct buffer_head *bh = fatent->bhs[0];
        fatent->entry++;
        if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
                fatent->u.ent32_p++;
                return 1;
        }
        fatent->u.ent32_p = NULL;
        return 0;
}

static const struct fatent_operations fat12_ops = {
        .ent_blocknr    = fat12_ent_blocknr,
        .ent_set_ptr    = fat12_ent_set_ptr,
        .ent_bread      = fat12_ent_bread,
        .ent_get        = fat12_ent_get,
        .ent_put        = fat12_ent_put,
        .ent_next       = fat12_ent_next,
};

static const struct fatent_operations fat16_ops = {
        .ent_blocknr    = fat_ent_blocknr,
        .ent_set_ptr    = fat16_ent_set_ptr,
        .ent_bread      = fat_ent_bread,
        .ent_get        = fat16_ent_get,
        .ent_put        = fat16_ent_put,
        .ent_next       = fat16_ent_next,
};

static const struct fatent_operations fat32_ops = {
        .ent_blocknr    = fat_ent_blocknr,
        .ent_set_ptr    = fat32_ent_set_ptr,
        .ent_bread      = fat_ent_bread,
        .ent_get        = fat32_ent_get,
        .ent_put        = fat32_ent_put,
        .ent_next       = fat32_ent_next,
};

static inline void lock_fat(struct msdos_sb_info *sbi)
{
        mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat(struct msdos_sb_info *sbi)
{
        mutex_unlock(&sbi->fat_lock);
}

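/*
 * Pick the entry operations and entry-size shift for this volume's FAT
 * variant.  FAT12 gets fatent_shift == -1 since its 1.5-byte entries are
 * handled specially by the fat12_* helpers.
 */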
void fat_ent_access_init(struct super_block *sb)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);

        mutex_init(&sbi->fat_lock);

        switch (sbi->fat_bits) {
        case 32:
                sbi->fatent_shift = 2;
                sbi->fatent_ops = &fat32_ops;
                break;
        case 16:
                sbi->fatent_shift = 1;
                sbi->fatent_ops = &fat16_ops;
                break;
        case 12:
                sbi->fatent_shift = -1;
                sbi->fatent_ops = &fat12_ops;
                break;
        }
}

static void mark_fsinfo_dirty(struct super_block *sb)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);

        if (sb_rdonly(sb) || sbi->fat_bits != 32)
                return;

        __mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
}

static inline int fat_ent_update_ptr(struct super_block *sb,
                                     struct fat_entry *fatent,
                                     int offset, sector_t blocknr)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        const struct fatent_operations *ops = sbi->fatent_ops;
        struct buffer_head **bhs = fatent->bhs;

        /* Do the blocks already held by this fatent include this entry? */
        if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
                return 0;
        if (sbi->fat_bits == 12) {
                if ((offset + 1) < sb->s_blocksize) {
                        /* This entry is on bhs[0]. */
                        if (fatent->nr_bhs == 2) {
                                brelse(bhs[1]);
                                fatent->nr_bhs = 1;
                        }
                } else {
                        /* This entry needs the next block. */
                        if (fatent->nr_bhs != 2)
                                return 0;
                        if (bhs[1]->b_blocknr != (blocknr + 1))
                                return 0;
                }
        }
        ops->ent_set_ptr(fatent, offset);
        return 1;
}

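/*
 * Return the value stored in FAT entry "entry" (or a negative error),
 * re-reading the FAT block only when the buffer_heads already cached in
 * the fatent do not cover it.
 */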
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
        const struct fatent_operations *ops = sbi->fatent_ops;
        int err, offset;
        sector_t blocknr;

        if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
                fatent_brelse(fatent);
                fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
                return -EIO;
        }

        fatent_set_entry(fatent, entry);
        ops->ent_blocknr(sb, entry, &offset, &blocknr);

        if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
                fatent_brelse(fatent);
                err = ops->ent_bread(sb, fatent, offset, blocknr);
                if (err)
                        return err;
        }
        return ops->ent_get(fatent);
}

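/* Copy the dirtied FAT blocks to every backup copy of the FAT. */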
/* FIXME: We could write the blocks in bigger chunks. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
                          int nr_bhs)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        struct buffer_head *c_bh;
        int err, n, copy;

        err = 0;
        for (copy = 1; copy < sbi->fats; copy++) {
                sector_t backup_fat = sbi->fat_length * copy;

                for (n = 0; n < nr_bhs; n++) {
                        c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
                        if (!c_bh) {
                                err = -ENOMEM;
                                goto error;
                        }
                        memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
                        set_buffer_uptodate(c_bh);
                        mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
                        if (sb->s_flags & SB_SYNCHRONOUS)
                                err = sync_dirty_buffer(c_bh);
                        brelse(c_bh);
                        if (err)
                                goto error;
                }
        }
error:
        return err;
}

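/*
 * Store "new" into the FAT entry, optionally syncing the underlying
 * buffer_heads, and mirror the change to the backup FAT(s).
 */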
int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
                  int new, int wait)
{
        struct super_block *sb = inode->i_sb;
        const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
        int err;

        ops->ent_put(fatent, new);
        if (wait) {
                err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
                if (err)
                        return err;
        }
        return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}

static inline int fat_ent_next(struct msdos_sb_info *sbi,
                               struct fat_entry *fatent)
{
        if (sbi->fatent_ops->ent_next(fatent)) {
                if (fatent->entry < sbi->max_cluster)
                        return 1;
        }
        return 0;
}

static inline int fat_ent_read_block(struct super_block *sb,
                                     struct fat_entry *fatent)
{
        const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
        sector_t blocknr;
        int offset;

        fatent_brelse(fatent);
        ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
        return ops->ent_bread(sb, fatent, offset, blocknr);
}

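/*
 * Add the fatent's buffer_heads to the bhs array (taking a reference on
 * each), skipping any that are already present.
 */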
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
                            struct fat_entry *fatent)
{
        int n, i;

        for (n = 0; n < fatent->nr_bhs; n++) {
                for (i = 0; i < *nr_bhs; i++) {
                        if (fatent->bhs[n] == bhs[i])
                                break;
                }
                if (i == *nr_bhs) {
                        get_bh(fatent->bhs[n]);
                        bhs[i] = fatent->bhs[n];
                        (*nr_bhs)++;
                }
        }
}

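/*
 * Allocate nr_cluster free clusters, chaining them together and marking the
 * last one FAT_ENT_EOF.  The search starts after the last allocation point
 * (sbi->prev_free) and makes at most one full pass over the FAT; on error a
 * partially built chain is freed again.
 */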
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        const struct fatent_operations *ops = sbi->fatent_ops;
        struct fat_entry fatent, prev_ent;
        struct buffer_head *bhs[MAX_BUF_PER_PAGE];
        int i, count, err, nr_bhs, idx_clus;

        BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));    /* fixed limit */

        lock_fat(sbi);
        if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
            sbi->free_clusters < nr_cluster) {
                unlock_fat(sbi);
                return -ENOSPC;
        }

        err = nr_bhs = idx_clus = 0;
        count = FAT_START_ENT;
        fatent_init(&prev_ent);
        fatent_init(&fatent);
        fatent_set_entry(&fatent, sbi->prev_free + 1);
        while (count < sbi->max_cluster) {
                if (fatent.entry >= sbi->max_cluster)
                        fatent.entry = FAT_START_ENT;
                fatent_set_entry(&fatent, fatent.entry);
                err = fat_ent_read_block(sb, &fatent);
                if (err)
                        goto out;

                /* Find the free entries in a block */
                do {
                        if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
                                int entry = fatent.entry;

                                /* make the cluster chain */
                                ops->ent_put(&fatent, FAT_ENT_EOF);
                                if (prev_ent.nr_bhs)
                                        ops->ent_put(&prev_ent, entry);

                                fat_collect_bhs(bhs, &nr_bhs, &fatent);

                                sbi->prev_free = entry;
                                if (sbi->free_clusters != -1)
                                        sbi->free_clusters--;

                                cluster[idx_clus] = entry;
                                idx_clus++;
                                if (idx_clus == nr_cluster)
                                        goto out;

                                /*
                                 * fat_collect_bhs() took a reference on the
                                 * bhs, so prev_ent remains usable.
                                 */
                                prev_ent = fatent;
                        }
                        count++;
                        if (count == sbi->max_cluster)
                                break;
                } while (fat_ent_next(sbi, &fatent));
        }

        /* Couldn't allocate the free entries */
        sbi->free_clusters = 0;
        sbi->free_clus_valid = 1;
        err = -ENOSPC;

out:
        unlock_fat(sbi);
        mark_fsinfo_dirty(sb);
        fatent_brelse(&fatent);
        if (!err) {
                if (inode_needs_sync(inode))
                        err = fat_sync_bhs(bhs, nr_bhs);
                if (!err)
                        err = fat_mirror_bhs(sb, bhs, nr_bhs);
        }
        for (i = 0; i < nr_bhs; i++)
                brelse(bhs[i]);

        if (err && idx_clus)
                fat_free_clusters(inode, cluster[0]);

        return err;
}

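/*
 * Walk the cluster chain starting at "cluster", marking each entry
 * FAT_ENT_FREE and, when the "discard" mount option is set, issuing discard
 * for the freed runs of contiguous clusters.
 */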
int fat_free_clusters(struct inode *inode, int cluster)
{
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        const struct fatent_operations *ops = sbi->fatent_ops;
        struct fat_entry fatent;
        struct buffer_head *bhs[MAX_BUF_PER_PAGE];
        int i, err, nr_bhs;
        int first_cl = cluster, dirty_fsinfo = 0;

        nr_bhs = 0;
        fatent_init(&fatent);
        lock_fat(sbi);
        do {
                cluster = fat_ent_read(inode, &fatent, cluster);
                if (cluster < 0) {
                        err = cluster;
                        goto error;
                } else if (cluster == FAT_ENT_FREE) {
                        fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
                                     __func__);
                        err = -EIO;
                        goto error;
                }

                if (sbi->options.discard) {
                        /*
                         * Issue discard for the sectors we no longer
                         * care about, batching contiguous clusters
                         * into one request
                         */
                        if (cluster != fatent.entry + 1) {
                                int nr_clus = fatent.entry - first_cl + 1;

                                sb_issue_discard(sb,
                                        fat_clus_to_blknr(sbi, first_cl),
                                        nr_clus * sbi->sec_per_clus,
                                        GFP_NOFS, 0);

                                first_cl = cluster;
                        }
                }

                ops->ent_put(&fatent, FAT_ENT_FREE);
                if (sbi->free_clusters != -1) {
                        sbi->free_clusters++;
                        dirty_fsinfo = 1;
                }

                if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
                        if (sb->s_flags & SB_SYNCHRONOUS) {
                                err = fat_sync_bhs(bhs, nr_bhs);
                                if (err)
                                        goto error;
                        }
                        err = fat_mirror_bhs(sb, bhs, nr_bhs);
                        if (err)
                                goto error;
                        for (i = 0; i < nr_bhs; i++)
                                brelse(bhs[i]);
                        nr_bhs = 0;
                }
                fat_collect_bhs(bhs, &nr_bhs, &fatent);
        } while (cluster != FAT_ENT_EOF);

        if (sb->s_flags & SB_SYNCHRONOUS) {
                err = fat_sync_bhs(bhs, nr_bhs);
                if (err)
                        goto error;
        }
        err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
        fatent_brelse(&fatent);
        for (i = 0; i < nr_bhs; i++)
                brelse(bhs[i]);
        unlock_fat(sbi);
        if (dirty_fsinfo)
                mark_fsinfo_dirty(sb);

        return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);

/* 128kb of readahead covers the whole FAT for FAT12 and FAT16 */
#define FAT_READA_SIZE          (128 * 1024)

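/* Start readahead of up to reada_blocks FAT blocks from the entry's block. */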
static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
                          unsigned long reada_blocks)
{
        const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
        sector_t blocknr;
        int i, offset;

        ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

        for (i = 0; i < reada_blocks; i++)
                sb_breadahead(sb, blocknr + i);
}

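/*
 * Count the free clusters by scanning the whole FAT (with readahead) and
 * cache the result in sbi->free_clusters, marking FSINFO dirty so the new
 * count gets written back.
 */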
int fat_count_free_clusters(struct super_block *sb)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        const struct fatent_operations *ops = sbi->fatent_ops;
        struct fat_entry fatent;
        unsigned long reada_blocks, reada_mask, cur_block;
        int err = 0, free;

        lock_fat(sbi);
        if (sbi->free_clusters != -1 && sbi->free_clus_valid)
                goto out;

        reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
        reada_mask = reada_blocks - 1;
        cur_block = 0;

        free = 0;
        fatent_init(&fatent);
        fatent_set_entry(&fatent, FAT_START_ENT);
        while (fatent.entry < sbi->max_cluster) {
                /* readahead of fat blocks */
                if ((cur_block & reada_mask) == 0) {
                        unsigned long rest = sbi->fat_length - cur_block;
                        fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
                }
                cur_block++;

                err = fat_ent_read_block(sb, &fatent);
                if (err)
                        goto out;

                do {
                        if (ops->ent_get(&fatent) == FAT_ENT_FREE)
                                free++;
                } while (fat_ent_next(sbi, &fatent));
        }
        sbi->free_clusters = free;
        sbi->free_clus_valid = 1;
        mark_fsinfo_dirty(sb);
        fatent_brelse(&fatent);
out:
        unlock_fat(sbi);
        return err;
}