/*
 *  linux/fs/fat/cache.c
 *
 *  Written 1992,1993 by Werner Almesberger
 *
 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 *      of inode number.
 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include "fat.h"

/* Maximum number of cache entries per inode; this must be > 0. */
#define FAT_MAX_CACHE   8

struct fat_cache {
        struct list_head cache_list;
        int nr_contig;  /* number of contiguous clusters */
        int fcluster;   /* cluster number in the file. */
        int dcluster;   /* cluster number on disk. */
};

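/*
 * Snapshot of a cache entry, tagged with the inode's cache_valid_id at
 * lookup time.  fat_cache_add() compares the id against the current
 * cache_valid_id, so a snapshot taken before the cache was invalidated
 * is quietly discarded instead of re-inserting stale data.
 */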
struct fat_cache_id {
        unsigned int id;
        int nr_contig;
        int fcluster;
        int dcluster;
};

static inline int fat_max_cache(struct inode *inode)
{
        return FAT_MAX_CACHE;
}

static struct kmem_cache *fat_cache_cachep;

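/*
 * Slab constructor: runs once per object when its backing page is
 * allocated, so every entry starts with an empty list head (which
 * fat_cache_free() later asserts via BUG_ON).
 */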
static void init_once(void *foo)
{
        struct fat_cache *cache = (struct fat_cache *)foo;

        INIT_LIST_HEAD(&cache->cache_list);
}

int __init fat_cache_init(void)
{
        fat_cache_cachep = kmem_cache_create("fat_cache",
                                sizeof(struct fat_cache),
                                0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
                                init_once);
        if (fat_cache_cachep == NULL)
                return -ENOMEM;
        return 0;
}

void fat_cache_destroy(void)
{
        kmem_cache_destroy(fat_cache_cachep);
}

static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
        return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}

static inline void fat_cache_free(struct fat_cache *cache)
{
        BUG_ON(!list_empty(&cache->cache_list));
        kmem_cache_free(fat_cache_cachep, cache);
}

static inline void fat_cache_update_lru(struct inode *inode,
                                        struct fat_cache *cache)
{
        if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
                list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}

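/*
 * Look up file cluster @fclus in @inode's cache.  On a hit, snapshot
 * the best entry into @cid and return the offset, within that entry's
 * contiguous run, of the closest cluster at or before @fclus;
 * *cached_fclus and *cached_dclus are set to that cluster.  Returns -1
 * on a complete miss.
 */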
static int fat_cache_lookup(struct inode *inode, int fclus,
                            struct fat_cache_id *cid,
                            int *cached_fclus, int *cached_dclus)
{
        static struct fat_cache nohit = { .fcluster = 0, };

        struct fat_cache *hit = &nohit, *p;
        int offset = -1;

        spin_lock(&MSDOS_I(inode)->cache_lru_lock);
        list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
                /* Find the entry for "fclus", or the nearest one before it. */
                if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
                        hit = p;
                        if ((hit->fcluster + hit->nr_contig) < fclus) {
                                offset = hit->nr_contig;
                        } else {
                                offset = fclus - hit->fcluster;
                                break;
                        }
                }
        }
        if (hit != &nohit) {
                fat_cache_update_lru(inode, hit);

                cid->id = MSDOS_I(inode)->cache_valid_id;
                cid->nr_contig = hit->nr_contig;
                cid->fcluster = hit->fcluster;
                cid->dcluster = hit->dcluster;
                *cached_fclus = cid->fcluster + offset;
                *cached_dclus = cid->dcluster + offset;
        }
        spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

        return offset;
}

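/*
 * Find an existing entry that starts at the same file cluster as @new
 * and widen it if @new describes a longer contiguous run.  The caller
 * must hold cache_lru_lock.
 */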
static struct fat_cache *fat_cache_merge(struct inode *inode,
                                         struct fat_cache_id *new)
{
        struct fat_cache *p;

        list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
                /* Find the entry covering the same part of the chain as "new". */
                if (p->fcluster == new->fcluster) {
                        BUG_ON(p->dcluster != new->dcluster);
                        if (new->nr_contig > p->nr_contig)
                                p->nr_contig = new->nr_contig;
                        return p;
                }
        }
        return NULL;
}

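/*
 * Insert the run described by @new into the cache.  The entry is
 * allocated with cache_lru_lock dropped, so the merge is retried after
 * the lock is retaken; if the cache is already full, the least
 * recently used entry (the tail of the LRU list) is recycled instead.
 * On allocation failure the run is simply not cached.
 */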
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
        struct fat_cache *cache, *tmp;

        if (new->fcluster == -1) /* dummy cache */
                return;

        spin_lock(&MSDOS_I(inode)->cache_lru_lock);
        if (new->id != FAT_CACHE_VALID &&
            new->id != MSDOS_I(inode)->cache_valid_id)
                goto out;       /* this cache was invalidated */

        cache = fat_cache_merge(inode, new);
        if (cache == NULL) {
                if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
                        MSDOS_I(inode)->nr_caches++;
                        spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

                        tmp = fat_cache_alloc(inode);
                        if (!tmp) {
                                /* allocation failed: undo the reservation */
                                spin_lock(&MSDOS_I(inode)->cache_lru_lock);
                                MSDOS_I(inode)->nr_caches--;
                                spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
                                return;
                        }

                        spin_lock(&MSDOS_I(inode)->cache_lru_lock);
                        /* another task may have added the entry meanwhile */
                        cache = fat_cache_merge(inode, new);
                        if (cache != NULL) {
                                MSDOS_I(inode)->nr_caches--;
                                fat_cache_free(tmp);
                                goto out_update_lru;
                        }
                        cache = tmp;
                } else {
                        struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
                        cache = list_entry(p, struct fat_cache, cache_list);
                }
                cache->fcluster = new->fcluster;
                cache->dcluster = new->dcluster;
                cache->nr_contig = new->nr_contig;
        }
out_update_lru:
        fat_cache_update_lru(inode, cache);
out:
        spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __fat_cache_inval_inode(struct inode *inode)
{
        struct msdos_inode_info *i = MSDOS_I(inode);
        struct fat_cache *cache;

        while (!list_empty(&i->cache_lru)) {
                cache = list_entry(i->cache_lru.next,
                                   struct fat_cache, cache_list);
                list_del_init(&cache->cache_list);
                i->nr_caches--;
                fat_cache_free(cache);
        }
        /*
         * Bump the id: any fat_cache_id snapshot taken before this
         * point is now stale and will be rejected by fat_cache_add().
         */
        i->cache_valid_id++;
        if (i->cache_valid_id == FAT_CACHE_VALID)
                i->cache_valid_id++;
}

void fat_cache_inval_inode(struct inode *inode)
{
        spin_lock(&MSDOS_I(inode)->cache_lru_lock);
        __fat_cache_inval_inode(inode);
        spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

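/*
 * Extend the run described by @cid by one cluster and test whether
 * @dclus really is the next contiguous cluster on disk.  If it is not,
 * the caller starts a fresh run with cache_init().
 */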
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
        cid->nr_contig++;
        return ((cid->dcluster + cid->nr_contig) == dclus);
}

static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
        cid->id = FAT_CACHE_VALID;
        cid->fcluster = fclus;
        cid->dcluster = dclus;
        cid->nr_contig = 0;
}

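/*
 * Walk the cluster chain of @inode to find the disk cluster backing
 * file cluster @cluster, starting from the nearest cached position.
 * On return, *fclus/*dclus hold the last file/disk cluster reached.
 * Returns 0 on success, FAT_ENT_EOF if the chain ends before @cluster,
 * or a negative errno on I/O error or a corrupted chain.
 */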
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
        struct super_block *sb = inode->i_sb;
        const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
        struct fat_entry fatent;
        struct fat_cache_id cid;
        int nr;

        BUG_ON(MSDOS_I(inode)->i_start == 0);

        *fclus = 0;
        *dclus = MSDOS_I(inode)->i_start;
        if (cluster == 0)
                return 0;

        if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
                /*
                 * Dummy entry, never contiguous; it is reinitialized by
                 * cache_init() on the first iteration below.
                 */
                cache_init(&cid, -1, -1);
        }

        fatent_init(&fatent);
        while (*fclus < cluster) {
                /* prevent an infinite loop on a cyclic cluster chain */
                if (*fclus > limit) {
                        fat_fs_error_ratelimit(sb,
                                        "%s: detected the cluster chain loop"
                                        " (i_pos %lld)", __func__,
                                        MSDOS_I(inode)->i_pos);
                        nr = -EIO;
                        goto out;
                }

                nr = fat_ent_read(inode, &fatent, *dclus);
                if (nr < 0)
                        goto out;
                else if (nr == FAT_ENT_FREE) {
                        fat_fs_error_ratelimit(sb, "%s: invalid cluster chain"
                                               " (i_pos %lld)", __func__,
                                               MSDOS_I(inode)->i_pos);
                        nr = -EIO;
                        goto out;
                } else if (nr == FAT_ENT_EOF) {
                        fat_cache_add(inode, &cid);
                        goto out;
                }
                (*fclus)++;
                *dclus = nr;
                if (!cache_contiguous(&cid, *dclus))
                        cache_init(&cid, *fclus, *dclus);
        }
        nr = 0;
        fat_cache_add(inode, &cid);
out:
        fatent_brelse(&fatent);
        return nr;
}

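/*
 * Resolve file cluster @cluster of @inode to its on-disk cluster
 * number.  Returns 0 for an empty file, the disk cluster number on
 * success, or a negative errno.
 */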
static int fat_bmap_cluster(struct inode *inode, int cluster)
{
        struct super_block *sb = inode->i_sb;
        int ret, fclus, dclus;

        if (MSDOS_I(inode)->i_start == 0)
                return 0;

        ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
        if (ret < 0)
                return ret;
        else if (ret == FAT_ENT_EOF) {
                fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
                             __func__, MSDOS_I(inode)->i_pos);
                return -EIO;
        }
        return dclus;
}

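/*
 * Map logical block @sector of @inode to a device block in *phys and
 * report in *mapped_blocks how many blocks are contiguous from there.
 * *phys stays 0 for an unmapped block (a hole, or past EOF unless
 * @create).  The FAT12/FAT16 root directory lives in a fixed area
 * outside the data clusters and is mapped directly.
 */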
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
             unsigned long *mapped_blocks, int create)
{
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        const unsigned long blocksize = sb->s_blocksize;
        const unsigned char blocksize_bits = sb->s_blocksize_bits;
        sector_t last_block;
        int cluster, offset;

        *phys = 0;
        *mapped_blocks = 0;
        if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
                if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
                        *phys = sector + sbi->dir_start;
                        *mapped_blocks = 1;
                }
                return 0;
        }

        last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
        if (sector >= last_block) {
                if (!create)
                        return 0;

                /*
                 * ->mmu_private may only be accessed on the allocation
                 * path (the caller must hold ->i_mutex).
                 */
                last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
                        >> blocksize_bits;
                if (sector >= last_block)
                        return 0;
        }

        cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
        offset  = sector & (sbi->sec_per_clus - 1);
        cluster = fat_bmap_cluster(inode, cluster);
        if (cluster < 0)
                return cluster;
        else if (cluster) {
                *phys = fat_clus_to_blknr(sbi, cluster) + offset;
                *mapped_blocks = sbi->sec_per_clus - offset;
                if (*mapped_blocks > last_block - sector)
                        *mapped_blocks = last_block - sector;
        }
        return 0;
}