   1/*
   2 *  linux/fs/fat/cache.c
   3 *
   4 *  Written 1992,1993 by Werner Almesberger
   5 *
   6 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
   7 *      of inode number.
   8 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
   9 */
  10
  11#include <linux/fs.h>
  12#include <linux/buffer_head.h>
  13#include "fat.h"
  14
  15/* this must be > 0. */
  16#define FAT_MAX_CACHE   8
  17
/*
 * One cached mapping from a file-relative cluster number to its on-disk
 * cluster, plus the length of the contiguous run starting there.
 * Linked on the per-inode LRU list (MSDOS_I(inode)->cache_lru).
 */
struct fat_cache {
	struct list_head cache_list;	/* link on the inode's LRU list */
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};
  24
/*
 * Stack-allocated snapshot of a cache entry used while walking the FAT
 * without holding cache_lru_lock.  "id" records cache_valid_id at lookup
 * time so a concurrent invalidation makes the snapshot stale (see
 * fat_cache_add()).
 */
struct fat_cache_id {
	unsigned int id;	/* generation; FAT_CACHE_VALID or snapshot */
	int nr_contig;		/* same meaning as struct fat_cache */
	int fcluster;
	int dcluster;
};
  31
  32static inline int fat_max_cache(struct inode *inode)
  33{
  34        return FAT_MAX_CACHE;
  35}
  36
  37static struct kmem_cache *fat_cache_cachep;
  38
  39static void init_once(void *foo)
  40{
  41        struct fat_cache *cache = (struct fat_cache *)foo;
  42
  43        INIT_LIST_HEAD(&cache->cache_list);
  44}
  45
  46int __init fat_cache_init(void)
  47{
  48        fat_cache_cachep = kmem_cache_create("fat_cache",
  49                                sizeof(struct fat_cache),
  50                                0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
  51                                init_once);
  52        if (fat_cache_cachep == NULL)
  53                return -ENOMEM;
  54        return 0;
  55}
  56
/* Tear down the slab cache; all entries must already be freed. */
void fat_cache_destroy(void)
{
	kmem_cache_destroy(fat_cache_cachep);
}
  61
/*
 * Allocate one cache entry.  GFP_NOFS: we may be called from paths that
 * already hold fs locks, so reclaim must not re-enter the filesystem.
 * May return NULL on allocation failure.
 */
static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}
  66
/* Free an entry; it must already be unlinked from the LRU list. */
static inline void fat_cache_free(struct fat_cache *cache)
{
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}
  72
  73static inline void fat_cache_update_lru(struct inode *inode,
  74                                        struct fat_cache *cache)
  75{
  76        if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
  77                list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
  78}
  79
/*
 * fat_cache_lookup - find the cached run nearest to (not past) @fclus
 * @inode:	file whose chain is cached
 * @fclus:	file-relative cluster we want to reach
 * @cid:	out: snapshot of the best entry (only written on a hit)
 * @cached_fclus/@cached_dclus: out: closest known point on the chain
 *
 * Scans the LRU list for the entry with the largest fcluster <= @fclus.
 * Returns the offset of the cached point from that entry's start, or -1
 * on a complete miss (outputs untouched).
 */
static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	/*
	 * Sentinel with fcluster == 0: real entries always start at
	 * fcluster >= 1, so any genuine hit replaces it.
	 */
	static struct fat_cache nohit = { .fcluster = 0, };

	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				/* run ends before @fclus: best so far is
				 * the end of this contiguous run */
				offset = hit->nr_contig;
			} else {
				/* @fclus lies inside this run: exact hit */
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		/* snapshot under the lock so @cid is self-consistent */
		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}
 116
 117static struct fat_cache *fat_cache_merge(struct inode *inode,
 118                                         struct fat_cache_id *new)
 119{
 120        struct fat_cache *p;
 121
 122        list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
 123                /* Find the same part as "new" in cluster-chain. */
 124                if (p->fcluster == new->fcluster) {
 125                        BUG_ON(p->dcluster != new->dcluster);
 126                        if (new->nr_contig > p->nr_contig)
 127                                p->nr_contig = new->nr_contig;
 128                        return p;
 129                }
 130        }
 131        return NULL;
 132}
 133
 134static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
 135{
 136        struct fat_cache *cache, *tmp;
 137
 138        if (new->fcluster == -1) /* dummy cache */
 139                return;
 140
 141        spin_lock(&MSDOS_I(inode)->cache_lru_lock);
 142        if (new->id != FAT_CACHE_VALID &&
 143            new->id != MSDOS_I(inode)->cache_valid_id)
 144                goto out;       /* this cache was invalidated */
 145
 146        cache = fat_cache_merge(inode, new);
 147        if (cache == NULL) {
 148                if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
 149                        MSDOS_I(inode)->nr_caches++;
 150                        spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
 151
 152                        tmp = fat_cache_alloc(inode);
 153                        spin_lock(&MSDOS_I(inode)->cache_lru_lock);
 154                        cache = fat_cache_merge(inode, new);
 155                        if (cache != NULL) {
 156                                MSDOS_I(inode)->nr_caches--;
 157                                fat_cache_free(tmp);
 158                                goto out_update_lru;
 159                        }
 160                        cache = tmp;
 161                } else {
 162                        struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
 163                        cache = list_entry(p, struct fat_cache, cache_list);
 164                }
 165                cache->fcluster = new->fcluster;
 166                cache->dcluster = new->dcluster;
 167                cache->nr_contig = new->nr_contig;
 168        }
 169out_update_lru:
 170        fat_cache_update_lru(inode, cache);
 171out:
 172        spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
 173}
 174
 175/*
 176 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 177 * fixes itself after a while.
 178 */
 179static void __fat_cache_inval_inode(struct inode *inode)
 180{
 181        struct msdos_inode_info *i = MSDOS_I(inode);
 182        struct fat_cache *cache;
 183
 184        while (!list_empty(&i->cache_lru)) {
 185                cache = list_entry(i->cache_lru.next, struct fat_cache, cache_list);
 186                list_del_init(&cache->cache_list);
 187                i->nr_caches--;
 188                fat_cache_free(cache);
 189        }
 190        /* Update. The copy of caches before this id is discarded. */
 191        i->cache_valid_id++;
 192        if (i->cache_valid_id == FAT_CACHE_VALID)
 193                i->cache_valid_id++;
 194}
 195
/* Locked wrapper: drop every cached run for @inode. */
void fat_cache_inval_inode(struct inode *inode)
{
	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	__fat_cache_inval_inode(inode);
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}
 202
/*
 * Extend @cid by one cluster and report whether @dclus continues the
 * contiguous run.  NOTE: increments cid->nr_contig as a side effect,
 * whether or not the run continues.
 */
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}
 208
/* Start a fresh run at (@fclus -> @dclus) with a valid generation id. */
static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
	cid->id = FAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}
 216
/*
 * fat_get_cluster - resolve file cluster @cluster to its disk cluster
 * @inode:	file to walk (must have i_start set, i.e. be non-empty)
 * @cluster:	file-relative cluster index wanted
 * @fclus/@dclus: out: the reached file/disk cluster pair
 *
 * Starts from the cache's nearest known point (or the chain head on a
 * miss) and follows FAT entries until @cluster is reached.  The run
 * walked is added back to the cache.
 *
 * Returns 0 on success, FAT_ENT_EOF if the chain ends before @cluster
 * (with *fclus/*dclus at the last cluster), or -EIO on a corrupt chain
 * (loop or free entry in the middle).
 */
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	/* max clusters a file can span; used to detect chain loops */
	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			fat_fs_error(sb, "%s: detected the cluster chain loop"
				     " (i_pos %lld)", __func__,
				     MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		/* read the FAT entry for the current disk cluster */
		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			/* a free entry mid-chain means corruption */
			fat_fs_error(sb, "%s: invalid cluster chain"
				     " (i_pos %lld)", __func__,
				     MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			/* chain ended early; cache what we walked */
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		/* restart the cached run whenever contiguity breaks */
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}
 275
 276static int fat_bmap_cluster(struct inode *inode, int cluster)
 277{
 278        struct super_block *sb = inode->i_sb;
 279        int ret, fclus, dclus;
 280
 281        if (MSDOS_I(inode)->i_start == 0)
 282                return 0;
 283
 284        ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
 285        if (ret < 0)
 286                return ret;
 287        else if (ret == FAT_ENT_EOF) {
 288                fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
 289                             __func__, MSDOS_I(inode)->i_pos);
 290                return -EIO;
 291        }
 292        return dclus;
 293}
 294
/*
 * fat_bmap - map a file sector to a physical (device) sector
 * @inode:	file being mapped
 * @sector:	file-relative sector
 * @phys:	out: physical sector (0 if unmapped/hole)
 * @mapped_blocks: out: number of contiguous sectors mapped from @phys
 * @create:	nonzero on the allocation path (block may extend past
 *		i_size up to mmu_private)
 *
 * Returns 0 with *phys == 0 for an unmapped sector, 0 with a valid
 * *phys on success, or a negative errno.
 */
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
	     unsigned long *mapped_blocks, int create)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const unsigned long blocksize = sb->s_blocksize;
	const unsigned char blocksize_bits = sb->s_blocksize_bits;
	sector_t last_block;
	int cluster, offset;

	*phys = 0;
	*mapped_blocks = 0;
	/*
	 * FAT12/16 root directory is a fixed on-disk area, not a cluster
	 * chain: map it directly from dir_start.
	 */
	if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
			*phys = sector + sbi->dir_start;
			*mapped_blocks = 1;
		}
		return 0;
	}

	/* round i_size up to whole blocks */
	last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
	if (sector >= last_block) {
		if (!create)
			return 0;

		/*
		 * ->mmu_private can access on only allocation path.
		 * (caller must hold ->i_mutex)
		 */
		last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
			>> blocksize_bits;
		if (sector >= last_block)
			return 0;
	}

	/* split @sector into cluster index + sector offset in cluster */
	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
	offset  = sector & (sbi->sec_per_clus - 1);
	cluster = fat_bmap_cluster(inode, cluster);
	if (cluster < 0)
		return cluster;
	else if (cluster) {
		*phys = fat_clus_to_blknr(sbi, cluster) + offset;
		*mapped_blocks = sbi->sec_per_clus - offset;
		/* never report a run extending past the file's last block */
		if (*mapped_blocks > last_block - sector)
			*mapped_blocks = last_block - sector;
	}
	return 0;
}
 343