linux/fs/squashfs/cache.c
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@lougher.demon.co.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * cache.c
 */

/*
 * Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
 * recently accessed data, Squashfs uses two small metadata and fragment
 * caches.
 *
 * This file implements a generic cache used for both, plus functions
 * layered on top of it to access the metadata and fragment caches.
 *
 * To avoid out of memory and fragmentation issues with vmalloc the cache
 * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
 *
 * It should be noted that the cache is not used for file datablocks; these
 * are decompressed and cached in the page-cache in the normal way.  The
 * cache is only used to temporarily cache fragment and metadata blocks
 * which have been read as a result of a metadata (i.e. inode or
 * directory) or fragment access.  Because metadata and fragments are packed
 * together into blocks (to gain greater compression), the read of a
 * particular piece of metadata or fragment will retrieve other
 * metadata/fragments which have been packed with it.  Because of
 * locality-of-reference these may be read in the near future, and
 * temporarily caching them ensures they are available without requiring
 * an additional read and decompress.
 */
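
/*
 * Illustrative sketch (editorial note, not from the original source):
 * because each entry is held as cache->pages discontiguous
 * PAGE_CACHE_SIZE buffers rather than one flat allocation, byte "offset"
 * of an entry's decompressed data lives at:
 *
 *        entry->data[offset / PAGE_CACHE_SIZE] + (offset % PAGE_CACHE_SIZE)
 *
 * which is exactly the indexing squashfs_copy_data() below performs.
 */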

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/pagemap.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"

/*
 * Look up block in cache, and increment its usage count.  If not in cache,
 * read and decompress it from disk.
 */
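/*
 * Typical usage (hypothetical caller; error handling elided).  Every
 * successful squashfs_cache_get() must be balanced by a
 * squashfs_cache_put() so the entry can eventually be evicted and reused:
 *
 *        struct squashfs_cache_entry *entry;
 *
 *        entry = squashfs_cache_get(sb, cache, block, length);
 *        if (!entry->error)
 *                ... use decompressed data via entry->data or
 *                    squashfs_copy_data() ...
 *        squashfs_cache_put(entry);
 */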
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
        struct squashfs_cache *cache, u64 block, int length)
{
        int i, n;
        struct squashfs_cache_entry *entry;

        spin_lock(&cache->lock);

        while (1) {
                for (i = 0; i < cache->entries; i++)
                        if (cache->entry[i].block == block)
                                break;

                if (i == cache->entries) {
                        /*
                         * Block not in cache.  If all cache entries are
                         * used, go to sleep waiting for one to become
                         * available.
                         */
                        if (cache->unused == 0) {
                                cache->num_waiters++;
                                spin_unlock(&cache->lock);
                                wait_event(cache->wait_queue, cache->unused);
                                spin_lock(&cache->lock);
                                cache->num_waiters--;
                                continue;
                        }

                        /*
                         * At least one unused cache entry.  A simple
                         * round-robin strategy is used to choose the entry to
                         * be evicted from the cache.
                         */
                        i = cache->next_blk;
                        for (n = 0; n < cache->entries; n++) {
                                if (cache->entry[i].refcount == 0)
                                        break;
                                i = (i + 1) % cache->entries;
                        }

                        cache->next_blk = (i + 1) % cache->entries;
                        entry = &cache->entry[i];

                        /*
                         * Initialise chosen cache entry, and fill it in from
                         * disk.
                         */
                        cache->unused--;
                        entry->block = block;
                        entry->refcount = 1;
                        entry->pending = 1;
                        entry->num_waiters = 0;
                        entry->error = 0;
                        spin_unlock(&cache->lock);

                        entry->length = squashfs_read_data(sb, entry->data,
                                block, length, &entry->next_index,
                                cache->block_size, cache->pages);

                        spin_lock(&cache->lock);

                        if (entry->length < 0)
                                entry->error = entry->length;

                        entry->pending = 0;

                        /*
                         * While filling this entry one or more other
                         * processes may have looked it up in the cache and
                         * gone to sleep waiting for it to become available.
                         */
                        if (entry->num_waiters) {
                                spin_unlock(&cache->lock);
                                wake_up_all(&entry->wait_queue);
                        } else
                                spin_unlock(&cache->lock);

                        goto out;
                }

                /*
                 * Block already in cache.  Increment refcount so it doesn't
                 * get reused until we're finished with it.  If it was
                 * previously unused there's one less cache entry available
                 * for reuse.
                 */
                entry = &cache->entry[i];
                if (entry->refcount == 0)
                        cache->unused--;
                entry->refcount++;

                /*
                 * If the entry is currently being filled in by another
                 * process, go to sleep waiting for it to become available.
                 */
                if (entry->pending) {
                        entry->num_waiters++;
                        spin_unlock(&cache->lock);
                        wait_event(entry->wait_queue, !entry->pending);
                } else
                        spin_unlock(&cache->lock);

                goto out;
        }

out:
        TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
                cache->name, i, entry->block, entry->refcount, entry->error);

        if (entry->error)
                ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
                                                        block);
        return entry;
}


/*
 * Release cache entry; once its usage count is zero it can be reused.
 */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
        struct squashfs_cache *cache = entry->cache;

        spin_lock(&cache->lock);
        entry->refcount--;
        if (entry->refcount == 0) {
                cache->unused++;
                /*
                 * If there are any processes waiting for a block to become
                 * available, wake one up.
                 */
                if (cache->num_waiters) {
                        spin_unlock(&cache->lock);
                        wake_up(&cache->wait_queue);
                        return;
                }
        }
        spin_unlock(&cache->lock);
}

/*
 * Delete cache, reclaiming all kmalloced buffers.
 */
void squashfs_cache_delete(struct squashfs_cache *cache)
{
        int i, j;

        if (cache == NULL)
                return;

        for (i = 0; i < cache->entries; i++) {
                if (cache->entry[i].data) {
                        for (j = 0; j < cache->pages; j++)
                                kfree(cache->entry[i].data[j]);
                        kfree(cache->entry[i].data);
                }
        }

        kfree(cache->entry);
        kfree(cache);
}


/*
 * Initialise cache, allocating the specified number of entries, each of
 * size block_size.  To avoid vmalloc fragmentation issues each entry
 * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
 */
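/*
 * Hedged usage sketch (illustrative; the names mirror how the superblock
 * code is expected to create the metadata cache, error handling elided).
 * A cache built here is torn down with squashfs_cache_delete(), which is
 * also the error-path cleanup used below:
 *
 *        msblk->block_cache = squashfs_cache_init("metadata",
 *                SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
 *        if (msblk->block_cache == NULL)
 *                return -ENOMEM;
 *        ...
 *        squashfs_cache_delete(msblk->block_cache);
 */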
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
        int block_size)
{
        int i, j;
        struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);

        if (cache == NULL) {
                ERROR("Failed to allocate %s cache\n", name);
                return NULL;
        }

        cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
        if (cache->entry == NULL) {
                ERROR("Failed to allocate %s cache\n", name);
                goto cleanup;
        }

        cache->next_blk = 0;
        cache->unused = entries;
        cache->entries = entries;
        cache->block_size = block_size;
        cache->pages = block_size >> PAGE_CACHE_SHIFT;
        cache->pages = cache->pages ? cache->pages : 1;
        cache->name = name;
        cache->num_waiters = 0;
        spin_lock_init(&cache->lock);
        init_waitqueue_head(&cache->wait_queue);

        for (i = 0; i < entries; i++) {
                struct squashfs_cache_entry *entry = &cache->entry[i];

                init_waitqueue_head(&cache->entry[i].wait_queue);
                entry->cache = cache;
                entry->block = SQUASHFS_INVALID_BLK;
                entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
                if (entry->data == NULL) {
                        ERROR("Failed to allocate %s cache entry\n", name);
                        goto cleanup;
                }

                for (j = 0; j < cache->pages; j++) {
                        entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
                        if (entry->data[j] == NULL) {
                                ERROR("Failed to allocate %s buffer\n", name);
                                goto cleanup;
                        }
                }
        }

        return cache;

cleanup:
        squashfs_cache_delete(cache);
        return NULL;
}


/*
 * Copy up to length bytes from the cache entry to the buffer, starting at
 * offset bytes into the cache entry.  If fewer than length bytes are
 * available, copy what is available.  In all cases return the number of
 * bytes copied.
 */
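/*
 * Example (hypothetical caller; error handling elided): copy a whole
 * decompressed entry into a caller-supplied buffer.  Passing a NULL
 * buffer performs no copy and just returns how many bytes such a copy
 * would transfer:
 *
 *        int avail = squashfs_copy_data(NULL, entry, 0, entry->length);
 *        int copied = squashfs_copy_data(buffer, entry, 0, avail);
 */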
int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
                int offset, int length)
{
        int remaining = length;

        if (length == 0)
                return 0;
        else if (buffer == NULL)
                return min(length, entry->length - offset);

        while (offset < entry->length) {
                void *buff = entry->data[offset / PAGE_CACHE_SIZE]
                                + (offset % PAGE_CACHE_SIZE);
                int bytes = min_t(int, entry->length - offset,
                                PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));

                if (bytes >= remaining) {
                        memcpy(buffer, buff, remaining);
                        remaining = 0;
                        break;
                }

                memcpy(buffer, buff, bytes);
                buffer += bytes;
                remaining -= bytes;
                offset += bytes;
        }

        return length - remaining;
}


/*
 * Read length bytes from metadata position <block, offset> (block is the
 * start of the compressed block on disk, and offset is the offset into
 * the block once decompressed).  Data is packed into consecutive blocks,
 * and length bytes may require reading more than one block.
 */
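/*
 * The <block, offset> pair acts as a cursor which this function advances
 * in place, so back-to-back calls walk the metadata stream.  Hedged
 * sketch (hypothetical caller and buffers; error handling elided):
 *
 *        u64 block = start_block;
 *        int offset = start_offset;
 *
 *        err = squashfs_read_metadata(sb, &first, &block, &offset,
 *                sizeof(first));
 *        if (err >= 0)
 *                err = squashfs_read_metadata(sb, &second, &block, &offset,
 *                        sizeof(second));
 */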
int squashfs_read_metadata(struct super_block *sb, void *buffer,
                u64 *block, int *offset, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        int bytes, res = length;
        struct squashfs_cache_entry *entry;

        TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);

        while (length) {
                entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
                if (entry->error) {
                        res = entry->error;
                        goto error;
                } else if (*offset >= entry->length) {
                        res = -EIO;
                        goto error;
                }

                bytes = squashfs_copy_data(buffer, entry, *offset, length);
                if (buffer)
                        buffer += bytes;
                length -= bytes;
                *offset += bytes;

                if (*offset == entry->length) {
                        *block = entry->next_index;
                        *offset = 0;
                }

                squashfs_cache_put(entry);
        }

        return res;

error:
        /*
         * Drop the reference taken by squashfs_cache_get(), otherwise the
         * failed entry can never be evicted and reused.
         */
        squashfs_cache_put(entry);
        return res;
}


/*
 * Look up in the fragment cache the fragment located at <start_block> in
 * the filesystem.  If necessary read and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
                                u64 start_block, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;

        return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
                length);
}


/*
 * Read and decompress the datablock located at <start_block> in the
 * filesystem.  The cache is used here to avoid duplicating locking and
 * read/decompress code.
 */
struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
                                u64 start_block, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;

        return squashfs_cache_get(sb, msblk->read_page, start_block, length);
}


/*
 * Read a filesystem table (uncompressed sequence of bytes) from disk.
 */
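/*
 * Note length is or'ed with SQUASHFS_COMPRESSED_BIT_BLOCK in the call
 * below; in the Squashfs format a set bit marks a block as stored
 * uncompressed, so squashfs_read_data() copies the table straight into
 * the supplied pages without a decompression pass.  Hypothetical call
 * (names illustrative; error handling elided):
 *
 *        err = squashfs_read_table(sb, table, table_start, table_length);
 */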
int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
        int length)
{
        int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        int i, res;
        void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
                data[i] = buffer;
        res = squashfs_read_data(sb, data, block, length |
                SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);
        kfree(data);
        return res;
}