linux/fs/squashfs/cache.c
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@lougher.demon.co.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * cache.c
 */

/*
 * Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
 * recently accessed data Squashfs uses two small metadata and fragment caches.
 *
 * This file implements a generic cache used for both, plus functions
 * layered on top of the generic cache implementation to access the
 * metadata and fragment caches.
 *
 * To avoid out of memory and fragmentation issues with vmalloc the cache
 * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
 *
 * It should be noted that the cache is not used for file datablocks; these
 * are decompressed and cached in the page-cache in the normal way.  The
 * cache is only used to temporarily cache fragment and metadata blocks
 * which have been read as a result of a metadata (i.e. inode or
 * directory) or fragment access.  Because metadata and fragments are packed
 * together into blocks (to gain greater compression) the read of a particular
 * piece of metadata or fragment will retrieve other metadata/fragments which
 * have been packed with it; because of locality-of-reference these may be
 * read in the near future.  Temporarily caching them ensures they are
 * available for near future access without requiring an additional read
 * and decompress.
 */
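
/*
 * Typical use of the cache by the helpers in this file is (illustrative
 * sketch only, error handling and locking details omitted):
 *
 *      entry = squashfs_cache_get(sb, cache, block, length);
 *      if (!entry->error)
 *              squashfs_copy_data(buffer, entry, offset, bytes);
 *      squashfs_cache_put(entry);
 *
 * squashfs_read_metadata() below follows this pattern.
 */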

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/zlib.h>
#include <linux/pagemap.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"

/*
 * Look-up block in cache, and increment usage count.  If not in cache, read
 * and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
        struct squashfs_cache *cache, u64 block, int length)
{
        int i, n;
        struct squashfs_cache_entry *entry;

        spin_lock(&cache->lock);

        while (1) {
                for (i = 0; i < cache->entries; i++)
                        if (cache->entry[i].block == block)
                                break;

                if (i == cache->entries) {
                        /*
                         * Block not in cache.  If all cache entries are used,
                         * go to sleep waiting for one to become available.
                         */
                        if (cache->unused == 0) {
                                cache->num_waiters++;
                                spin_unlock(&cache->lock);
                                wait_event(cache->wait_queue, cache->unused);
                                spin_lock(&cache->lock);
                                cache->num_waiters--;
                                continue;
                        }

                        /*
                         * At least one unused cache entry.  A simple
                         * round-robin strategy is used to choose the entry to
                         * be evicted from the cache.
                         */
                        i = cache->next_blk;
                        for (n = 0; n < cache->entries; n++) {
                                if (cache->entry[i].refcount == 0)
                                        break;
                                i = (i + 1) % cache->entries;
                        }

                        cache->next_blk = (i + 1) % cache->entries;
                        entry = &cache->entry[i];

                        /*
                         * Initialise chosen cache entry, and fill it in from
                         * disk.
                         */
                        cache->unused--;
                        entry->block = block;
                        entry->refcount = 1;
                        entry->pending = 1;
                        entry->num_waiters = 0;
                        entry->error = 0;
                        spin_unlock(&cache->lock);

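                        /*
                         * Fill the entry from disk.  squashfs_read_data()
                         * (block.c) returns the number of bytes placed in the
                         * entry buffers, or a negative error code on failure.
                         * A length of zero indicates a metadata block whose
                         * length is taken from the on-disk block header.
                         */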
                        entry->length = squashfs_read_data(sb, entry->data,
                                block, length, &entry->next_index,
                                cache->block_size, cache->pages);

                        spin_lock(&cache->lock);

                        if (entry->length < 0)
                                entry->error = entry->length;

                        entry->pending = 0;

                        /*
                         * While filling this entry one or more other processes
                         * may have looked it up in the cache, and slept
                         * waiting for it to become available.  Wake them up.
                         */
                        if (entry->num_waiters) {
                                spin_unlock(&cache->lock);
                                wake_up_all(&entry->wait_queue);
                        } else
                                spin_unlock(&cache->lock);

                        goto out;
                }

                /*
                 * Block already in cache.  Increment refcount so it doesn't
                 * get reused until we're finished with it.  If it was
                 * previously unused there's one less cache entry available
                 * for reuse.
                 */
                entry = &cache->entry[i];
                if (entry->refcount == 0)
                        cache->unused--;
                entry->refcount++;

                /*
                 * If the entry is currently being filled in by another process
                 * go to sleep waiting for it to become available.
                 */
                if (entry->pending) {
                        entry->num_waiters++;
                        spin_unlock(&cache->lock);
                        wait_event(entry->wait_queue, !entry->pending);
                } else
                        spin_unlock(&cache->lock);

                goto out;
        }

out:
        TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
                cache->name, i, entry->block, entry->refcount, entry->error);

        if (entry->error)
                ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
                                                        block);
        return entry;
}


/*
 * Release cache entry; once usage count is zero it can be reused.
 */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
        struct squashfs_cache *cache = entry->cache;

        spin_lock(&cache->lock);
        entry->refcount--;
        if (entry->refcount == 0) {
                cache->unused++;
                /*
                 * If any processes are waiting for a block to become
                 * available, wake one up.
                 */
                if (cache->num_waiters) {
                        spin_unlock(&cache->lock);
                        wake_up(&cache->wait_queue);
                        return;
                }
        }
        spin_unlock(&cache->lock);
}

/*
 * Delete cache reclaiming all kmalloced buffers.
 */
void squashfs_cache_delete(struct squashfs_cache *cache)
{
        int i, j;

        if (cache == NULL)
                return;

        for (i = 0; i < cache->entries; i++) {
                if (cache->entry[i].data) {
                        for (j = 0; j < cache->pages; j++)
                                kfree(cache->entry[i].data[j]);
                        kfree(cache->entry[i].data);
                }
        }

        kfree(cache->entry);
        kfree(cache);
}


/*
 * Initialise cache allocating the specified number of entries, each of
 * size block_size.  To avoid vmalloc fragmentation issues each entry
 * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
 */
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
        int block_size)
{
        int i, j;
        struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);

        if (cache == NULL) {
                ERROR("Failed to allocate %s cache\n", name);
                return NULL;
        }

        cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
        if (cache->entry == NULL) {
                ERROR("Failed to allocate %s cache\n", name);
                goto cleanup;
        }

        cache->next_blk = 0;
        cache->unused = entries;
        cache->entries = entries;
        cache->block_size = block_size;
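        /*
         * Each entry is split into PAGE_CACHE_SIZE sized buffers; a
         * block_size smaller than a page still gets one buffer.
         */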
        cache->pages = block_size >> PAGE_CACHE_SHIFT;
        cache->pages = cache->pages ? cache->pages : 1;
        cache->name = name;
        cache->num_waiters = 0;
        spin_lock_init(&cache->lock);
        init_waitqueue_head(&cache->wait_queue);

        for (i = 0; i < entries; i++) {
                struct squashfs_cache_entry *entry = &cache->entry[i];

                init_waitqueue_head(&cache->entry[i].wait_queue);
                entry->cache = cache;
                entry->block = SQUASHFS_INVALID_BLK;
                entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
                if (entry->data == NULL) {
                        ERROR("Failed to allocate %s cache entry\n", name);
                        goto cleanup;
                }

                for (j = 0; j < cache->pages; j++) {
                        entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
                        if (entry->data[j] == NULL) {
                                ERROR("Failed to allocate %s buffer\n", name);
                                goto cleanup;
                        }
                }
        }

        return cache;

cleanup:
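        /* squashfs_cache_delete() copes with a partially initialised cache */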
        squashfs_cache_delete(cache);
        return NULL;
}


/*
 * Copy up to length bytes from the cache entry to the buffer, starting at
 * offset bytes into the cache entry.  If there are fewer than length bytes
 * available then copy the number of bytes available.  In all cases return
 * the number of bytes copied.
 */
int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
                int offset, int length)
{
        int remaining = length;

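        /*
         * A NULL buffer means the caller only wants to know how many bytes
         * are available from offset; squashfs_read_metadata() relies on this
         * when skipping over metadata.
         */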
        if (length == 0)
                return 0;
        else if (buffer == NULL)
                return min(length, entry->length - offset);

        while (offset < entry->length) {
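                /*
                 * Entry data is held in separate PAGE_CACHE_SIZE buffers, so
                 * copy at most up to the end of the current buffer in each
                 * iteration.
                 */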
                void *buff = entry->data[offset / PAGE_CACHE_SIZE]
                                + (offset % PAGE_CACHE_SIZE);
                int bytes = min_t(int, entry->length - offset,
                                PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));

                if (bytes >= remaining) {
                        memcpy(buffer, buff, remaining);
                        remaining = 0;
                        break;
                }

                memcpy(buffer, buff, bytes);
                buffer += bytes;
                remaining -= bytes;
                offset += bytes;
        }

        return length - remaining;
}


/*
 * Read length bytes from metadata position <block, offset> (block is the
 * start of the compressed block on disk, and offset is the offset into
 * the block once decompressed).  Data is packed into consecutive blocks,
 * and length bytes may require reading more than one block.
 */
int squashfs_read_metadata(struct super_block *sb, void *buffer,
                u64 *block, int *offset, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        int bytes, copied = length;
        struct squashfs_cache_entry *entry;

        TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);

        while (length) {
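                /*
                 * Metadata blocks are looked up by start address; a length of
                 * zero tells the cache to take the block length from the
                 * block's on-disk header (see squashfs_read_data()).
                 */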
                entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
                if (entry->error) {
                        int err = entry->error;

                        /* release the entry before returning the error */
                        squashfs_cache_put(entry);
                        return err;
                } else if (*offset >= entry->length) {
                        squashfs_cache_put(entry);
                        return -EIO;
                }

                bytes = squashfs_copy_data(buffer, entry, *offset, length);
                if (buffer)
                        buffer += bytes;
                length -= bytes;
                *offset += bytes;

                if (*offset == entry->length) {
                        *block = entry->next_index;
                        *offset = 0;
                }

                squashfs_cache_put(entry);
        }

        return copied;
}


/*
 * Look-up in the fragment cache the fragment located at <start_block> in the
 * filesystem.  If necessary read and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
                                u64 start_block, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;

        return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
                length);
}


/*
 * Read and decompress the datablock located at <start_block> in the
 * filesystem.  The cache is used here to avoid duplicating locking and
 * read/decompress code.
 */
struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
                                u64 start_block, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;

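        /*
         * msblk->read_page is a separate cache, set up at mount time, used
         * only by the file read (readpage) path.
         */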
        return squashfs_cache_get(sb, msblk->read_page, start_block, length);
}


/*
 * Read a filesystem table (uncompressed sequence of bytes) from disk
 */
int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
        int length)
{
        int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        int i, res;
        void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
                data[i] = buffer;
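        /*
         * Tables are stored uncompressed; or-ing in
         * SQUASHFS_COMPRESSED_BIT_BLOCK tells squashfs_read_data() that the
         * length is the literal (uncompressed) block length.
         */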
        res = squashfs_read_data(sb, data, block, length |
                SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);
        kfree(data);
        return res;
}