linux/drivers/staging/zram/zram_drv.c
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;

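/*
 * Simple helpers for the per-device statistics. The 32-bit counters
 * are updated with plain read-modify-write; the 64-bit counters go
 * through stat64_lock, presumably so a concurrent update cannot tear
 * the value on 32-bit machines.
 */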
static void zram_stat_inc(u32 *v)
{
        *v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
        *v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
        spin_lock(&zram->stat64_lock);
        *v = *v + inc;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
        spin_lock(&zram->stat64_lock);
        *v = *v - dec;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
        zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags &= ~BIT(flag);
}

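/* Return 1 if the PAGE_SIZE buffer at ptr contains only zero bytes. */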
static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

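/*
 * Choose the effective disk size: fall back to a default percentage of
 * RAM when none was configured, warn when the requested size exceeds
 * twice the amount of RAM, then round down to a page boundary.
 */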
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
        if (!zram->disksize) {
                pr_info(
                "disk size not provided. You can use the disksize sysfs "
                "node (/sys/block/zram<id>/disksize) to specify the "
                "size.\nUsing default: (%u%% of RAM).\n",
                default_disksize_perc_ram
                );
                zram->disksize = default_disksize_perc_ram *
                                        (totalram_bytes / 100);
        }

        if (zram->disksize > 2 * (totalram_bytes)) {
                pr_info(
                "There is little point creating a zram of greater than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that zram uses about 0.1%% of the size of "
                "the disk when not in use so a huge zram is "
                "wasteful.\n"
                "\tMemory Size: %zu kB\n"
                "\tSize you selected: %llu kB\n"
                "Continuing anyway ...\n",
                totalram_bytes >> 10, zram->disksize >> 10
                );
        }

        zram->disksize &= PAGE_MASK;
}

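/*
 * Release whatever backs table entry 'index': nothing for a zero-filled
 * page (only the flag is cleared), the whole page for an uncompressed
 * page, or the xvmalloc object otherwise. Stats are adjusted and the
 * table entry is cleared.
 */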
static void zram_free_page(struct zram *zram, size_t index)
{
        u32 clen;
        void *obj;

        struct page *page = zram->table[index].page;
        u32 offset = zram->table[index].offset;

        if (unlikely(!page)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                        zram_clear_flag(zram, index, ZRAM_ZERO);
                        zram_stat_dec(&zram->stats.pages_zero);
                }
                return;
        }

        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                clen = PAGE_SIZE;
                __free_page(page);
                zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
                zram_stat_dec(&zram->stats.pages_expand);
                goto out;
        }

        obj = kmap_atomic(page, KM_USER0) + offset;
        clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
        kunmap_atomic(obj, KM_USER0);

        xv_free(zram->mem_pool, page, offset);
        if (clen <= PAGE_SIZE / 2)
                zram_stat_dec(&zram->stats.good_compress);

out:
        zram_stat64_sub(zram, &zram->stats.compr_size, clen);
        zram_stat_dec(&zram->stats.pages_stored);

        zram->table[index].page = NULL;
        zram->table[index].offset = 0;
}

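/* Satisfy a read of a zero-filled page by clearing the bio page. */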
static void handle_zero_page(struct page *page)
{
        void *user_mem;

        user_mem = kmap_atomic(page, KM_USER0);
        memset(user_mem, 0, PAGE_SIZE);
        kunmap_atomic(user_mem, KM_USER0);

        flush_dcache_page(page);
}

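/*
 * Copy a page that was stored as-is (ZRAM_UNCOMPRESSED) straight into
 * the bio page.
 */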
static void handle_uncompressed_page(struct zram *zram,
                                struct page *page, u32 index)
{
        unsigned char *user_mem, *cmem;

        user_mem = kmap_atomic(page, KM_USER0);
        cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
                        zram->table[index].offset;

        memcpy(user_mem, cmem, PAGE_SIZE);
        kunmap_atomic(user_mem, KM_USER0);
        kunmap_atomic(cmem, KM_USER1);

        flush_dcache_page(page);
}

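/*
 * Read handler: walk the bio one page-sized segment at a time and fill
 * each destination page from the zero-page flag, the uncompressed
 * copy, or by LZO-decompressing the stored object.
 */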
static int zram_read(struct zram *zram, struct bio *bio)
{
        int i;
        u32 index;
        struct bio_vec *bvec;

        if (unlikely(!zram->init_done)) {
                set_bit(BIO_UPTODATE, &bio->bi_flags);
                bio_endio(bio, 0);
                return 0;
        }

        zram_stat64_inc(zram, &zram->stats.num_reads);
        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

        bio_for_each_segment(bvec, bio, i) {
                int ret;
                size_t clen;
                struct page *page;
                struct zobj_header *zheader;
                unsigned char *user_mem, *cmem;

                page = bvec->bv_page;

                if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                        handle_zero_page(page);
                        index++;
                        continue;
                }

                /* Requested page is not present in compressed area */
                if (unlikely(!zram->table[index].page)) {
                        pr_debug("Read before write: sector=%lu, size=%u",
                                (ulong)(bio->bi_sector), bio->bi_size);
                        /* Do nothing */
                        index++;
                        continue;
                }

                /* Page is stored uncompressed since it's incompressible */
                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                        handle_uncompressed_page(zram, page, index);
                        index++;
                        continue;
                }

                user_mem = kmap_atomic(page, KM_USER0);
                clen = PAGE_SIZE;

                cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
                                zram->table[index].offset;

                ret = lzo1x_decompress_safe(
                        cmem + sizeof(*zheader),
                        xv_get_object_size(cmem) - sizeof(*zheader),
                        user_mem, &clen);

                kunmap_atomic(user_mem, KM_USER0);
                kunmap_atomic(cmem, KM_USER1);

                /* Should NEVER happen. Return bio error if it does. */
                if (unlikely(ret != LZO_E_OK)) {
                        pr_err("Decompression failed! err=%d, page=%u\n",
                                ret, index);
                        zram_stat64_inc(zram, &zram->stats.failed_reads);
                        goto out;
                }

                flush_dcache_page(page);
                index++;
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return 0;

out:
        bio_io_error(bio);
        return 0;
}

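/*
 * Write handler: for each page-sized segment, free any data previously
 * stored at this index, detect zero-filled pages, LZO-compress the
 * rest, and store the result in the xvmalloc pool or, for pages that
 * do not compress well, in a freshly allocated page.
 */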
static int zram_write(struct zram *zram, struct bio *bio)
{
        int i, ret;
        u32 index;
        struct bio_vec *bvec;

        if (unlikely(!zram->init_done)) {
                ret = zram_init_device(zram);
                if (ret)
                        goto out;
        }

        zram_stat64_inc(zram, &zram->stats.num_writes);
        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

        bio_for_each_segment(bvec, bio, i) {
                u32 offset;
                size_t clen;
                struct zobj_header *zheader;
                struct page *page, *page_store;
                unsigned char *user_mem, *cmem, *src;

                page = bvec->bv_page;
                src = zram->compress_buffer;

                /*
                 * System overwrites unused sectors. Free memory associated
                 * with this sector now.
                 */
                if (zram->table[index].page ||
                                zram_test_flag(zram, index, ZRAM_ZERO))
                        zram_free_page(zram, index);

                mutex_lock(&zram->lock);

                user_mem = kmap_atomic(page, KM_USER0);
                if (page_zero_filled(user_mem)) {
                        kunmap_atomic(user_mem, KM_USER0);
                        mutex_unlock(&zram->lock);
                        zram_stat_inc(&zram->stats.pages_zero);
                        zram_set_flag(zram, index, ZRAM_ZERO);
                        index++;
                        continue;
                }

                ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
                                        zram->compress_workmem);

                kunmap_atomic(user_mem, KM_USER0);

                if (unlikely(ret != LZO_E_OK)) {
                        mutex_unlock(&zram->lock);
                        pr_err("Compression failed! err=%d\n", ret);
                        zram_stat64_inc(zram, &zram->stats.failed_writes);
                        goto out;
                }

                /*
                 * Page is incompressible. Store it as-is (uncompressed)
                 * since we do not want to return too many disk write
                 * errors which has side effect of hanging the system.
                 */
                if (unlikely(clen > max_zpage_size)) {
                        clen = PAGE_SIZE;
                        page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
                        if (unlikely(!page_store)) {
                                mutex_unlock(&zram->lock);
                                pr_info("Error allocating memory for "
                                        "incompressible page: %u\n", index);
                                zram_stat64_inc(zram,
                                        &zram->stats.failed_writes);
                                goto out;
                        }

                        offset = 0;
                        zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
                        zram_stat_inc(&zram->stats.pages_expand);
                        zram->table[index].page = page_store;
                        src = kmap_atomic(page, KM_USER0);
                        goto memstore;
                }

                if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
                                &zram->table[index].page, &offset,
                                GFP_NOIO | __GFP_HIGHMEM)) {
                        mutex_unlock(&zram->lock);
                        pr_info("Error allocating memory for compressed "
                                "page: %u, size=%zu\n", index, clen);
                        zram_stat64_inc(zram, &zram->stats.failed_writes);
                        goto out;
                }

memstore:
                zram->table[index].offset = offset;

                cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
                                zram->table[index].offset;

#if 0
                /* Back-reference needed for memory defragmentation */
                if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
                        zheader = (struct zobj_header *)cmem;
                        zheader->table_idx = index;
                        cmem += sizeof(*zheader);
                }
#endif

                memcpy(cmem, src, clen);

                kunmap_atomic(cmem, KM_USER1);
                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
                        kunmap_atomic(src, KM_USER0);

                /* Update stats */
                zram_stat64_add(zram, &zram->stats.compr_size, clen);
                zram_stat_inc(&zram->stats.pages_stored);
                if (clen <= PAGE_SIZE / 2)
                        zram_stat_inc(&zram->stats.good_compress);

                mutex_unlock(&zram->lock);
                index++;
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return 0;

out:
        bio_io_error(bio);
        return 0;
}

/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        if (unlikely(
                (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
                (bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
                (bio->bi_size & (PAGE_SIZE - 1)))) {

                return 0;
        }

        /* I/O request is valid */
        return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
        int ret = 0;
        struct zram *zram = queue->queuedata;

        if (!valid_io_request(zram, bio)) {
                zram_stat64_inc(zram, &zram->stats.invalid_io);
                bio_io_error(bio);
                return 0;
        }

        switch (bio_data_dir(bio)) {
        case READ:
                ret = zram_read(zram, bio);
                break;

        case WRITE:
                ret = zram_write(zram, bio);
                break;
        }

        return ret;
}

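/*
 * Return the device to its uninitialized state: free the compression
 * buffers, every stored page, the table and the memory pool, then
 * clear the statistics and the configured disk size.
 */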
void zram_reset_device(struct zram *zram)
{
        size_t index;

        mutex_lock(&zram->init_lock);
        zram->init_done = 0;

        /* Free various per-device buffers */
        kfree(zram->compress_workmem);
        free_pages((unsigned long)zram->compress_buffer, 1);

        zram->compress_workmem = NULL;
        zram->compress_buffer = NULL;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                struct page *page;
                u16 offset;

                page = zram->table[index].page;
                offset = zram->table[index].offset;

                if (!page)
                        continue;

                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
                        __free_page(page);
                else
                        xv_free(zram->mem_pool, page, offset);
        }

        vfree(zram->table);
        zram->table = NULL;

        xv_destroy_pool(zram->mem_pool);
        zram->mem_pool = NULL;

        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
        mutex_unlock(&zram->init_lock);
}

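/*
 * Allocate the per-device resources (compression work memory and
 * buffer, mapping table, xvmalloc pool), fix up the disk size and
 * capacity, and mark the device initialized. Reached from the first
 * write to a device that has not been initialized yet.
 */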
int zram_init_device(struct zram *zram)
{
        int ret;
        size_t num_pages;

        mutex_lock(&zram->init_lock);

        if (zram->init_done) {
                mutex_unlock(&zram->init_lock);
                return 0;
        }

        zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

        zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!zram->compress_workmem) {
                pr_err("Error allocating compressor working memory!\n");
                ret = -ENOMEM;
                goto fail;
        }

        zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
        if (!zram->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                ret = -ENOMEM;
                goto fail;
        }

        num_pages = zram->disksize >> PAGE_SHIFT;
        zram->table = vzalloc(num_pages * sizeof(*zram->table));
        if (!zram->table) {
                pr_err("Error allocating zram address table\n");
                /* To prevent accessing table entries during cleanup */
                zram->disksize = 0;
                ret = -ENOMEM;
                goto fail;
        }

        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

        zram->mem_pool = xv_create_pool();
        if (!zram->mem_pool) {
                pr_err("Error creating memory pool\n");
                ret = -ENOMEM;
                goto fail;
        }

        zram->init_done = 1;
        mutex_unlock(&zram->init_lock);

        pr_debug("Initialization done!\n");
        return 0;

fail:
        mutex_unlock(&zram->init_lock);
        zram_reset_device(zram);

        pr_err("Initialization failed: err=%d\n", ret);
        return ret;
}

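/*
 * Called through block_device_operations.swap_slot_free_notify when
 * the swap layer frees a slot on this device, so the backing memory
 * can be released immediately instead of waiting for an overwrite.
 */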
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
        struct zram *zram;

        zram = bdev->bd_disk->private_data;
        zram_free_page(zram, index);
        zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

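/*
 * Set up one zram device: locks, a request queue using our
 * make_request handler, a gendisk with zero initial capacity and
 * PAGE_SIZE logical/physical block size, and the sysfs attribute
 * group.
 */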
static int create_device(struct zram *zram, int device_id)
{
        int ret = 0;

        mutex_init(&zram->lock);
        mutex_init(&zram->init_lock);
        spin_lock_init(&zram->stat64_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                blk_cleanup_queue(zram->queue);
                pr_warning("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);

        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZE sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

        add_disk(zram->disk);

#ifdef CONFIG_SYSFS
        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warning("Error creating sysfs group\n");
                goto out;
        }
#endif

        zram->init_done = 0;

out:
        return ret;
}

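/* Undo create_device(): remove the sysfs group, gendisk and queue. */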
static void destroy_device(struct zram *zram)
{
#ifdef CONFIG_SYSFS
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);
#endif

        if (zram->disk) {
                del_gendisk(zram->disk);
                put_disk(zram->disk);
        }

        if (zram->queue)
                blk_cleanup_queue(zram->queue);
}

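/*
 * Module init: validate num_devices, register the block major and
 * create each device; on failure, unwind whatever was already created.
 */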
static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warning("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warning("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        if (!num_devices) {
                pr_info("num_devices not specified. Using default: 1\n");
                num_devices = 1;
        }

        /* Allocate the device array and initialize each one */
        pr_info("Creating %u devices ...\n", num_devices);
        devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&devices[--dev_id]);
        kfree(devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

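/* Module exit: tear down every device and release the block major. */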
static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &devices[i];

                destroy_device(zram);
                if (zram->init_done)
                        zram_reset_device(zram);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(devices);
        pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");