linux/drivers/staging/zram/zram_drv.c
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */
  14
  15#define KMSG_COMPONENT "zram"
  16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  17
  18#ifdef CONFIG_ZRAM_DEBUG
  19#define DEBUG
  20#endif
  21
  22#include <linux/module.h>
  23#include <linux/kernel.h>
  24#include <linux/bio.h>
  25#include <linux/bitops.h>
  26#include <linux/blkdev.h>
  27#include <linux/buffer_head.h>
  28#include <linux/device.h>
  29#include <linux/genhd.h>
  30#include <linux/highmem.h>
  31#include <linux/slab.h>
  32#include <linux/lzo.h>
  33#include <linux/string.h>
  34#include <linux/vmalloc.h>
  35
  36#include "zram_drv.h"
  37
  38/* Globals */
  39static int zram_major;
  40struct zram *zram_devices;
  41
  42/* Module params (documentation at end) */
  43static unsigned int num_devices = 1;
  44
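/*
 * Helpers for the 64-bit counters in zram->stats. A spinlock is used
 * because a plain u64 read-modify-write is not atomic on 32-bit
 * architectures, where a racing reader could otherwise observe a torn
 * value.
 */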
static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
        spin_lock(&zram->stat64_lock);
        *v = *v + inc;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
        spin_lock(&zram->stat64_lock);
        *v = *v - dec;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
        zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags &= ~BIT(flag);
}

static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;
        u16 size = meta->table[index].size;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        zram->stats.pages_zero--;
                }
                return;
        }

        if (unlikely(size > max_zpage_size))
                zram->stats.bad_compress--;

        zs_free(meta->mem_pool, handle);

        if (size <= PAGE_SIZE / 2)
                zram->stats.good_compress--;

        zram_stat64_sub(zram, &zram->stats.compr_size,
                        meta->table[index].size);
        zram->stats.pages_stored--;

        meta->table[index].handle = 0;
        meta->table[index].size = 0;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

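/*
 * Decompress the slot at @index into @mem, which must be PAGE_SIZE
 * bytes. Unallocated slots and ZRAM_ZERO slots read back as zeroes;
 * slots whose recorded size equals PAGE_SIZE were stored uncompressed
 * (see zram_bvec_write) and are copied out verbatim rather than fed
 * to LZO.
 */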
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = LZO_E_OK;
        size_t clen = PAGE_SIZE;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                memset(mem, 0, PAGE_SIZE);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (meta->table[index].size == PAGE_SIZE)
                memcpy(mem, cmem, PAGE_SIZE);
        else
                ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
                                                mem, &clen);
        zs_unmap_object(meta->mem_pool, handle);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                zram_stat64_inc(zram, &zram->stats.failed_reads);
                return ret;
        }

        return 0;
}

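/*
 * Read path: a full-page bvec is decompressed straight into the
 * caller's page; a partial bvec goes through a temporary full-page
 * buffer first, and only the requested byte range is copied out.
 */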
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset, struct bio *bio)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        page = bvec->bv_page;

        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                handle_zero_page(bvec);
                return 0;
        }

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_info("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

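/*
 * Write path: partial bvecs are handled as read-modify-write through a
 * temporary full-page buffer. Zero-filled pages are recorded with the
 * ZRAM_ZERO flag and consume no zsmalloc memory; pages that compress
 * poorly (clen > max_zpage_size) are stored uncompressed at PAGE_SIZE.
 */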
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;

        page = bvec->bv_page;
        src = meta->compress_buffer;

        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

        /*
         * System overwrites unused sectors. Free memory associated
         * with this sector now.
         */
        if (meta->table[index].handle ||
            zram_test_flag(meta, index, ZRAM_ZERO))
                zram_free_page(zram, index);

        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_zero_filled(uncmem)) {
                kunmap_atomic(user_mem);
                if (is_partial_io(bvec))
                        kfree(uncmem);
                zram->stats.pages_zero++;
                zram_set_flag(meta, index, ZRAM_ZERO);
                ret = 0;
                goto out;
        }

        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               meta->compress_workmem);

        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        if (unlikely(clen > max_zpage_size)) {
                zram->stats.bad_compress++;
                clen = PAGE_SIZE;
                src = NULL;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }
        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
                src = kmap_atomic(page);
        memcpy(cmem, src, clen);
        if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
                kunmap_atomic(src);

        zs_unmap_object(meta->mem_pool, handle);

        meta->table[index].handle = handle;
        meta->table[index].size = clen;

        /* Update stats */
        zram_stat64_add(zram, &zram->stats.compr_size, clen);
        zram->stats.pages_stored++;
        if (clen <= PAGE_SIZE / 2)
                zram->stats.good_compress++;

out:
        if (is_partial_io(bvec))
                kfree(uncmem);

        if (ret)
                zram_stat64_inc(zram, &zram->stats.failed_writes);
        return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio, int rw)
{
        int ret;

        if (rw == READ) {
                down_read(&zram->lock);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                up_read(&zram->lock);
        } else {
                down_write(&zram->lock);
                ret = zram_bvec_write(zram, bvec, index, offset);
                up_write(&zram->lock);
        }

        return ret;
}

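/*
 * Advance (index, offset) past @bvec. For example, with 4 KiB pages,
 * a 1 KiB bvec at offset 3 KiB ends exactly on the page boundary:
 * index advances by one and offset wraps to 0.
 */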
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
        int i, offset;
        u32 index;
        struct bio_vec *bvec;

        switch (rw) {
        case READ:
                zram_stat64_inc(zram, &zram->stats.num_reads);
                break;
        case WRITE:
                zram_stat64_inc(zram, &zram->stats.num_writes);
                break;
        }

        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bio_for_each_segment(bvec, bio, i) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec->bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec->bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec->bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
                                goto out;

                        bv.bv_len = bvec->bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
                                goto out;
                } else {
                        if (zram_bvec_rw(zram, bvec, index, offset, bio, rw) < 0)
                                goto out;
                }

                update_position(&index, &offset, bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        if (unlikely(
                (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
                (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
                (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

                return 0;
        }

        /* I/O request is valid */
        return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        down_read(&zram->init_lock);
        if (unlikely(!zram->init_done))
                goto error;

        if (!valid_io_request(zram, bio)) {
                zram_stat64_inc(zram, &zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio, bio_data_dir(bio));
        up_read(&zram->init_lock);

        return;

error:
        up_read(&zram->init_lock);
        bio_io_error(bio);
}

static void __zram_reset_device(struct zram *zram)
{
        size_t index;
        struct zram_meta *meta;

        if (!zram->init_done)
                return;

        meta = zram->meta;
        zram->init_done = 0;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = meta->table[index].handle;
                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zram_meta_free(zram->meta);
        zram->meta = NULL;
        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
        set_capacity(zram->disk, 0);
}

void zram_reset_device(struct zram *zram)
{
        down_write(&zram->init_lock);
        __zram_reset_device(zram);
        up_write(&zram->init_lock);
}

void zram_meta_free(struct zram_meta *meta)
{
        zs_destroy_pool(meta->mem_pool);
        kfree(meta->compress_workmem);
        free_pages((unsigned long)meta->compress_buffer, 1);
        vfree(meta->table);
        kfree(meta);
}

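/*
 * Allocate per-device metadata: LZO workmem, a two-page (order-1)
 * compression buffer (LZO output for an incompressible page can
 * slightly exceed PAGE_SIZE), the page table, and the zsmalloc pool.
 */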
struct zram_meta *zram_meta_alloc(u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
        if (!meta)
                goto out;

        meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!meta->compress_workmem)
                goto free_meta;

        meta->compress_buffer =
                (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!meta->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                goto free_workmem;
        }

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto free_buffer;
        }

        meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto free_table;
        }

        return meta;

free_table:
        vfree(meta->table);
free_buffer:
        free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
        kfree(meta->compress_workmem);
free_meta:
        kfree(meta);
        meta = NULL;
out:
        return meta;
}

void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
        if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
                pr_info(
                "There is little point creating a zram of greater than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that zram uses about 0.1%% of the size of "
                "the disk when not in use so a huge zram is "
                "wasteful.\n"
                "\tMemory Size: %lu kB\n"
                "\tSize you selected: %llu kB\n"
                "Continuing anyway ...\n",
                (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
                );
        }

        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

        zram->meta = meta;
        zram->init_done = 1;

        pr_debug("Initialization done!\n");
}

static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;

        zram = bdev->bd_disk->private_data;
        zram_free_page(zram, index);
        zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

static int create_device(struct zram *zram, int device_id)
{
        int ret = 0;

        init_rwsem(&zram->lock);
        init_rwsem(&zram->init_lock);
        spin_lock_init(&zram->stat64_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                blk_cleanup_queue(zram->queue);
                pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);

        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZE sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warn("Error creating sysfs group\n");
                goto out;
        }

        zram->init_done = 0;

out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        if (zram->disk) {
                del_gendisk(zram->disk);
                put_disk(zram->disk);
        }

        if (zram->queue)
                blk_cleanup_queue(zram->queue);
}

unsigned int zram_get_num_devices(void)
{
        return num_devices;
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warn("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warn("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        /* Allocate the device array and initialize each one */
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        pr_info("Created %u device(s) ...\n", num_devices);

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                destroy_device(zram);
                zram_reset_device(zram);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");
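
/*
 * A typical setup sequence, assuming the sysfs attributes exported by
 * zram_sysfs.c (only /sys/block/zram<id>/disksize is referenced in this
 * file; the rest is conventional swap-device usage):
 *
 *   modprobe zram num_devices=1
 *   echo $((256*1024*1024)) > /sys/block/zram0/disksize
 *   mkswap /dev/zram0
 *   swapon /dev/zram0
 *
 * The device can also hold a regular filesystem (e.g. mkfs.ext4
 * /dev/zram0) for a compressed in-memory scratch disk.
 */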

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");