// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set =		zswap_enabled_param_set,
	.get =		param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set =		zswap_compressor_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set =		zswap_zpool_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Enable/disable handling same-value filled pages (enabled by default) */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);
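
/*
 * Illustrative note (not from the upstream source): because the knobs above
 * are registered with mode 0644, they show up at runtime under
 * /sys/module/zswap/parameters/.  For example, assuming the kernel has an
 * lz4 acomp driver available, a root shell could switch compressors with
 *
 *	echo lz4 > /sys/module/zswap/parameters/compressor
 *
 * and the *_param_set callbacks below validate the value before any pool
 * is switched.
 */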

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *dstmem;
	struct mutex *mutex;
};

struct zswap_pool {
	struct zpool *zpool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct kref kref;
	struct list_head list;
	struct work_struct release_work;
	struct work_struct shrink_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression. For a same-value filled page, length is 0.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - the value of a same-value filled page (every word in the page
 *         holds this value)
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
};

struct zswap_header {
	swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/* fatal error during init */
static bool zswap_init_failed;

/* set once a pool exists; init can complete without creating one */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

static bool zswap_is_full(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static bool zswap_can_accept(void)
{
	return totalram_pages() * zswap_accept_thr_percent / 100 *
				zswap_max_pool_percent / 100 >
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
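
/*
 * Worked example (illustrative, not from the upstream source): on a
 * machine with 1 GiB of RAM, i.e. totalram_pages() == 262144 4 KiB pages,
 * and the default tunables above:
 *
 *	max pool size    = 262144 * 20 / 100            = 52428 pages
 *	accept threshold = 262144 * 90 / 100 * 20 / 100 = 47185 pages
 *
 * zswap_is_full() starts returning true once the compressed pool holds
 * more than 52428 pages' worth of data; stores are then rejected until
 * the pool shrinks below 47185 pages and zswap_can_accept() succeeds.
 */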

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zpool_free(entry->pool->zpool, entry->handle);
		zswap_pool_put(entry->pool);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/*
 * Caller must hold the tree lock.
 * Removes the entry from the tree and frees it, if no one references it.
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

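/*
 * Sketch of the refcount discipline (illustrative only; the real callers
 * are zswap_frontswap_load() and zswap_writeback_entry() below).  A lookup
 * takes a reference under the tree lock, uses the entry with the lock
 * dropped, then drops the reference under the lock again:
 *
 *	spin_lock(&tree->lock);
 *	entry = zswap_entry_find_get(&tree->rbroot, offset);
 *	spin_unlock(&tree->lock);
 *	if (entry) {
 *		... use entry->handle or entry->value ...
 *		spin_lock(&tree->lock);
 *		zswap_entry_put(tree, entry);	(may free the entry)
 *		spin_unlock(&tree->lock);
 *	}
 */
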
/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
/*
 * If users dynamically change the zpool type and compressor at runtime, i.e.
 * while zswap is running, zswap can have more than one zpool on one cpu, but
 * they are sharing dstmem. So we need this mutex to be per-cpu.
 */
static DEFINE_PER_CPU(struct mutex *, zswap_mutex);

static int zswap_dstmem_prepare(unsigned int cpu)
{
	struct mutex *mutex;
	u8 *dst;

	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!dst)
		return -ENOMEM;

	mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
	if (!mutex) {
		kfree(dst);
		return -ENOMEM;
	}

	mutex_init(mutex);
	per_cpu(zswap_dstmem, cpu) = dst;
	per_cpu(zswap_mutex, cpu) = mutex;
	return 0;
}

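/*
 * Note (illustrative, inferred rationale): the dstmem buffer is two pages
 * because a compressor may expand incompressible data.  Sizing the output
 * scatterlist at PAGE_SIZE * 2 (see zswap_frontswap_store() below) lets
 * the algorithm report dlen > PAGE_SIZE instead of failing outright; such
 * poorly compressed pages are then rejected when zpool_malloc() cannot
 * place them, and counted in zswap_reject_compress_poor.
 */
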
static int zswap_dstmem_dead(unsigned int cpu)
{
	struct mutex *mutex;
	u8 *dst;

	mutex = per_cpu(zswap_mutex, cpu);
	kfree(mutex);
	per_cpu(zswap_mutex, cpu) = NULL;

	dst = per_cpu(zswap_dstmem, cpu);
	kfree(dst);
	per_cpu(zswap_dstmem, cpu) = NULL;

	return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
				pool->tfm_name, PTR_ERR(acomp));
		return PTR_ERR(acomp);
	}
	acomp_ctx->acomp = acomp;

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		crypto_free_acomp(acomp_ctx->acomp);
		return -ENOMEM;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is an async zip driver, crypto_req_done()
	 * will wake up crypto_wait_req(); if the backend is scomp, the
	 * callback won't be called, and crypto_wait_req() will return
	 * without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
	acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);

	return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
	}

	return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static void shrink_worker(struct work_struct *w)
{
	struct zswap_pool *pool = container_of(w, typeof(*pool),
						shrink_work);

	if (zpool_shrink(pool->zpool, 1, NULL))
		zswap_reject_reclaim_fail++;
	zswap_pool_put(pool);
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either is unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);
	INIT_WORK(&pool->shrink_work, shrink_worker);

	zswap_pool_debug("created", pool);

	return pool;

error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret;

	if (zswap_init_failed) {
		pr_err("can't set param, initialization failed\n");
		return -ENODEV;
	}

	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return 0;

	/* if this is load-time (pre-init) param setting,
	 * don't create a pool; that's done during init.
	 */
	if (!zswap_init_started)
		return param_set_charp(s, kp);

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	if (zswap_init_failed) {
		pr_err("can't enable, initialization failed\n");
		return -ENODEV;
	}
	if (!zswap_has_pool && zswap_init_started) {
		pr_err("can't enable, no pool configured\n");
		return -ENODEV;
	}

	return param_set_bool(val, kp);
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * Returns ZSWAP_SWAPCACHE_EXIST if the page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if a new page was allocated and needs to be
 *     populated; the page is added to the swap cache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;

	u8 *src, *tmp = NULL;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	if (!zpool_can_sleep_mapped(pool)) {
		tmp = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (!tmp)
			return -ENOMEM;
	}

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		zpool_unmap_handle(pool, handle);
		kfree(tmp);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	src = (u8 *)zhdr + sizeof(struct zswap_header);
	if (!zpool_can_sleep_mapped(pool)) {
		memcpy(tmp, src, entry->length);
		src = tmp;
		zpool_unmap_handle(pool, handle);
	}

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
		dlen = PAGE_SIZE;

		mutex_lock(acomp_ctx->mutex);
		sg_init_one(&input, src, entry->length);
		sg_init_table(&output, 1);
		sg_set_page(&output, page, PAGE_SIZE, 0);
		acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
		ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
		dlen = acomp_ctx->req->dlen;
		mutex_unlock(acomp_ctx->mutex);

		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	put_page(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for the entry here:
	 * (1) refcount is 1 (normal case): the entry is valid and on the tree
	 * (2) refcount is 0: the entry was freed and removed from the tree
	 *     because an invalidate happened during writeback;
	 *     search the tree and drop the tree reference if the entry is found
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently, so it is safe and okay not to free the
	 * entry.  If the following put does free the entry, it is also
	 * okay to return nonzero.
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	if (zpool_can_sleep_mapped(pool))
		zpool_unmap_handle(pool, handle);
	else
		kfree(tmp);

	return ret;
}

static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;
	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos] != page[0])
			return 0;
	}
	*value = page[0];
	return 1;
}
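
/*
 * Example (illustrative): a page of all zero bytes, or one filled with a
 * pattern that repeats at sizeof(unsigned long) granularity, such as the
 * byte pair 0xab 0xcd repeated across the whole page, is detected as
 * same-filled: every unsigned long word is identical.  Only that word is
 * kept (in entry->value); zswap_fill_page() below reconstructs the page
 * from it on load.
 */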

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	int ret;
	unsigned int hlen, dlen = PAGE_SIZE;
	unsigned long handle, value;
	char *buf;
	u8 *src, *dst;
	struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
	gfp_t gfp;

	/* THP isn't supported */
	if (PageTransHuge(page)) {
		ret = -EINVAL;
		goto reject;
	}

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		struct zswap_pool *pool;

		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
		pool = zswap_pool_last_get();
		if (pool)
			queue_work(shrink_wq, &pool->shrink_work);
		ret = -ENOMEM;
		goto reject;
	}

	if (zswap_pool_reached_full) {
		if (!zswap_can_accept()) {
			ret = -ENOMEM;
			goto reject;
		} else
			zswap_pool_reached_full = false;
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	if (zswap_same_filled_pages_enabled) {
		src = kmap_atomic(page);
		if (zswap_is_page_same_filled(src, &value)) {
			kunmap_atomic(src);
			entry->offset = offset;
			entry->length = 0;
			entry->value = value;
			atomic_inc(&zswap_same_filled_pages);
			goto insert_entry;
		}
		kunmap_atomic(src);
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(acomp_ctx->mutex);

	dst = acomp_ctx->dstmem;
	sg_init_table(&input, 1);
	sg_set_page(&input, page, PAGE_SIZE, 0);

	/* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
	/*
	 * It may look a little silly that we send an asynchronous request and
	 * then wait for its completion synchronously, making the process
	 * effectively synchronous.
	 * In theory, acomp lets users submit multiple requests on one acomp
	 * instance and have them completed concurrently.  But frontswap
	 * stores and loads page by page; within one thread there is no way
	 * to submit a second page before the first is done.
	 * Different threads running on different CPUs use different acomp
	 * instances, though, so multiple threads can (de)compress in
	 * parallel.
	 */
	ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;

	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(entry->pool->zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, &zhdr, hlen);
	memcpy(buf + hlen, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	mutex_unlock(acomp_ctx->mutex);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

insert_entry:
	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	mutex_unlock(acomp_ctx->mutex);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}

/*
 * Returns 0 if the page was successfully decompressed.
 * Returns -1 if the entry was not found or an error occurred.
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src, *dst, *tmp;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	if (!entry->length) {
		dst = kmap_atomic(page);
		zswap_fill_page(dst, entry->value);
		kunmap_atomic(dst);
		ret = 0;
		goto freeentry;
	}

	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {

		tmp = kmalloc(entry->length, GFP_ATOMIC);
		if (!tmp) {
			ret = -ENOMEM;
			goto freeentry;
		}
	}

	/* decompress */
	dlen = PAGE_SIZE;
	src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
	if (zpool_evictable(entry->pool->zpool))
		src += sizeof(struct zswap_header);

	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {

		memcpy(tmp, src, entry->length);
		src = tmp;

		zpool_unmap_handle(entry->pool->zpool, entry->handle);
	}

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(acomp_ctx->mutex);
	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_page(&output, page, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
	ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
	mutex_unlock(acomp_ctx->mutex);

	if (zpool_can_sleep_mapped(entry->pool->zpool))
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
	else
		kfree(tmp);

	BUG_ON(ret);

freeentry:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return ret;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", 0444,
			   zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", 0444,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}
#endif

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_init_started = true;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
				zswap_dstmem_prepare, zswap_dstmem_dead);
	if (ret) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	shrink_wq = create_workqueue("zswap-shrink");
	if (!shrink_wq)
		goto fallback_fail;

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

fallback_fail:
	if (pool)
		zswap_pool_destroy(pool);
hp_fail:
	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_failed = true;
	zswap_enabled = false;
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");