linux/arch/powerpc/platforms/pseries/cmm.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Collaborative memory management interface.
 *
 * Copyright (C) 2008 IBM Corporation
 * Author(s): Brian King (brking@linux.vnet.ibm.com)
 */

#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/oom.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/stringify.h>
#include <linux/swap.h>
#include <linux/device.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/balloon_compaction.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"

#define CMM_DRIVER_VERSION      "1.0.0"
#define CMM_DEFAULT_DELAY       1
#define CMM_HOTPLUG_DELAY       5
#define CMM_DEBUG               0
#define CMM_DISABLE             0
#define CMM_OOM_KB              1024
#define CMM_MIN_MEM_MB          256
#define KB2PAGES(_p)            ((_p)>>(PAGE_SHIFT-10))
#define PAGES2KB(_p)            ((_p)<<(PAGE_SHIFT-10))
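
/*
 * Worked example for the conversions above (illustrative only; the actual
 * shift depends on the configured PAGE_SHIFT): with 64K pages
 * (PAGE_SHIFT == 16) the shift is 6, so KB2PAGES(1024) == 16 pages and
 * PAGES2KB(16) == 1024 KB; with 4K pages (PAGE_SHIFT == 12) the shift is 2,
 * so 1024 KB corresponds to 256 pages.
 */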

#define CMM_MEM_HOTPLUG_PRI     1

static unsigned int delay = CMM_DEFAULT_DELAY;
static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY;
static unsigned int oom_kb = CMM_OOM_KB;
static unsigned int cmm_debug = CMM_DEBUG;
static unsigned int cmm_disabled = CMM_DISABLE;
static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
static bool __read_mostly simulate;
static unsigned long simulate_loan_target_kb;
static struct device cmm_dev;

MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
MODULE_LICENSE("GPL");
MODULE_VERSION(CMM_DRIVER_VERSION);

module_param_named(delay, delay, uint, 0644);
MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. "
                 "[Default=" __stringify(CMM_DEFAULT_DELAY) "]");
module_param_named(hotplug_delay, hotplug_delay, uint, 0644);
MODULE_PARM_DESC(hotplug_delay, "Delay (in seconds) after memory hotplug remove "
                 "before loaning resumes. "
                 "[Default=" __stringify(CMM_HOTPLUG_DELAY) "]");
module_param_named(oom_kb, oom_kb, uint, 0644);
MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. "
                 "[Default=" __stringify(CMM_OOM_KB) "]");
module_param_named(min_mem_mb, min_mem_mb, ulong, 0644);
MODULE_PARM_DESC(min_mem_mb, "Minimum amount of memory (in MB) to not balloon. "
                 "[Default=" __stringify(CMM_MIN_MEM_MB) "]");
module_param_named(debug, cmm_debug, uint, 0644);
MODULE_PARM_DESC(debug, "Enable module debugging logging. Set to 1 to enable. "
                 "[Default=" __stringify(CMM_DEBUG) "]");
module_param_named(simulate, simulate, bool, 0444);
MODULE_PARM_DESC(simulate, "Enable simulation mode (no communication with hw).");
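
/*
 * Typical usage of the parameters above (illustrative only, not taken from
 * this file): when built as a module they can be set at load time, e.g.
 *
 *      modprobe cmm delay=5 min_mem_mb=512 debug=1
 *
 * Since most of them are declared with mode 0644, they can usually also be
 * tuned at runtime through /sys/module/cmm/parameters/<name>. "simulate" is
 * 0444 and therefore only settable at load time.
 */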

#define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }

static atomic_long_t loaned_pages;
static unsigned long loaned_pages_target;
static unsigned long oom_freed_pages;

static DEFINE_MUTEX(hotplug_mutex);
static int hotplug_occurred; /* protected by the hotplug mutex */

static struct task_struct *cmm_thread_ptr;
static struct balloon_dev_info b_dev_info;

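/*
 * The two helpers below change the hypervisor-visible state of one kernel
 * page. A kernel page may cover several CMO pages (cmo_get_page_size()
 * returns the hypervisor's page size), so the H_PAGE_INIT hcall is issued
 * once per cmo_page_sz chunk. If a chunk fails, the second loop walks back
 * over the chunks already converted and restores their previous state, so a
 * page is never left half loaned/half active. In simulation mode no hcalls
 * are made at all.
 */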
static long plpar_page_set_loaned(struct page *page)
{
        const unsigned long vpa = page_to_phys(page);
        unsigned long cmo_page_sz = cmo_get_page_size();
        long rc = 0;
        int i;

        if (unlikely(simulate))
                return 0;

        for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
                rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);

        for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
                plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
                                   vpa + i - cmo_page_sz, 0);

        return rc;
}

static long plpar_page_set_active(struct page *page)
{
        const unsigned long vpa = page_to_phys(page);
        unsigned long cmo_page_sz = cmo_get_page_size();
        long rc = 0;
        int i;

        if (unlikely(simulate))
                return 0;

        for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
                rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);

        for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
                plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
                                   vpa + i - cmo_page_sz, 0);

        return rc;
}

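/*
 * Inflating the balloon: each allocated page is marked as loaned via the
 * hcall above, queued on the balloon list (so that, with
 * CONFIG_BALLOON_COMPACTION, it can still be migrated) and removed from the
 * managed page count, so it no longer shows up as available memory while it
 * is on loan. cmm_free_pages() below reverses every step. The hotplug_mutex
 * check makes inflation back off as soon as a memory hotplug operation is
 * pending or has occurred.
 */
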
/**
 * cmm_alloc_pages - Allocate pages and mark them as loaned
 * @nr: number of pages to allocate
 *
 * Return value:
 *      the number of requested pages that could not be allocated
 **/
static long cmm_alloc_pages(long nr)
{
        struct page *page;
        long rc;

        cmm_dbg("Begin request for %ld pages\n", nr);

        while (nr) {
                /* Exit if a hotplug operation is in progress or occurred */
                if (mutex_trylock(&hotplug_mutex)) {
                        if (hotplug_occurred) {
                                mutex_unlock(&hotplug_mutex);
                                break;
                        }
                        mutex_unlock(&hotplug_mutex);
                } else {
                        break;
                }

                page = balloon_page_alloc();
                if (!page)
                        break;
                rc = plpar_page_set_loaned(page);
                if (rc) {
                        pr_err("%s: Cannot set page to loaned. rc=%ld\n", __func__, rc);
                        __free_page(page);
                        break;
                }

                balloon_page_enqueue(&b_dev_info, page);
                atomic_long_inc(&loaned_pages);
                adjust_managed_page_count(page, -1);
                nr--;
        }

        cmm_dbg("End request with %ld pages unfulfilled\n", nr);
        return nr;
}

/**
 * cmm_free_pages - Free pages and mark them as active
 * @nr: number of pages to free
 *
 * Return value:
 *      the number of requested pages that could not be freed
 **/
static long cmm_free_pages(long nr)
{
        struct page *page;

        cmm_dbg("Begin free of %ld pages.\n", nr);
        while (nr) {
                page = balloon_page_dequeue(&b_dev_info);
                if (!page)
                        break;
                plpar_page_set_active(page);
                adjust_managed_page_count(page, 1);
                __free_page(page);
                atomic_long_dec(&loaned_pages);
                nr--;
        }
        cmm_dbg("End request with %ld pages unfulfilled\n", nr);
        return nr;
}

/**
 * cmm_oom_notify - OOM notifier
 * @self:       notifier block struct
 * @dummy:      not used
 * @parm:       returned - number of pages freed
 *
 * Return value:
 *      NOTIFY_OK
 **/
static int cmm_oom_notify(struct notifier_block *self,
                          unsigned long dummy, void *parm)
{
        unsigned long *freed = parm;
        long nr = KB2PAGES(oom_kb);

        cmm_dbg("OOM processing started\n");
        nr = cmm_free_pages(nr);
        loaned_pages_target = atomic_long_read(&loaned_pages);
        *freed += KB2PAGES(oom_kb) - nr;
        oom_freed_pages += KB2PAGES(oom_kb) - nr;
        cmm_dbg("OOM processing complete\n");
        return NOTIFY_OK;
}

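/*
 * How the loan target is derived (summarizing the logic below): the
 * hypervisor reports how many additional bytes it would like loaned
 * (mpp_data.loan_request, which may be negative); converted to pages and
 * added to what is already loaned, this gives the new target. The target is
 * then clamped so that pages handed back by the OOM notifier are not
 * immediately loaned out again and so that at least min_mem_mb stays
 * un-ballooned. As an illustration (numbers made up): with 1000 pages loaned
 * and a loan request of 512 pages, the target becomes 1512 unless that would
 * leave fewer than min_mem_pages active. In simulation mode the target
 * simply comes from the simulate_loan_target_kb sysfs knob.
 */
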
/**
 * cmm_get_mpp - Read memory performance parameters
 *
 * Makes an hcall to query the current page loan request from the hypervisor.
 *
 * Return value:
 *      nothing
 **/
static void cmm_get_mpp(void)
{
        const long __loaned_pages = atomic_long_read(&loaned_pages);
        const long total_pages = totalram_pages() + __loaned_pages;
        int rc;
        struct hvcall_mpp_data mpp_data;
        signed long active_pages_target, page_loan_request, target;
        signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;

        if (likely(!simulate)) {
                rc = h_get_mpp(&mpp_data);
                if (rc != H_SUCCESS)
                        return;
                page_loan_request = div_s64((s64)mpp_data.loan_request,
                                            PAGE_SIZE);
                target = page_loan_request + __loaned_pages;
        } else {
                target = KB2PAGES(simulate_loan_target_kb);
                page_loan_request = target - __loaned_pages;
        }

        if (target < 0 || total_pages < min_mem_pages)
                target = 0;

        if (target > oom_freed_pages)
                target -= oom_freed_pages;
        else
                target = 0;

        active_pages_target = total_pages - target;

        if (min_mem_pages > active_pages_target)
                target = total_pages - min_mem_pages;

        if (target < 0)
                target = 0;

        loaned_pages_target = target;

        cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
                page_loan_request, __loaned_pages, loaned_pages_target,
                oom_freed_pages, totalram_pages());
}

static struct notifier_block cmm_oom_nb = {
        .notifier_call = cmm_oom_notify
};

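/*
 * Main loop: cmm_thread wakes up every "delay" seconds, re-reads the loan
 * target from the hypervisor and then either inflates (allocates and loans
 * more pages) or deflates (returns pages) until loaned_pages matches
 * loaned_pages_target. After a memory hotplug operation it pauses for
 * hotplug_delay seconds before loaning resumes, and it skips a pass entirely
 * while a hotplug operation holds the hotplug_mutex.
 */
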
/**
 * cmm_thread - CMM task thread
 * @dummy:      not used
 *
 * Return value:
 *      0
 **/
static int cmm_thread(void *dummy)
{
        unsigned long timeleft;
        long __loaned_pages;

        while (1) {
                timeleft = msleep_interruptible(delay * 1000);

                if (kthread_should_stop() || timeleft)
                        break;

                if (mutex_trylock(&hotplug_mutex)) {
                        if (hotplug_occurred) {
                                hotplug_occurred = 0;
                                mutex_unlock(&hotplug_mutex);
                                cmm_dbg("Hotplug operation has occurred, "
                                                "loaning activity suspended "
                                                "for %d seconds.\n",
                                                hotplug_delay);
                                timeleft = msleep_interruptible(hotplug_delay *
                                                1000);
                                if (kthread_should_stop() || timeleft)
                                        break;
                                continue;
                        }
                        mutex_unlock(&hotplug_mutex);
                } else {
                        cmm_dbg("Hotplug operation in progress, activity "
                                        "suspended\n");
                        continue;
                }

                cmm_get_mpp();

                __loaned_pages = atomic_long_read(&loaned_pages);
                if (loaned_pages_target > __loaned_pages) {
                        if (cmm_alloc_pages(loaned_pages_target - __loaned_pages))
                                loaned_pages_target = __loaned_pages;
                } else if (loaned_pages_target < __loaned_pages)
                        cmm_free_pages(__loaned_pages - loaned_pages_target);
        }
        return 0;
}

#define CMM_SHOW(name, format, args...)                 \
        static ssize_t show_##name(struct device *dev,  \
                                   struct device_attribute *attr,       \
                                   char *buf)                   \
        {                                                       \
                return sprintf(buf, format, ##args);            \
        }                                                       \
        static DEVICE_ATTR(name, 0444, show_##name, NULL)

CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(atomic_long_read(&loaned_pages)));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));

static ssize_t show_oom_pages(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
}

static ssize_t store_oom_pages(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        unsigned long val = simple_strtoul(buf, NULL, 10);

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (val != 0)
                return -EBADMSG;

        oom_freed_pages = 0;
        return count;
}

static DEVICE_ATTR(oom_freed_kb, 0644,
                   show_oom_pages, store_oom_pages);

static struct device_attribute *cmm_attrs[] = {
        &dev_attr_loaned_kb,
        &dev_attr_loaned_target_kb,
        &dev_attr_oom_freed_kb,
};

static DEVICE_ULONG_ATTR(simulate_loan_target_kb, 0644,
                         simulate_loan_target_kb);
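
/*
 * The attributes above are exposed through a "cmm" system subsystem; with
 * subsys_system_register() and dev->id = 0 below they typically show up as
 *
 *      /sys/devices/system/cmm/cmm0/loaned_kb
 *      /sys/devices/system/cmm/cmm0/loaned_target_kb
 *      /sys/devices/system/cmm/cmm0/oom_freed_kb
 *      /sys/devices/system/cmm/cmm0/simulate_loan_target_kb   (simulate=1 only)
 *
 * (path shown for illustration). oom_freed_kb can be reset by writing 0 to
 * it; the loaned_* files are read-only.
 */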

static struct bus_type cmm_subsys = {
        .name = "cmm",
        .dev_name = "cmm",
};

static void cmm_release_device(struct device *dev)
{
}

/**
 * cmm_sysfs_register - Register with sysfs
 *
 * Return value:
 *      0 on success / other on failure
 **/
static int cmm_sysfs_register(struct device *dev)
{
        int i, rc;

        if ((rc = subsys_system_register(&cmm_subsys, NULL)))
                return rc;

        dev->id = 0;
        dev->bus = &cmm_subsys;
        dev->release = cmm_release_device;

        if ((rc = device_register(dev)))
                goto subsys_unregister;

        for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
                if ((rc = device_create_file(dev, cmm_attrs[i])))
                        goto fail;
        }

        if (!simulate)
                return 0;
        rc = device_create_file(dev, &dev_attr_simulate_loan_target_kb.attr);
        if (rc)
                goto fail;
        return 0;

fail:
        while (--i >= 0)
                device_remove_file(dev, cmm_attrs[i]);
        device_unregister(dev);
subsys_unregister:
        bus_unregister(&cmm_subsys);
        return rc;
}

/**
 * cmm_unregister_sysfs - Unregister from sysfs
 *
 **/
static void cmm_unregister_sysfs(struct device *dev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
                device_remove_file(dev, cmm_attrs[i]);
        device_unregister(dev);
        bus_unregister(&cmm_subsys);
}

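/*
 * Before a restart all loaned pages are made active again and the thread is
 * stopped: the kernel that boots next has no record of the loan, so nothing
 * may be left marked as "loaned" across the reboot.
 */
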
/**
 * cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
 *
 **/
static int cmm_reboot_notifier(struct notifier_block *nb,
                               unsigned long action, void *unused)
{
        if (action == SYS_RESTART) {
                if (cmm_thread_ptr)
                        kthread_stop(cmm_thread_ptr);
                cmm_thread_ptr = NULL;
                cmm_free_pages(atomic_long_read(&loaned_pages));
        }
        return NOTIFY_DONE;
}

static struct notifier_block cmm_reboot_nb = {
        .notifier_call = cmm_reboot_notifier,
};

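/*
 * Memory hotplug interaction: hotplug_mutex is taken on MEM_GOING_OFFLINE
 * and intentionally held until MEM_OFFLINE/MEM_CANCEL_OFFLINE, so both
 * cmm_thread and cmm_alloc_pages() (which only trylock it) stay out of the
 * way for the whole offline operation. hotplug_occurred additionally makes
 * cmm_thread wait hotplug_delay seconds before loaning resumes.
 */
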
/**
 * cmm_memory_cb - Handle memory hotplug notifier calls
 * @self:       notifier block struct
 * @action:     action to take
 * @arg:        struct memory_notify data for handler
 *
 * Return value:
 *      NOTIFY_OK or notifier error based on subfunction return value
 *
 **/
static int cmm_memory_cb(struct notifier_block *self,
                        unsigned long action, void *arg)
{
        int ret = 0;

        switch (action) {
        case MEM_GOING_OFFLINE:
                mutex_lock(&hotplug_mutex);
                hotplug_occurred = 1;
                break;
        case MEM_OFFLINE:
        case MEM_CANCEL_OFFLINE:
                mutex_unlock(&hotplug_mutex);
                cmm_dbg("Memory offline operation complete.\n");
                break;
        case MEM_GOING_ONLINE:
        case MEM_ONLINE:
        case MEM_CANCEL_ONLINE:
                break;
        }

        return notifier_from_errno(ret);
}

static struct notifier_block cmm_mem_nb = {
        .notifier_call = cmm_memory_cb,
        .priority = CMM_MEM_HOTPLUG_PRI
};

#ifdef CONFIG_BALLOON_COMPACTION
static struct vfsmount *balloon_mnt;

static int cmm_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, PPC_CMM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type balloon_fs = {
        .name = "ppc-cmm",
        .init_fs_context = cmm_init_fs_context,
        .kill_sb = kill_anon_super,
};

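/*
 * Balloon compaction support: balloon pages get an address_space backed by
 * an anonymous inode on the "ppc-cmm" pseudo filesystem (set up in
 * cmm_balloon_compaction_init() below) so the core migration code can
 * recognize them, and cmm_migratepage() is the callback that moves a loaned
 * page: the new page is loaned first, list membership and zone accounting
 * are transferred, then the old page is made active and released.
 */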
static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
                           struct page *newpage, struct page *page,
                           enum migrate_mode mode)
{
        unsigned long flags;

        /*
         * loan/"inflate" the newpage first.
         *
         * We might race against the cmm_thread who might discover after our
         * loan request that another page is to be unloaned. However, once
         * the cmm_thread runs again later, this error will automatically
         * be corrected.
         */
        if (plpar_page_set_loaned(newpage)) {
                /* Unlikely, but possible. Tell the caller not to retry now. */
                pr_err_ratelimited("%s: Cannot set page to loaned.", __func__);
                return -EBUSY;
        }

        /* balloon page list reference */
        get_page(newpage);

        /*
         * When we migrate a page to a different zone, we have to fixup the
         * count of both involved zones as we adjusted the managed page count
         * when inflating.
         */
        if (page_zone(page) != page_zone(newpage)) {
                adjust_managed_page_count(page, 1);
                adjust_managed_page_count(newpage, -1);
        }

        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        balloon_page_insert(b_dev_info, newpage);
        balloon_page_delete(page);
        b_dev_info->isolated_pages--;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

        /*
         * activate/"deflate" the old page. We ignore any errors just like the
         * other callers.
         */
        plpar_page_set_active(page);

        /* balloon page list reference */
        put_page(page);

        return MIGRATEPAGE_SUCCESS;
}

static int cmm_balloon_compaction_init(void)
{
        int rc;

        balloon_devinfo_init(&b_dev_info);
        b_dev_info.migratepage = cmm_migratepage;

        balloon_mnt = kern_mount(&balloon_fs);
        if (IS_ERR(balloon_mnt)) {
                rc = PTR_ERR(balloon_mnt);
                balloon_mnt = NULL;
                return rc;
        }

        b_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
        if (IS_ERR(b_dev_info.inode)) {
                rc = PTR_ERR(b_dev_info.inode);
                b_dev_info.inode = NULL;
                kern_unmount(balloon_mnt);
                balloon_mnt = NULL;
                return rc;
        }

        b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
        return 0;
}

static void cmm_balloon_compaction_deinit(void)
{
        if (b_dev_info.inode)
                iput(b_dev_info.inode);
        b_dev_info.inode = NULL;
        kern_unmount(balloon_mnt);
        balloon_mnt = NULL;
}
#else /* CONFIG_BALLOON_COMPACTION */
static int cmm_balloon_compaction_init(void)
{
        return 0;
}

static void cmm_balloon_compaction_deinit(void)
{
}
#endif /* CONFIG_BALLOON_COMPACTION */

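/*
 * The driver is only useful on a CMO-capable (Cooperative Memory
 * Overcommitment) pSeries partition, so cmm_init() bails out unless the
 * firmware advertises FW_FEATURE_CMO; simulate=1 overrides this for testing
 * without a hypervisor connection.
 */
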
/**
 * cmm_init - Module initialization
 *
 * Return value:
 *      0 on success / other on failure
 **/
static int cmm_init(void)
{
        int rc;

        if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
                return -EOPNOTSUPP;

        rc = cmm_balloon_compaction_init();
        if (rc)
                return rc;

        rc = register_oom_notifier(&cmm_oom_nb);
        if (rc < 0)
                goto out_balloon_compaction;

        if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
                goto out_oom_notifier;

        if ((rc = cmm_sysfs_register(&cmm_dev)))
                goto out_reboot_notifier;

        rc = register_memory_notifier(&cmm_mem_nb);
        if (rc)
                goto out_unregister_notifier;

        if (cmm_disabled)
                return 0;

        cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
        if (IS_ERR(cmm_thread_ptr)) {
                rc = PTR_ERR(cmm_thread_ptr);
                goto out_unregister_notifier;
        }

        return 0;
out_unregister_notifier:
        unregister_memory_notifier(&cmm_mem_nb);
        cmm_unregister_sysfs(&cmm_dev);
out_reboot_notifier:
        unregister_reboot_notifier(&cmm_reboot_nb);
out_oom_notifier:
        unregister_oom_notifier(&cmm_oom_nb);
out_balloon_compaction:
        cmm_balloon_compaction_deinit();
        return rc;
}

/**
 * cmm_exit - Module exit
 *
 * Return value:
 *      nothing
 **/
static void cmm_exit(void)
{
        if (cmm_thread_ptr)
                kthread_stop(cmm_thread_ptr);
        unregister_oom_notifier(&cmm_oom_nb);
        unregister_reboot_notifier(&cmm_reboot_nb);
        unregister_memory_notifier(&cmm_mem_nb);
        cmm_free_pages(atomic_long_read(&loaned_pages));
        cmm_unregister_sysfs(&cmm_dev);
        cmm_balloon_compaction_deinit();
}

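/*
 * Runtime enable/disable: writing 1 to the "disable" parameter stops
 * cmm_thread and returns every loaned page, writing 0 starts the thread
 * again. When built in, the same switch should also be reachable on the
 * kernel command line as cmm.disable=1 (standard module parameter syntax,
 * mentioned here for illustration).
 */
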
/**
 * cmm_set_disable - Disable/Enable CMM
 *
 * Return value:
 *      0 on success / other on failure
 **/
static int cmm_set_disable(const char *val, const struct kernel_param *kp)
{
        int disable = simple_strtoul(val, NULL, 10);

        if (disable != 0 && disable != 1)
                return -EINVAL;

        if (disable && !cmm_disabled) {
                if (cmm_thread_ptr)
                        kthread_stop(cmm_thread_ptr);
                cmm_thread_ptr = NULL;
                cmm_free_pages(atomic_long_read(&loaned_pages));
        } else if (!disable && cmm_disabled) {
                cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
                if (IS_ERR(cmm_thread_ptr))
                        return PTR_ERR(cmm_thread_ptr);
        }

        cmm_disabled = disable;
        return 0;
}

module_param_call(disable, cmm_set_disable, param_get_uint,
                  &cmm_disabled, 0644);
MODULE_PARM_DESC(disable, "Disable CMM. Set to 1 to disable. "
                 "[Default=" __stringify(CMM_DISABLE) "]");

module_init(cmm_init);
module_exit(cmm_exit);