linux/drivers/virtio/virtio_balloon.c
/*
 * Virtio balloon implementation, inspired by Dor Laor and Marcelo
 * Tosatti's implementations.
 *
 *  Copyright 2008 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/magic.h>

/*
 * The balloon device works in 4K page units, so each kernel page may be
 * covered by multiple balloon pages when PAGE_SIZE is larger than 4K.
 * All memory counters in this driver are in balloon page units.
 */
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
#define OOM_VBALLOON_DEFAULT_PAGES 256
#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80

static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
module_param(oom_pages, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(oom_pages, "pages to free on OOM");

#ifdef CONFIG_BALLOON_COMPACTION
static struct vfsmount *balloon_mnt;
#endif

struct virtio_balloon {
        struct virtio_device *vdev;
        struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;

        /* The balloon servicing is delegated to a freezable workqueue. */
        struct work_struct update_balloon_stats_work;
        struct work_struct update_balloon_size_work;

        /* Prevent updating balloon when it is being canceled. */
        spinlock_t stop_update_lock;
        bool stop_update;

        /* Waiting for host to ack the pages we released. */
        wait_queue_head_t acked;

        /* Number of balloon pages we've told the Host we're not using. */
        unsigned int num_pages;
        /*
         * The pages we've told the Host we're not using are enqueued
         * on the vb_dev_info->pages list.
         * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
         * to num_pages above.
         */
        struct balloon_dev_info vb_dev_info;

        /* Synchronizes access/updates to the elements of struct virtio_balloon. */
        struct mutex balloon_lock;

        /* The array of pfns we tell the Host about. */
        unsigned int num_pfns;
        __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];

        /* Memory statistics */
        struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];

        /* To register callback in oom notifier call chain */
        struct notifier_block nb;
};

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static u32 page_to_balloon_pfn(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);

        BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
        /* Convert pfn from Linux page size to balloon page size. */
        return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}

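/*
 * Virtqueue callback: the host has consumed a buffer we posted, so wake up
 * anyone sleeping in tell_host().
 */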
static void balloon_ack(struct virtqueue *vq)
{
        struct virtio_balloon *vb = vq->vdev->priv;

        wake_up(&vb->acked);
}

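/*
 * Post the current pfn array to the host on @vq, kick it, and wait until
 * the host has consumed the buffer (signalled via balloon_ack()).
 */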
static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
        struct scatterlist sg;
        unsigned int len;

        sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

        /* We should always be able to add one buffer to an empty queue. */
        virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
        virtqueue_kick(vq);

        /* When host has read buffer, this completes via balloon_ack */
        wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}

static void set_page_pfns(struct virtio_balloon *vb,
                          __virtio32 pfns[], struct page *page)
{
        unsigned int i;

        /*
         * Set balloon pfns pointing at this page.
         * Note that the first pfn points at start of the page.
         */
        for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
                pfns[i] = cpu_to_virtio32(vb->vdev,
                                          page_to_balloon_pfn(page) + i);
}

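/*
 * Inflate the balloon by up to @num balloon pages: allocate pages, queue
 * them on the balloon's page list and tell the host about their pfns.
 * Returns the number of balloon pages actually added.
 */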
static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
{
        struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
        unsigned num_allocated_pages;

        /* We can only do one array worth at a time. */
        num = min(num, ARRAY_SIZE(vb->pfns));

        mutex_lock(&vb->balloon_lock);
        for (vb->num_pfns = 0; vb->num_pfns < num;
             vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
                struct page *page = balloon_page_enqueue(vb_dev_info);

                if (!page) {
                        dev_info_ratelimited(&vb->vdev->dev,
                                             "Out of puff! Can't get %u pages\n",
                                             VIRTIO_BALLOON_PAGES_PER_PAGE);
                        /* Sleep for at least 1/5 of a second before retry. */
                        msleep(200);
                        break;
                }
                set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
                vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
                if (!virtio_has_feature(vb->vdev,
                                        VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                        adjust_managed_page_count(page, -1);
        }

        num_allocated_pages = vb->num_pfns;
        /* Did we get any? */
        if (vb->num_pfns != 0)
                tell_host(vb, vb->inflate_vq);
        mutex_unlock(&vb->balloon_lock);

        return num_allocated_pages;
}

static void release_pages_balloon(struct virtio_balloon *vb,
                                  struct list_head *pages)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, pages, lru) {
                if (!virtio_has_feature(vb->vdev,
                                        VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                        adjust_managed_page_count(page, 1);
                list_del(&page->lru);
                put_page(page); /* balloon reference */
        }
}

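/*
 * Deflate the balloon by up to @num balloon pages: dequeue pages from the
 * balloon, tell the host their pfns on the deflate queue, then hand the
 * pages back to the system.  Returns the number of balloon pages freed.
 */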
static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
{
        unsigned num_freed_pages;
        struct page *page;
        struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
        LIST_HEAD(pages);

        /* We can only do one array worth at a time. */
        num = min(num, ARRAY_SIZE(vb->pfns));

        mutex_lock(&vb->balloon_lock);
        /* We can't release more pages than we have taken. */
        num = min(num, (size_t)vb->num_pages);
        for (vb->num_pfns = 0; vb->num_pfns < num;
             vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
                page = balloon_page_dequeue(vb_dev_info);
                if (!page)
                        break;
                set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
                list_add(&page->lru, &pages);
                vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
        }

        num_freed_pages = vb->num_pfns;
        /*
         * Note that if VIRTIO_BALLOON_F_MUST_TELL_HOST is negotiated, we
         * *have* to tell the host before releasing the pages, so keep this
         * order.
         */
        if (vb->num_pfns != 0)
                tell_host(vb, vb->deflate_vq);
        release_pages_balloon(vb, &pages);
        mutex_unlock(&vb->balloon_lock);
        return num_freed_pages;
}

static inline void update_stat(struct virtio_balloon *vb, int idx,
                               u16 tag, u64 val)
{
        BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
        vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
        vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
}

#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)

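/*
 * Fill vb->stats[] with the current guest memory statistics and return the
 * number of entries written.
 */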
static unsigned int update_balloon_stats(struct virtio_balloon *vb)
{
        unsigned long events[NR_VM_EVENT_ITEMS];
        struct sysinfo i;
        unsigned int idx = 0;
        long available;

        all_vm_events(events);
        si_meminfo(&i);

        available = si_mem_available();

#ifdef CONFIG_VM_EVENT_COUNTERS
        update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
                                pages_to_bytes(events[PSWPIN]));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
                                pages_to_bytes(events[PSWPOUT]));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
#endif
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
                                pages_to_bytes(i.freeram));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
                                pages_to_bytes(i.totalram));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
                                pages_to_bytes(available));

        return idx;
}

/*
 * While most virtqueues communicate guest-initiated requests to the hypervisor,
 * the stats queue operates in reverse.  The driver initializes the virtqueue
 * with a single buffer.  From that point forward, all conversations consist of
 * a hypervisor request (a call to this function) which directs us to refill
 * the virtqueue with a fresh stats buffer.  Since stats collection can sleep,
 * we delegate the job to a freezable workqueue that will do the actual work via
 * stats_handle_request().
 */
static void stats_request(struct virtqueue *vq)
{
        struct virtio_balloon *vb = vq->vdev->priv;

        spin_lock(&vb->stop_update_lock);
        if (!vb->stop_update)
                queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
        spin_unlock(&vb->stop_update_lock);
}

static void stats_handle_request(struct virtio_balloon *vb)
{
        struct virtqueue *vq;
        struct scatterlist sg;
        unsigned int len, num_stats;

        num_stats = update_balloon_stats(vb);

        vq = vb->stats_vq;
        if (!virtqueue_get_buf(vq, &len))
                return;
        sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
        virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
        virtqueue_kick(vq);
}

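/* Config-change interrupt: the host has updated the target balloon size. */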
static void virtballoon_changed(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;
        unsigned long flags;

        spin_lock_irqsave(&vb->stop_update_lock, flags);
        if (!vb->stop_update)
                queue_work(system_freezable_wq, &vb->update_balloon_size_work);
        spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}

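/*
 * Return how far the balloon is from the host-requested target, in balloon
 * pages: positive means we should inflate, negative means deflate.
 */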
static inline s64 towards_target(struct virtio_balloon *vb)
{
        s64 target;
        u32 num_pages;

        virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
                     &num_pages);

        /* Legacy balloon config space is LE, unlike all other devices. */
        if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
                num_pages = le32_to_cpu((__force __le32)num_pages);

        target = num_pages;
        return target - vb->num_pages;
}

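/* Report the current balloon size to the host via the "actual" config field. */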
static void update_balloon_size(struct virtio_balloon *vb)
{
        u32 actual = vb->num_pages;

        /* Legacy balloon config space is LE, unlike all other devices. */
        if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
                actual = (__force u32)cpu_to_le32(actual);

        virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
                      &actual);
}

/*
 * virtballoon_oom_notify - release pages when system is under severe
 *                          memory pressure (called from out_of_memory())
 * @self : notifier block struct
 * @dummy: not used
 * @parm : returned - number of freed pages
 *
 * The balancing of memory by use of the virtio balloon should not cause
 * the termination of processes while there are pages in the balloon.
 * If virtio balloon manages to release some memory, it will make the
 * system return and retry the allocation that forced the OOM killer
 * to run.
 */
static int virtballoon_oom_notify(struct notifier_block *self,
                                  unsigned long dummy, void *parm)
{
        struct virtio_balloon *vb;
        unsigned long *freed;
        unsigned num_freed_pages;

        vb = container_of(self, struct virtio_balloon, nb);
        if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                return NOTIFY_OK;

        freed = parm;
        num_freed_pages = leak_balloon(vb, oom_pages);
        update_balloon_size(vb);
        *freed += num_freed_pages;

        return NOTIFY_OK;
}

static void update_balloon_stats_func(struct work_struct *work)
{
        struct virtio_balloon *vb;

        vb = container_of(work, struct virtio_balloon,
                          update_balloon_stats_work);
        stats_handle_request(vb);
}

static void update_balloon_size_func(struct work_struct *work)
{
        struct virtio_balloon *vb;
        s64 diff;

        vb = container_of(work, struct virtio_balloon,
                          update_balloon_size_work);
        diff = towards_target(vb);

        if (diff > 0)
                diff -= fill_balloon(vb, diff);
        else if (diff < 0)
                diff += leak_balloon(vb, -diff);
        update_balloon_size(vb);

        if (diff)
                queue_work(system_freezable_wq, work);
}

static int init_vqs(struct virtio_balloon *vb)
{
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
        static const char * const names[] = { "inflate", "deflate", "stats" };
        int err, nvqs;

        /*
         * We expect two virtqueues: inflate and deflate, and
         * optionally a stats queue.
         */
        nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
        err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL);
        if (err)
                return err;

        vb->inflate_vq = vqs[0];
        vb->deflate_vq = vqs[1];
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
                struct scatterlist sg;
                unsigned int num_stats;
                vb->stats_vq = vqs[2];

                /*
                 * Prime this virtqueue with one buffer so the hypervisor can
                 * use it to signal us later (it can't be broken yet!).
                 */
                num_stats = update_balloon_stats(vb);

                sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
                if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
                    < 0)
                        BUG();
                virtqueue_kick(vb->stats_vq);
        }
        return 0;
}

#ifdef CONFIG_BALLOON_COMPACTION
/*
 * virtballoon_migratepage - perform the balloon page migration on behalf of
 *                           a compaction thread.  (called under page lock)
 * @vb_dev_info: the balloon device
 * @newpage: page that will replace the isolated page after migration finishes.
 * @page   : the isolated (old) page that is about to be migrated to newpage.
 * @mode   : compaction mode -- not used for balloon page migration.
 *
 * After a ballooned page gets isolated by compaction procedures, this is the
 * function that performs the page migration on behalf of a compaction thread.
 * The page migration for virtio balloon is done in a simple swap fashion which
 * follows these two macro steps:
 *  1) insert newpage into vb->pages list and update the host about it;
 *  2) update the host about the old page removed from vb->pages list;
 *
 * This function performs the balloon page migration task.
 * Called through balloon_mapping->a_ops->migratepage
 */
static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
                struct page *newpage, struct page *page, enum migrate_mode mode)
{
        struct virtio_balloon *vb = container_of(vb_dev_info,
                        struct virtio_balloon, vb_dev_info);
        unsigned long flags;

        /*
         * In order to avoid lock contention while migrating pages concurrently
         * with leak_balloon() or fill_balloon() we just give up the balloon_lock
         * this turn, as it is easier to retry the page migration later.
         * This also prevents fill_balloon() getting stuck into a mutex
         * recursion in the case it ends up triggering memory compaction
         * while it is attempting to inflate the balloon.
         */
        if (!mutex_trylock(&vb->balloon_lock))
                return -EAGAIN;

        get_page(newpage); /* balloon reference */

        /* balloon's page migration 1st step  -- inflate "newpage" */
        spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
        balloon_page_insert(vb_dev_info, newpage);
        vb_dev_info->isolated_pages--;
        __count_vm_event(BALLOON_MIGRATE);
        spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
        vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
        set_page_pfns(vb, vb->pfns, newpage);
        tell_host(vb, vb->inflate_vq);

        /* balloon's page migration 2nd step -- deflate "page" */
        balloon_page_delete(page);
        vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
        set_page_pfns(vb, vb->pfns, page);
        tell_host(vb, vb->deflate_vq);

        mutex_unlock(&vb->balloon_lock);

        put_page(page); /* balloon reference */

        return MIGRATEPAGE_SUCCESS;
}

static struct dentry *balloon_mount(struct file_system_type *fs_type,
                int flags, const char *dev_name, void *data)
{
        static const struct dentry_operations ops = {
                .d_dname = simple_dname,
        };

        return mount_pseudo(fs_type, "balloon-kvm:", NULL, &ops,
                                BALLOON_KVM_MAGIC);
}

static struct file_system_type balloon_fs = {
        .name           = "balloon-kvm",
        .mount          = balloon_mount,
        .kill_sb        = kill_anon_super,
};

#endif /* CONFIG_BALLOON_COMPACTION */

static int virtballoon_probe(struct virtio_device *vdev)
{
        struct virtio_balloon *vb;
        int err;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
        if (!vb) {
                err = -ENOMEM;
                goto out;
        }

        INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
        INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
        spin_lock_init(&vb->stop_update_lock);
        vb->stop_update = false;
        vb->num_pages = 0;
        mutex_init(&vb->balloon_lock);
        init_waitqueue_head(&vb->acked);
        vb->vdev = vdev;

        balloon_devinfo_init(&vb->vb_dev_info);

        err = init_vqs(vb);
        if (err)
                goto out_free_vb;

        vb->nb.notifier_call = virtballoon_oom_notify;
        vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
        err = register_oom_notifier(&vb->nb);
        if (err < 0)
                goto out_del_vqs;

#ifdef CONFIG_BALLOON_COMPACTION
        balloon_mnt = kern_mount(&balloon_fs);
        if (IS_ERR(balloon_mnt)) {
                err = PTR_ERR(balloon_mnt);
                unregister_oom_notifier(&vb->nb);
                goto out_del_vqs;
        }

        vb->vb_dev_info.migratepage = virtballoon_migratepage;
        vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
        if (IS_ERR(vb->vb_dev_info.inode)) {
                err = PTR_ERR(vb->vb_dev_info.inode);
                kern_unmount(balloon_mnt);
                unregister_oom_notifier(&vb->nb);
                vb->vb_dev_info.inode = NULL;
                goto out_del_vqs;
        }
        vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif

        virtio_device_ready(vdev);

        if (towards_target(vb))
                virtballoon_changed(vdev);
        return 0;

out_del_vqs:
        vdev->config->del_vqs(vdev);
out_free_vb:
        kfree(vb);
out:
        return err;
}

static void remove_common(struct virtio_balloon *vb)
{
        /* There might be pages left in the balloon: free them. */
        while (vb->num_pages)
                leak_balloon(vb, vb->num_pages);
        update_balloon_size(vb);

        /* Now we reset the device so we can clean up the queues. */
        vb->vdev->config->reset(vb->vdev);

        vb->vdev->config->del_vqs(vb->vdev);
}

static void virtballoon_remove(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;

        unregister_oom_notifier(&vb->nb);

        spin_lock_irq(&vb->stop_update_lock);
        vb->stop_update = true;
        spin_unlock_irq(&vb->stop_update_lock);
        cancel_work_sync(&vb->update_balloon_size_work);
        cancel_work_sync(&vb->update_balloon_stats_work);

        remove_common(vb);
#ifdef CONFIG_BALLOON_COMPACTION
        if (vb->vb_dev_info.inode)
                iput(vb->vb_dev_info.inode);

        kern_unmount(balloon_mnt);
#endif
        kfree(vb);
}

#ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;

        /*
         * The workqueue is already frozen by the PM core before this
         * function is called.
         */
        remove_common(vb);
        return 0;
}

static int virtballoon_restore(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;
        int ret;

        ret = init_vqs(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        if (towards_target(vb))
                virtballoon_changed(vdev);
        update_balloon_size(vb);
        return 0;
}
#endif

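/*
 * The balloon hands guest-physical pfns straight to the host and does not go
 * through the DMA API, so it cannot sit behind a virtual IOMMU; refuse the
 * VIRTIO_F_IOMMU_PLATFORM feature before negotiation completes.
 */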
static int virtballoon_validate(struct virtio_device *vdev)
{
        __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
        return 0;
}

static unsigned int features[] = {
        VIRTIO_BALLOON_F_MUST_TELL_HOST,
        VIRTIO_BALLOON_F_STATS_VQ,
        VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
};

static struct virtio_driver virtio_balloon_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
        .validate =     virtballoon_validate,
        .probe =        virtballoon_probe,
        .remove =       virtballoon_remove,
        .config_changed = virtballoon_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze =       virtballoon_freeze,
        .restore =      virtballoon_restore,
#endif
};

module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");