linux/drivers/hv/hv_balloon.c
/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */


/*
 * Protocol versions. The low word is the minor version, the high word
 * the major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)

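/*
 * For example, DYNMEM_MAKE_VERSION(2, 0) encodes version 2.0 as 0x00020000;
 * DYNMEM_MAJOR_VERSION() then recovers 2 and DYNMEM_MINOR_VERSION() 0. Note
 * that DYNMEM_MINOR_VERSION() masks only the low byte, which is sufficient
 * for the minor numbers (0 and 3) defined below.
 */
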
enum {
        DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
        DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
        DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

        DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
        DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
        DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

        DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};


/*
 * Message Types
 */

enum dm_message_type {
        /*
         * Version 0.3
         */
        DM_ERROR                        = 0,
        DM_VERSION_REQUEST              = 1,
        DM_VERSION_RESPONSE             = 2,
        DM_CAPABILITIES_REPORT          = 3,
        DM_CAPABILITIES_RESPONSE        = 4,
        DM_STATUS_REPORT                = 5,
        DM_BALLOON_REQUEST              = 6,
        DM_BALLOON_RESPONSE             = 7,
        DM_UNBALLOON_REQUEST            = 8,
        DM_UNBALLOON_RESPONSE           = 9,
        DM_MEM_HOT_ADD_REQUEST          = 10,
        DM_MEM_HOT_ADD_RESPONSE         = 11,
        DM_VERSION_03_MAX               = 11,
        /*
         * Version 1.0.
         */
        DM_INFO_MESSAGE                 = 12,
        DM_VERSION_1_MAX                = 12
};

/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
        struct {
                __u16 minor_version;
                __u16 major_version;
        };
        __u32 version;
} __packed;


union dm_caps {
        struct {
                __u64 balloon:1;
                __u64 hot_add:1;
                /*
                 * To support guests that may have alignment
                 * limitations on hot-add, the guest can specify
                 * its alignment requirements; a value of n
                 * represents an alignment of 2^n in megabytes.
                 */
                __u64 hot_add_alignment:4;
                __u64 reservedz:58;
        } cap_bits;
        __u64 caps;
} __packed;
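
/*
 * For example, this driver advertises hot_add_alignment = 7 in
 * balloon_probe(), i.e. 2^7 = 128 MB alignment, matching the HA_CHUNK
 * hot-add granularity defined below.
 */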

union dm_mem_page_range {
        struct {
                /*
                 * The PFN of the first page in the range.
                 * 40 bits is the architectural limit of a PFN
                 * number for AMD64.
                 */
                __u64 start_page:40;
                /*
                 * The number of pages in the range.
                 */
                __u64 page_cnt:24;
        } finfo;
        __u64  page_range;
} __packed;
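
/*
 * For instance, a range starting at PFN 0x100000 (the 4 GB boundary with
 * 4K pages) that is 512 pages (2 MB) long is sent as start_page = 0x100000,
 * page_cnt = 512, packed into the single 64-bit page_range value.
 */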


/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
        __u16 type;
        __u16 size;
        __u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
        struct dm_header hdr;
        __u8 data[]; /* enclosed message */
} __packed;


/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts a version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
        struct dm_header hdr;
        union dm_version version;
        __u32 is_last_attempt:1;
        __u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest to
 * indicate whether the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
        struct dm_header hdr;
        union dm_caps caps;
        __u64 min_page_cnt;
        __u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message indicates whether the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *                 in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *          calculated as File Cache Page Fault Count - Page Read Count.
 *          This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
        struct dm_header hdr;
        __u64 num_avail;
        __u64 num_committed;
        __u64 page_file_size;
        __u64 zero_free;
        __u32 page_file_writes;
        __u32 io_diff;
} __packed;


/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
        struct dm_header hdr;
        __u32 num_pages;
        __u32 reservedz;
} __packed;


/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
        struct dm_header hdr;
        __u32 reservedz;
        __u32 more_pages:1;
        __u32 range_count:31;
        union dm_mem_page_range range_array[];
} __packed;
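
/*
 * A response returning a single 2 MB chunk would carry, for example,
 * more_pages = 0, range_count = 1, and range_array[0] describing
 * 512 pages starting at the chunk's first PFN.
 */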

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
        struct dm_header hdr;
        __u32 more_pages:1;
        __u32 reservedz:31;
        __u32 range_count;
        union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
        struct dm_header hdr;
} __packed;


/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we cannot hot add memory at arbitrary granularity; requests
 * are rounded to multiples of the HA_CHUNK size defined below.
 */

struct dm_hot_add {
        struct dm_header hdr;
        union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
        struct dm_header hdr;
        __u32 page_count;
        __u32 result;
} __packed;

/*
 * Types of information sent from the host to the guest.
 */

enum dm_info_type {
        INFO_TYPE_MAX_PAGE_CNT = 0,
        MAX_INFO_TYPE
};


/*
 * Header for the information message.
 */

struct dm_info_header {
        enum dm_info_type type;
        __u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
        struct dm_header hdr;
        __u32 reserved;
        __u32 info_size;
        __u8  info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in 128M chunks;
 * it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long covered_start_pfn;
        unsigned long covered_end_pfn;
        unsigned long ha_end_pfn;
        unsigned long end_pfn;
        /*
         * A list of gaps.
         */
        struct list_head gap_list;
};

struct hv_hotadd_gap {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};
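
/*
 * In the common case the PFN markers of a hot-add region nest as
 *
 *   start_pfn <= covered_start_pfn <= covered_end_pfn
 *             <= ha_end_pfn <= end_pfn
 *
 * with ha_end_pfn advancing in HA_CHUNK steps as memory is hot added
 * and covered_end_pfn tracking the pages actually backed by the host.
 */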

struct balloon_state {
        __u32 num_pages;
        struct work_struct wrk;
};

struct hot_add_wrk {
        union dm_mem_page_range ha_page_range;
        union dm_mem_page_range ha_region_range;
        struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to the host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_dm_state {
        DM_INITIALIZING = 0,
        DM_INITIALIZED,
        DM_BALLOON_UP,
        DM_BALLOON_DOWN,
        DM_HOT_ADD,
        DM_INIT_ERROR
};


static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M     512
#define HA_CHUNK (32 * 1024)
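
/*
 * With 4K pages, PAGES_IN_2M (512 pages) is 2 MB, the granularity in
 * which the host balloons, and HA_CHUNK (32768 pages) is 128 MB, the
 * granularity in which this driver hot adds memory.
 */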

struct hv_dynmem_device {
        struct hv_device *dev;
        enum hv_dm_state state;
        struct completion host_event;
        struct completion config_event;

        /*
         * Number of pages we have currently ballooned out.
         */
        unsigned int num_pages_ballooned;
        unsigned int num_pages_onlined;
        unsigned int num_pages_added;

        /*
         * State to manage the ballooning (up) operation.
         */
        struct balloon_state balloon_wrk;

        /*
         * State to execute the "hot-add" operation.
         */
        struct hot_add_wrk ha_wrk;

        /*
         * This state tracks if the host has specified a hot-add
         * region.
         */
        bool host_specified_ha_region;

        /*
         * State to synchronize hot-add.
         */
        struct completion ol_waitevent;
        bool ha_waiting;
        /*
         * This thread periodically notifies the host about
         * memory pressure in the guest.
         */
        struct task_struct *thread;

        /*
         * Protects ha_region_list, the num_pages_onlined counter and
         * individual regions from ha_region_list.
         */
        spinlock_t ha_lock;

        /*
         * A list of hot-add regions.
         */
        struct list_head ha_region_list;

        /*
         * We start with the highest version we can support
         * and downgrade based on the host; we save here the
         * next version to try.
         */
        __u32 next_version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
                              void *v)
{
        struct memory_notify *mem = (struct memory_notify *)v;
        unsigned long flags;

        switch (val) {
        case MEM_ONLINE:
                spin_lock_irqsave(&dm_device.ha_lock, flags);
                dm_device.num_pages_onlined += mem->nr_pages;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
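                /*
                 * Fall through: the hot-add waiter is woken whether the
                 * online operation succeeded or was cancelled.
                 */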
        case MEM_CANCEL_ONLINE:
                if (dm_device.ha_waiting) {
                        dm_device.ha_waiting = false;
                        complete(&dm_device.ol_waitevent);
                }
                break;

        case MEM_OFFLINE:
                spin_lock_irqsave(&dm_device.ha_lock, flags);
                dm_device.num_pages_onlined -= mem->nr_pages;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                break;
        case MEM_GOING_ONLINE:
        case MEM_GOING_OFFLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
        .notifier_call = hv_memory_notifier,
        .priority = 0
};

/* Check if the given page is backed, not inside a gap, and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
        unsigned long cur_start_pgp;
        unsigned long cur_end_pgp;
        struct hv_hotadd_gap *gap;

        cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
        cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

        /* The page is not backed. */
        if (((unsigned long)pg < cur_start_pgp) ||
            ((unsigned long)pg >= cur_end_pgp))
                return;

        /* Check for gaps. */
        list_for_each_entry(gap, &has->gap_list, list) {
                cur_start_pgp = (unsigned long)
                        pfn_to_page(gap->start_pfn);
                cur_end_pgp = (unsigned long)
                        pfn_to_page(gap->end_pfn);
                if (((unsigned long)pg >= cur_start_pgp) &&
                    ((unsigned long)pg < cur_end_pgp)) {
                        return;
                }
        }

        /* This frame is currently backed; online the page. */
        __online_page_set_limits(pg);
        __online_page_increment_counters(pg);
        __online_page_free(pg);
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
                                unsigned long start_pfn, unsigned long size)
{
        unsigned long i;

        for (i = 0; i < size; i++)
                hv_page_online_one(has, pfn_to_page(start_pfn + i));
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
                                unsigned long pfn_count,
                                struct hv_hotadd_state *has)
{
        int ret = 0;
        int i, nid;
        unsigned long start_pfn;
        unsigned long processed_pfn;
        unsigned long total_pfn = pfn_count;
        unsigned long flags;

        for (i = 0; i < (size/HA_CHUNK); i++) {
                start_pfn = start + (i * HA_CHUNK);

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                has->ha_end_pfn += HA_CHUNK;

                if (total_pfn > HA_CHUNK) {
                        processed_pfn = HA_CHUNK;
                        total_pfn -= HA_CHUNK;
                } else {
                        processed_pfn = total_pfn;
                        total_pfn = 0;
                }

                has->covered_end_pfn += processed_pfn;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);

                init_completion(&dm_device.ol_waitevent);
                dm_device.ha_waiting = !memhp_auto_online;

                nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
                ret = add_memory(nid, PFN_PHYS((start_pfn)),
                                (HA_CHUNK << PAGE_SHIFT));

                if (ret) {
                        pr_info("hot_add memory failed, error is %d\n", ret);
                        if (ret == -EEXIST) {
                                /*
                                 * -EEXIST is not a transient failure; it
                                 * means the guest's physical address map
                                 * precludes hot adding memory. Stop all
                                 * further memory hot-add.
                                 */
                                do_hot_add = false;
                        }
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                        has->ha_end_pfn -= HA_CHUNK;
                        has->covered_end_pfn -= processed_pfn;
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        break;
                }

                /*
                 * Wait for the memory block to be onlined when memory onlining
                 * is done outside of the kernel (i.e. memhp_auto_online is not
                 * set). Since the hot add has succeeded, it is ok to proceed
                 * even if the pages in the hot added region have not been
                 * "onlined" within the allowed time.
                 */
                if (dm_device.ha_waiting)
                        wait_for_completion_timeout(&dm_device.ol_waitevent,
                                                    5*HZ);
                post_status(&dm_device);
        }
}

static void hv_online_page(struct page *pg)
{
        struct hv_hotadd_state *has;
        unsigned long cur_start_pgp;
        unsigned long cur_end_pgp;
        unsigned long flags;

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                cur_start_pgp = (unsigned long)
                        pfn_to_page(has->start_pfn);
                cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);

                /* The page belongs to a different HAS. */
                if (((unsigned long)pg < cur_start_pgp) ||
                    ((unsigned long)pg >= cur_end_pgp))
                        continue;

                hv_page_online_one(has, pg);
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}

static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
        struct hv_hotadd_state *has;
        struct hv_hotadd_gap *gap;
        unsigned long residual, new_inc;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                /*
                 * If the current start pfn is not where covered_end_pfn
                 * is, create a gap and update covered_end_pfn.
                 */
                if (has->covered_end_pfn != start_pfn) {
                        gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
                        if (!gap) {
                                ret = -ENOMEM;
                                break;
                        }

                        INIT_LIST_HEAD(&gap->list);
                        gap->start_pfn = has->covered_end_pfn;
                        gap->end_pfn = start_pfn;
                        list_add_tail(&gap->list, &has->gap_list);

                        has->covered_end_pfn = start_pfn;
                }

                /*
                 * If the current hot-add request extends beyond
                 * our current limit, extend it.
                 */
                if ((start_pfn + pfn_cnt) > has->end_pfn) {
                        residual = (start_pfn + pfn_cnt - has->end_pfn);
                        /*
                         * Extend the region by multiples of HA_CHUNK.
                         */
                        new_inc = (residual / HA_CHUNK) * HA_CHUNK;
                        if (residual % HA_CHUNK)
                                new_inc += HA_CHUNK;

                        has->end_pfn += new_inc;
                }

                ret = 1;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return ret;
}

static unsigned long handle_pg_range(unsigned long pg_start,
                                        unsigned long pg_count)
{
        unsigned long start_pfn = pg_start;
        unsigned long pfn_cnt = pg_count;
        unsigned long size;
        struct hv_hotadd_state *has;
        unsigned long pgs_ol = 0;
        unsigned long old_covered_state;
        unsigned long res = 0, flags;

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                old_covered_state = has->covered_end_pfn;

                if (start_pfn < has->ha_end_pfn) {
                        /*
                         * This is the case where we are backing pages
                         * in an already hot added region. Bring
                         * these pages online first.
                         */
                        pgs_ol = has->ha_end_pfn - start_pfn;
                        if (pgs_ol > pfn_cnt)
                                pgs_ol = pfn_cnt;

                        has->covered_end_pfn += pgs_ol;
                        pfn_cnt -= pgs_ol;
                        /*
                         * Check if the corresponding memory block is already
                         * online by checking its last previously backed page.
                         * If it is, we need to bring the rest (which was not
                         * backed previously) online too.
                         */
                        if (start_pfn > has->start_pfn &&
                            !PageReserved(pfn_to_page(start_pfn - 1)))
                                hv_bring_pgs_online(has, start_pfn, pgs_ol);
                }

                if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
                        /*
                         * We have some residual hot add range
                         * that needs to be hot added; hot add
                         * it now. Hot add a multiple of
                         * HA_CHUNK that fully covers the pages
                         * we have.
                         */
                        size = (has->end_pfn - has->ha_end_pfn);
                        if (pfn_cnt <= size) {
                                size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
                                if (pfn_cnt % HA_CHUNK)
                                        size += HA_CHUNK;
                        } else {
                                pfn_cnt = size;
                        }
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                }
                /*
                 * If we managed to online any pages that were given to us,
                 * we declare success.
                 */
                res = has->covered_end_pfn - old_covered_state;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return res;
}

static unsigned long process_hot_add(unsigned long pg_start,
                                        unsigned long pfn_cnt,
                                        unsigned long rg_start,
                                        unsigned long rg_size)
{
        struct hv_hotadd_state *ha_region = NULL;
        int covered;
        unsigned long flags;

        if (pfn_cnt == 0)
                return 0;

        if (!dm_device.host_specified_ha_region) {
                covered = pfn_covered(pg_start, pfn_cnt);
                if (covered < 0)
                        return 0;

                if (covered)
                        goto do_pg_range;
        }

        /*
         * If the host has specified a hot-add range, deal with it first.
         */

        if (rg_size != 0) {
                ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
                if (!ha_region)
                        return 0;

                INIT_LIST_HEAD(&ha_region->list);
                INIT_LIST_HEAD(&ha_region->gap_list);

                ha_region->start_pfn = rg_start;
                ha_region->ha_end_pfn = rg_start;
                ha_region->covered_start_pfn = pg_start;
                ha_region->covered_end_pfn = pg_start;
                ha_region->end_pfn = rg_start + rg_size;

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                list_add_tail(&ha_region->list, &dm_device.ha_region_list);
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
        }

do_pg_range:
        /*
         * Process the specified page range, bringing the pages
         * online where possible.
         */
        return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
        struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
        unsigned long pg_start, pfn_cnt;
        unsigned long rg_start, rg_sz;
#endif
        struct hv_dynmem_device *dm = &dm_device;

        memset(&resp, 0, sizeof(struct dm_hot_add_response));
        resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
        resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
        pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
        pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

        rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
        rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

        if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
                unsigned long region_size;
                unsigned long region_start;

                /*
                 * The host has not specified the hot-add region.
                 * Based on the hot-add page range being specified,
                 * compute a hot-add region that can cover the pages
                 * that need to be hot-added while ensuring the alignment
                 * and size requirements of Linux as it relates to hot-add.
                 */
                region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
                if (pfn_cnt % HA_CHUNK)
                        region_size += HA_CHUNK;

                region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

                rg_start = region_start;
                rg_sz = region_size;
        }
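
        /*
         * For example, with HA_CHUNK = 32768: pg_start = 74565 rounds
         * down to region_start = 65536, and pfn_cnt = 1000 rounds up to
         * region_size = 32768, i.e. one full 128 MB chunk.
         */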

        if (do_hot_add)
                resp.page_count = process_hot_add(pg_start, pfn_cnt,
                                                rg_start, rg_sz);

        dm->num_pages_added += resp.page_count;
#endif
        /*
         * The result field of the response structure has the
         * following semantics:
         *
         * 1. If all or some pages hot-added: Guest should return success.
         *
         * 2. If no pages could be hot-added:
         *
         * If the guest returns success, then the host
         * will not attempt any further hot-add operations. This
         * signifies a permanent failure.
         *
         * If the guest returns failure, then this failure will be
         * treated as a transient failure and the host may retry the
         * hot-add operation after some delay.
         */
        if (resp.page_count > 0)
                resp.result = 1;
        else if (!do_hot_add)
                resp.result = 1;
        else
                resp.result = 0;

        if (!do_hot_add || (resp.page_count == 0))
                pr_info("Memory hot add failed\n");

        dm->state = DM_INITIALIZED;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        vmbus_sendpacket(dm->dev->channel, &resp,
                        sizeof(struct dm_hot_add_response),
                        (unsigned long)NULL,
                        VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
        struct dm_info_header *info_hdr;

        info_hdr = (struct dm_info_header *)msg->info;

        switch (info_hdr->type) {
        case INFO_TYPE_MAX_PAGE_CNT:
                pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
                pr_info("Data Size is %d\n", info_hdr->data_size);
                break;
        default:
                pr_info("Received Unknown type: %d\n", info_hdr->type);
        }
}

static unsigned long compute_balloon_floor(void)
{
        unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
        /* Simple continuous piecewise linear function:
         *  max MiB -> min MiB  gradient
         *       0         0
         *      16        16
         *      32        24
         *     128        72    (1/2)
         *     512       168    (1/4)
         *    2048       360    (1/8)
         *    8192       744    (1/16)
         *   32768      1512    (1/32)
         */
        if (totalram_pages < MB2PAGES(128))
                min_pages = MB2PAGES(8) + (totalram_pages >> 1);
        else if (totalram_pages < MB2PAGES(512))
                min_pages = MB2PAGES(40) + (totalram_pages >> 2);
        else if (totalram_pages < MB2PAGES(2048))
                min_pages = MB2PAGES(104) + (totalram_pages >> 3);
        else if (totalram_pages < MB2PAGES(8192))
                min_pages = MB2PAGES(232) + (totalram_pages >> 4);
        else
                min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
        return min_pages;
}
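
/*
 * Worked example: a guest with 1024 MiB of RAM falls in the
 * 512 MiB <= total < 2048 MiB band, so the floor is
 * 104 MiB + 1024 MiB / 8 = 232 MiB, matching the gradient table above.
 */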

/*
 * Post our status, as it relates to memory pressure, to the
 * host. The host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
        struct dm_status status;
        unsigned long now = jiffies;
        unsigned long last_post = last_post_time;

        if (pressure_report_delay > 0) {
                --pressure_report_delay;
                return;
        }

        if (!time_after(now, (last_post_time + HZ)))
                return;

        memset(&status, 0, sizeof(struct dm_status));
        status.hdr.type = DM_STATUS_REPORT;
        status.hdr.size = sizeof(struct dm_status);
        status.hdr.trans_id = atomic_inc_return(&trans_id);

        /*
         * The host expects the guest to report free and committed memory.
         * Furthermore, the host expects the pressure information to include
         * the ballooned out pages. For a given amount of memory that we are
         * managing we need to compute a floor below which we should not
         * balloon. Compute this and add it to the pressure report.
         * We also need to report all offline pages (num_pages_added -
         * num_pages_onlined) as committed to the host, otherwise it can try
         * asking us to balloon them out.
         */
        status.num_avail = si_mem_available();
        status.num_committed = vm_memory_committed() +
                dm->num_pages_ballooned +
                (dm->num_pages_added > dm->num_pages_onlined ?
                 dm->num_pages_added - dm->num_pages_onlined : 0) +
                compute_balloon_floor();

        /*
         * If our transaction ID is no longer current, just don't
         * send the status. This can happen if we were interrupted
         * after we picked our transaction ID.
         */
        if (status.hdr.trans_id != atomic_read(&trans_id))
                return;

        /*
         * If the last post time that we sampled has changed,
         * we have raced, don't post the status.
         */
        if (last_post != last_post_time)
                return;

        last_post_time = jiffies;
        vmbus_sendpacket(dm->dev->channel, &status,
                                sizeof(struct dm_status),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
}

static void free_balloon_pages(struct hv_dynmem_device *dm,
                         union dm_mem_page_range *range_array)
{
        int num_pages = range_array->finfo.page_cnt;
        __u64 start_frame = range_array->finfo.start_page;
        struct page *pg;
        int i;

        for (i = 0; i < num_pages; i++) {
                pg = pfn_to_page(i + start_frame);
                __free_page(pg);
                dm->num_pages_ballooned--;
        }
}


static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
                                        unsigned int num_pages,
                                        struct dm_balloon_response *bl_resp,
                                        int alloc_unit)
{
        unsigned int i = 0;
        struct page *pg;

        if (num_pages < alloc_unit)
                return 0;

        for (i = 0; (i * alloc_unit) < num_pages; i++) {
                if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
                        PAGE_SIZE)
                        return i * alloc_unit;

                /*
                 * We execute this code in a thread context. Furthermore,
                 * we don't want the kernel to try too hard.
                 */
                pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
                                __GFP_NOMEMALLOC | __GFP_NOWARN,
                                get_order(alloc_unit << PAGE_SHIFT));

                if (!pg)
                        return i * alloc_unit;

                dm->num_pages_ballooned += alloc_unit;

                /*
                 * If we allocated 2M pages, split them so we
                 * can free them in any order.
                 */
                if (alloc_unit != 1)
                        split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

                bl_resp->range_count++;
                bl_resp->range_array[i].finfo.start_page =
                        page_to_pfn(pg);
                bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
                bl_resp->hdr.size += sizeof(union dm_mem_page_range);
        }

        return num_pages;
}


static void balloon_up(struct work_struct *dummy)
{
        unsigned int num_pages = dm_device.balloon_wrk.num_pages;
        unsigned int num_ballooned = 0;
        struct dm_balloon_response *bl_resp;
        int alloc_unit;
        int ret;
        bool done = false;
        int i;
        long avail_pages;
        unsigned long floor;

        /* The host balloons pages in 2M granularity. */
        WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

        /*
         * We will attempt 2M allocations. However, if we fail to
         * allocate 2M chunks, we will go back to 4k allocations.
         */
        alloc_unit = 512;

        avail_pages = si_mem_available();
        floor = compute_balloon_floor();

        /* Refuse to balloon below the floor, keep the 2M granularity. */
        if (avail_pages < num_pages || avail_pages - num_pages < floor) {
                num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
                num_pages -= num_pages % PAGES_IN_2M;
        }

        while (!done) {
                bl_resp = (struct dm_balloon_response *)send_buffer;
                memset(send_buffer, 0, PAGE_SIZE);
                bl_resp->hdr.type = DM_BALLOON_RESPONSE;
                bl_resp->hdr.size = sizeof(struct dm_balloon_response);
                bl_resp->more_pages = 1;

                num_pages -= num_ballooned;
                num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
                                                    bl_resp, alloc_unit);

                if (alloc_unit != 1 && num_ballooned == 0) {
                        alloc_unit = 1;
                        continue;
                }

                if (num_ballooned == 0 || num_ballooned == num_pages) {
                        bl_resp->more_pages = 0;
                        done = true;
                        dm_device.state = DM_INITIALIZED;
                }

                /*
                 * We are pushing a lot of data through the channel;
                 * deal with transient failures caused by a lack of
                 * space in the ring buffer.
                 */
                do {
                        bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
                        ret = vmbus_sendpacket(dm_device.dev->channel,
                                                bl_resp,
                                                bl_resp->hdr.size,
                                                (unsigned long)NULL,
                                                VM_PKT_DATA_INBAND, 0);

                        if (ret == -EAGAIN)
                                msleep(20);
                        post_status(&dm_device);
                } while (ret == -EAGAIN);

                if (ret) {
                        /*
                         * Free up the memory we allocated.
                         */
                        pr_info("Balloon response failed\n");

                        for (i = 0; i < bl_resp->range_count; i++)
                                free_balloon_pages(&dm_device,
                                                 &bl_resp->range_array[i]);

                        done = true;
                }
        }
}

static void balloon_down(struct hv_dynmem_device *dm,
                        struct dm_unballoon_request *req)
{
        union dm_mem_page_range *range_array = req->range_array;
        int range_count = req->range_count;
        struct dm_unballoon_response resp;
        int i;

        for (i = 0; i < range_count; i++) {
                free_balloon_pages(dm, &range_array[i]);
                complete(&dm_device.config_event);
        }

        if (req->more_pages == 1)
                return;

        memset(&resp, 0, sizeof(struct dm_unballoon_response));
        resp.hdr.type = DM_UNBALLOON_RESPONSE;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        resp.hdr.size = sizeof(struct dm_unballoon_response);

        vmbus_sendpacket(dm_device.dev->channel, &resp,
                                sizeof(struct dm_unballoon_response),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
        struct hv_dynmem_device *dm = dm_dev;

        while (!kthread_should_stop()) {
                wait_for_completion_interruptible_timeout(
                                                &dm_device.config_event, 1*HZ);
                /*
                 * The host expects us to post information on the memory
                 * pressure every second.
                 */
                reinit_completion(&dm_device.config_event);
                post_status(dm);
        }

        return 0;
}


static void version_resp(struct hv_dynmem_device *dm,
                        struct dm_version_response *vresp)
{
        struct dm_version_request version_req;
        int ret;

        if (vresp->is_accepted) {
                /*
                 * We are done; wake up the
                 * context waiting for version
                 * negotiation.
                 */
                complete(&dm->host_event);
                return;
        }
        /*
         * If there are more versions to try, continue
         * with negotiations; if not,
         * shut down the service since we are not able
         * to negotiate a suitable version number
         * with the host.
         */
        if (dm->next_version == 0)
                goto version_error;

        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = dm->next_version;

        /*
         * Set the next version to try in case the current version fails.
         * The Win7 protocol ought to be the last one to try.
         */
        switch (version_req.version.version) {
        case DYNMEM_PROTOCOL_VERSION_WIN8:
                dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
                version_req.is_last_attempt = 0;
                break;
        default:
                dm->next_version = 0;
                version_req.is_last_attempt = 1;
        }

        ret = vmbus_sendpacket(dm->dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        if (ret)
                goto version_error;

        return;

version_error:
        dm->state = DM_INIT_ERROR;
        complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
                        struct dm_capabilities_resp_msg *cap_resp)
{
        if (!cap_resp->is_accepted) {
                pr_info("Capabilities not accepted by host\n");
                dm->state = DM_INIT_ERROR;
        }
        complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
        struct hv_device *dev = context;
        u32 recvlen;
        u64 requestid;
        struct dm_message *dm_msg;
        struct dm_header *dm_hdr;
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct dm_balloon *bal_msg;
        struct dm_hot_add *ha_msg;
        union dm_mem_page_range *ha_pg_range;
        union dm_mem_page_range *ha_region;

        memset(recv_buffer, 0, sizeof(recv_buffer));
        vmbus_recvpacket(dev->channel, recv_buffer,
                         PAGE_SIZE, &recvlen, &requestid);

        if (recvlen > 0) {
                dm_msg = (struct dm_message *)recv_buffer;
                dm_hdr = &dm_msg->hdr;

                switch (dm_hdr->type) {
                case DM_VERSION_RESPONSE:
                        version_resp(dm,
                                 (struct dm_version_response *)dm_msg);
                        break;

                case DM_CAPABILITIES_RESPONSE:
                        cap_resp(dm,
                                 (struct dm_capabilities_resp_msg *)dm_msg);
                        break;

                case DM_BALLOON_REQUEST:
                        if (dm->state == DM_BALLOON_UP)
                                pr_warn("Currently ballooning\n");
                        bal_msg = (struct dm_balloon *)recv_buffer;
                        dm->state = DM_BALLOON_UP;
                        dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
                        schedule_work(&dm_device.balloon_wrk.wrk);
                        break;

                case DM_UNBALLOON_REQUEST:
                        dm->state = DM_BALLOON_DOWN;
                        balloon_down(dm,
                                 (struct dm_unballoon_request *)recv_buffer);
                        break;

                case DM_MEM_HOT_ADD_REQUEST:
                        if (dm->state == DM_HOT_ADD)
                                pr_warn("Currently hot-adding\n");
                        dm->state = DM_HOT_ADD;
                        ha_msg = (struct dm_hot_add *)recv_buffer;
                        if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
                                /*
                                 * This is a normal hot-add request specifying
                                 * hot-add memory.
                                 */
                                dm->host_specified_ha_region = false;
                                ha_pg_range = &ha_msg->range;
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range.page_range = 0;
                        } else {
                                /*
                                 * Host is specifying that we first hot-add
                                 * a region and then partially populate this
                                 * region.
                                 */
                                dm->host_specified_ha_region = true;
                                ha_pg_range = &ha_msg->range;
                                ha_region = &ha_pg_range[1];
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range = *ha_region;
                        }
                        schedule_work(&dm_device.ha_wrk.wrk);
                        break;

                case DM_INFO_MESSAGE:
                        process_info(dm, (struct dm_info_msg *)dm_msg);
                        break;

                default:
                        pr_err("Unhandled message: type: %d\n", dm_hdr->type);
                }
        }
}

static int balloon_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
{
        int ret;
        unsigned long t;
        struct dm_version_request version_req;
        struct dm_capabilities cap_msg;

        do_hot_add = hot_add;

        /*
         * First allocate a send buffer.
         */
        send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!send_buffer)
                return -ENOMEM;

        ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
                        balloon_onchannelcallback, dev);
        if (ret)
                goto probe_error0;

        dm_device.dev = dev;
        dm_device.state = DM_INITIALIZING;
        dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
        init_completion(&dm_device.host_event);
        init_completion(&dm_device.config_event);
        INIT_LIST_HEAD(&dm_device.ha_region_list);
        spin_lock_init(&dm_device.ha_lock);
        INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
        INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
        dm_device.host_specified_ha_region = false;

        dm_device.thread =
                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
        if (IS_ERR(dm_device.thread)) {
                ret = PTR_ERR(dm_device.thread);
                goto probe_error1;
        }

#ifdef CONFIG_MEMORY_HOTPLUG
        set_online_page_callback(&hv_online_page);
        register_memory_notifier(&hv_memory_nb);
#endif

        hv_set_drvdata(dev, &dm_device);
        /*
         * Initiate the hand shake with the host and negotiate
         * a version that the host can support. We start with the
         * highest version number and go down if the host cannot
         * support it.
         */
        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
        version_req.is_last_attempt = 0;

        ret = vmbus_sendpacket(dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If we could not negotiate a compatible version with the host,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }
        /*
         * Now submit our capabilities to the host.
         */
        memset(&cap_msg, 0, sizeof(struct dm_capabilities));
        cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
        cap_msg.hdr.size = sizeof(struct dm_capabilities);
        cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

        cap_msg.caps.cap_bits.balloon = 1;
        cap_msg.caps.cap_bits.hot_add = 1;

        /*
         * Specify our alignment requirements as they relate to
         * memory hot-add. Specify 128MB alignment.
         */
        cap_msg.caps.cap_bits.hot_add_alignment = 7;

        /*
         * Currently the host does not use these
         * values and we set them to what is done in the
         * Windows driver.
         */
        cap_msg.min_page_cnt = 0;
        cap_msg.max_page_number = -1;

        ret = vmbus_sendpacket(dev->channel, &cap_msg,
                                sizeof(struct dm_capabilities),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If the host does not like our capabilities,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        dm_device.state = DM_INITIALIZED;

        return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
#endif
        kthread_stop(dm_device.thread);

probe_error1:
        vmbus_close(dev->channel);
probe_error0:
        kfree(send_buffer);
        return ret;
}

static int balloon_remove(struct hv_device *dev)
{
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct hv_hotadd_state *has, *tmp;
        struct hv_hotadd_gap *gap, *tmp_gap;
        unsigned long flags;

        if (dm->num_pages_ballooned != 0)
                pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

        cancel_work_sync(&dm->balloon_wrk.wrk);
        cancel_work_sync(&dm->ha_wrk.wrk);

        vmbus_close(dev->channel);
        kthread_stop(dm->thread);
        kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
        unregister_memory_notifier(&hv_memory_nb);
#endif
        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
                list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
                        list_del(&gap->list);
                        kfree(gap);
                }
                list_del(&has->list);
                kfree(has);
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
        /* Dynamic Memory Class ID */
        /* 525074DC-8985-46e2-8057-A307DC18A502 */
        { HV_DM_GUID, },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
        .name = "hv_balloon",
        .id_table = id_table,
        .probe = balloon_probe,
        .remove = balloon_remove,
};

static int __init init_balloon_drv(void)
{
        return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");