linux/drivers/hv/hv_balloon.c
/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */



/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)

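/*
 * Example (illustrative): DYNMEM_MAKE_VERSION(2, 0) evaluates to 0x00020000,
 * so DYNMEM_MAJOR_VERSION() recovers 2 and DYNMEM_MINOR_VERSION() recovers 0.
 * Note that DYNMEM_MINOR_VERSION() masks with 0xff and therefore only
 * returns the low 8 bits of the 16-bit minor version.
 */
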
enum {
        DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
        DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
        DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

        DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
        DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
        DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

        DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};



/*
 * Message Types
 */

enum dm_message_type {
        /*
         * Version 0.3
         */
        DM_ERROR                        = 0,
        DM_VERSION_REQUEST              = 1,
        DM_VERSION_RESPONSE             = 2,
        DM_CAPABILITIES_REPORT          = 3,
        DM_CAPABILITIES_RESPONSE        = 4,
        DM_STATUS_REPORT                = 5,
        DM_BALLOON_REQUEST              = 6,
        DM_BALLOON_RESPONSE             = 7,
        DM_UNBALLOON_REQUEST            = 8,
        DM_UNBALLOON_RESPONSE           = 9,
        DM_MEM_HOT_ADD_REQUEST          = 10,
        DM_MEM_HOT_ADD_RESPONSE         = 11,
        DM_VERSION_03_MAX               = 11,
        /*
         * Version 1.0.
         */
        DM_INFO_MESSAGE                 = 12,
        DM_VERSION_1_MAX                = 12
};


/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
        struct {
                __u16 minor_version;
                __u16 major_version;
        };
        __u32 version;
} __packed;


union dm_caps {
        struct {
                __u64 balloon:1;
                __u64 hot_add:1;
                /*
                 * To support guests that may have alignment
                 * limitations on hot-add, the guest can specify
                 * its alignment requirements; a value of n
                 * represents an alignment of 2^n in megabytes.
                 */
                __u64 hot_add_alignment:4;
                __u64 reservedz:58;
        } cap_bits;
        __u64 caps;
} __packed;
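
/*
 * Example (illustrative): this driver sets hot_add_alignment to 7 in
 * balloon_probe(), advertising a 2^7 MB == 128 MB hot-add alignment,
 * which matches the HA_CHUNK granularity used below.
 */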

union dm_mem_page_range {
        struct {
                /*
                 * The PFN of the first page in the range.
                 * 40 bits is the architectural limit of a PFN
                 * number for AMD64.
                 */
                __u64 start_page:40;
                /*
                 * The number of pages in the range.
                 */
                __u64 page_cnt:24;
        } finfo;
        __u64 page_range;
} __packed;
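
/*
 * Example (illustrative): on x86, where bitfields are allocated from the
 * least significant bit, start_page occupies the low 40 bits of page_range
 * and page_cnt the high 24 bits. A 2 MB range (512 pages) starting at
 * PFN 0x100000 would then be encoded as:
 *
 *      range.page_range = ((__u64)512 << 40) | 0x100000;
 */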



/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
        __u16 type;
        __u16 size;
        __u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
        struct dm_header hdr;
        __u8 data[]; /* enclosed message */
} __packed;


/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts a version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
        struct dm_header hdr;
        union dm_version version;
        __u32 is_last_attempt:1;
        __u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest to
 * indicate if the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
        struct dm_header hdr;
        union dm_caps caps;
        __u64 min_page_cnt;
        __u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest and indicates if the host has accepted the guest's capabilities.
 * If the host has not accepted, the guest must shut down the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *                 in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *          calculated as File Cache Page Fault Count - Page Read Count.
 *          This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
        struct dm_header hdr;
        __u64 num_avail;
        __u64 num_committed;
        __u64 page_file_size;
        __u64 zero_free;
        __u32 page_file_writes;
        __u32 io_diff;
} __packed;


/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
        struct dm_header hdr;
        __u32 num_pages;
        __u32 reservedz;
} __packed;


/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
        struct dm_header hdr;
        __u32 reservedz;
        __u32 more_pages:1;
        __u32 range_count:31;
        union dm_mem_page_range range_array[];
} __packed;
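
/*
 * Example (illustrative): alloc_balloon_pages() below appends one
 * dm_mem_page_range per allocation and grows hdr.size by
 * sizeof(union dm_mem_page_range) each time, never letting the response
 * exceed the PAGE_SIZE send buffer; more_pages stays set until the final
 * response of the transaction.
 */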

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
        struct dm_header hdr;
        __u32 more_pages:1;
        __u32 reservedz:31;
        __u32 range_count;
        union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
        struct dm_header hdr;
} __packed;


/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */

struct dm_hot_add {
        struct dm_header hdr;
        union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation: 1 - success, 0 - failure.
 *
 */

struct dm_hot_add_response {
        struct dm_header hdr;
        __u32 page_count;
        __u32 result;
} __packed;

/*
 * Types of information sent from the host to the guest.
 */

enum dm_info_type {
        INFO_TYPE_MAX_PAGE_CNT = 0,
        MAX_INFO_TYPE
};


/*
 * Header for the information message.
 */

struct dm_info_header {
        enum dm_info_type type;
        __u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
        struct dm_header hdr;
        __u32 reserved;
        __u32 info_size;
        __u8  info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long covered_start_pfn;
        unsigned long covered_end_pfn;
        unsigned long ha_end_pfn;
        unsigned long end_pfn;
        /*
         * A list of gaps.
         */
        struct list_head gap_list;
};

struct hv_hotadd_gap {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};

struct balloon_state {
        __u32 num_pages;
        struct work_struct wrk;
};

struct hot_add_wrk {
        union dm_mem_page_range ha_page_range;
        union dm_mem_page_range ha_region_range;
        struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to the host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_dm_state {
        DM_INITIALIZING = 0,
        DM_INITIALIZED,
        DM_BALLOON_UP,
        DM_BALLOON_DOWN,
        DM_HOT_ADD,
        DM_INIT_ERROR
};


static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M     512
#define HA_CHUNK (32 * 1024)
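
/*
 * Note (illustrative): both constants are in pages. Assuming 4K pages (x86),
 * PAGES_IN_2M is 512 * 4 KB == 2 MB, the host's balloon granularity, and
 * HA_CHUNK is 32768 * 4 KB == 128 MB, the hot-add granularity noted above.
 */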

struct hv_dynmem_device {
        struct hv_device *dev;
        enum hv_dm_state state;
        struct completion host_event;
        struct completion config_event;

        /*
         * Number of pages we have currently ballooned out.
         */
        unsigned int num_pages_ballooned;
        unsigned int num_pages_onlined;
        unsigned int num_pages_added;

        /*
         * State to manage the ballooning (up) operation.
         */
        struct balloon_state balloon_wrk;

        /*
         * State to execute the "hot-add" operation.
         */
        struct hot_add_wrk ha_wrk;

        /*
         * This state tracks if the host has specified a hot-add
         * region.
         */
        bool host_specified_ha_region;

        /*
         * State to synchronize hot-add.
         */
        struct completion ol_waitevent;
        bool ha_waiting;
        /*
         * This thread handles hot-add
         * requests from the host as well as notifying
         * the host about memory pressure in
         * the guest.
         */
        struct task_struct *thread;

        /*
         * Protects ha_region_list, the num_pages_onlined counter and
         * the individual regions from ha_region_list.
         */
        spinlock_t ha_lock;

        /*
         * A list of hot-add regions.
         */
        struct list_head ha_region_list;

        /*
         * We start with the highest version we can support
         * and downgrade based on the host; we save here the
         * next version to try.
         */
        __u32 next_version;

        /*
         * The version negotiated with the host.
         */
        __u32 version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
                              void *v)
{
        struct memory_notify *mem = (struct memory_notify *)v;
        unsigned long flags;

        switch (val) {
        case MEM_ONLINE:
                spin_lock_irqsave(&dm_device.ha_lock, flags);
                dm_device.num_pages_onlined += mem->nr_pages;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
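                /* Fall through */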
        case MEM_CANCEL_ONLINE:
                if (dm_device.ha_waiting) {
                        dm_device.ha_waiting = false;
                        complete(&dm_device.ol_waitevent);
                }
                break;

        case MEM_OFFLINE:
                spin_lock_irqsave(&dm_device.ha_lock, flags);
                dm_device.num_pages_onlined -= mem->nr_pages;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                break;
        case MEM_GOING_ONLINE:
        case MEM_GOING_OFFLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
        .notifier_call = hv_memory_notifier,
        .priority = 0
};

/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
        unsigned long cur_start_pgp;
        unsigned long cur_end_pgp;
        struct hv_hotadd_gap *gap;

        cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
        cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

        /* The page is not backed. */
        if (((unsigned long)pg < cur_start_pgp) ||
            ((unsigned long)pg >= cur_end_pgp))
                return;

        /* Check for gaps. */
        list_for_each_entry(gap, &has->gap_list, list) {
                cur_start_pgp = (unsigned long)
                        pfn_to_page(gap->start_pfn);
                cur_end_pgp = (unsigned long)
                        pfn_to_page(gap->end_pfn);
                if (((unsigned long)pg >= cur_start_pgp) &&
                    ((unsigned long)pg < cur_end_pgp)) {
                        return;
                }
        }

        /* This frame is currently backed; online the page. */
        __online_page_set_limits(pg);
        __online_page_increment_counters(pg);
        __online_page_free(pg);
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
                                unsigned long start_pfn, unsigned long size)
{
        int i;

        pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
        for (i = 0; i < size; i++)
                hv_page_online_one(has, pfn_to_page(start_pfn + i));
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
                                unsigned long pfn_count,
                                struct hv_hotadd_state *has)
{
        int ret = 0;
        int i, nid;
        unsigned long start_pfn;
        unsigned long processed_pfn;
        unsigned long total_pfn = pfn_count;
        unsigned long flags;

        for (i = 0; i < (size/HA_CHUNK); i++) {
                start_pfn = start + (i * HA_CHUNK);

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                has->ha_end_pfn += HA_CHUNK;

                if (total_pfn > HA_CHUNK) {
                        processed_pfn = HA_CHUNK;
                        total_pfn -= HA_CHUNK;
                } else {
                        processed_pfn = total_pfn;
                        total_pfn = 0;
                }

                has->covered_end_pfn += processed_pfn;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);

                init_completion(&dm_device.ol_waitevent);
                dm_device.ha_waiting = true; /* memhp_auto_online is missing in RHEL */

                nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
                ret = add_memory(nid, PFN_PHYS((start_pfn)),
                                (HA_CHUNK << PAGE_SHIFT));

                if (ret) {
                        pr_warn("hot_add memory failed, error is %d\n", ret);
                        if (ret == -EEXIST) {
                                /*
                                 * This error indicates that the failure
                                 * is not transient. This is the
                                 * case where the guest's physical address map
                                 * precludes hot adding memory. Stop all further
                                 * memory hot-add.
                                 */
                                do_hot_add = false;
                        }
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                        has->ha_end_pfn -= HA_CHUNK;
                        has->covered_end_pfn -= processed_pfn;
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        break;
                }

                /*
                 * Wait for the memory block to be onlined when memory onlining
                 * is done outside of the kernel (memhp_auto_online). Since the
                 * hot add has succeeded, it is ok to proceed even if the pages
                 * in the hot added region have not been "onlined" within the
                 * allowed time.
                 */
                if (dm_device.ha_waiting)
                        wait_for_completion_timeout(&dm_device.ol_waitevent,
                                                    5*HZ);
                post_status(&dm_device);
        }

        return;
}

static void hv_online_page(struct page *pg)
{
        struct hv_hotadd_state *has;
        unsigned long cur_start_pgp;
        unsigned long cur_end_pgp;
        unsigned long flags;

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                cur_start_pgp = (unsigned long)
                        pfn_to_page(has->start_pfn);
                cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);

                /* The page belongs to a different HAS. */
                if (((unsigned long)pg < cur_start_pgp) ||
                    ((unsigned long)pg >= cur_end_pgp))
                        continue;

                hv_page_online_one(has, pg);
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}

static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
        struct hv_hotadd_state *has;
        struct hv_hotadd_gap *gap;
        unsigned long residual, new_inc;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                /*
                 * If the current start pfn is not where the covered_end
                 * is, create a gap and update covered_end_pfn.
                 */
                if (has->covered_end_pfn != start_pfn) {
                        gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
                        if (!gap) {
                                ret = -ENOMEM;
                                break;
                        }

                        INIT_LIST_HEAD(&gap->list);
                        gap->start_pfn = has->covered_end_pfn;
                        gap->end_pfn = start_pfn;
                        list_add_tail(&gap->list, &has->gap_list);

                        has->covered_end_pfn = start_pfn;
                }

                /*
                 * If the current hot-add request extends beyond
                 * our current limit, extend it.
                 */
                if ((start_pfn + pfn_cnt) > has->end_pfn) {
                        residual = (start_pfn + pfn_cnt - has->end_pfn);
                        /*
                         * Extend the region by multiples of HA_CHUNK.
                         */
                        new_inc = (residual / HA_CHUNK) * HA_CHUNK;
                        if (residual % HA_CHUNK)
                                new_inc += HA_CHUNK;

                        has->end_pfn += new_inc;
                }

                ret = 1;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return ret;
}
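
/*
 * Example (illustrative): if a region's covered_end_pfn is 0x10000 and the
 * host next asks us to back pages starting at PFN 0x10800, the pages in
 * [0x10000, 0x10800) were never delivered; pfn_covered() records them as a
 * gap so hv_page_online_one() will skip them when the memory block is
 * onlined.
 */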

static unsigned long handle_pg_range(unsigned long pg_start,
                                        unsigned long pg_count)
{
        unsigned long start_pfn = pg_start;
        unsigned long pfn_cnt = pg_count;
        unsigned long size;
        struct hv_hotadd_state *has;
        unsigned long pgs_ol = 0;
        unsigned long old_covered_state;
        unsigned long res = 0, flags;

        pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
                pg_start);

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                old_covered_state = has->covered_end_pfn;

                if (start_pfn < has->ha_end_pfn) {
                        /*
                         * This is the case where we are backing pages
                         * in an already hot added region. Bring
                         * these pages online first.
                         */
                        pgs_ol = has->ha_end_pfn - start_pfn;
                        if (pgs_ol > pfn_cnt)
                                pgs_ol = pfn_cnt;

                        has->covered_end_pfn += pgs_ol;
                        pfn_cnt -= pgs_ol;
                        /*
                         * Check if the corresponding memory block is already
                         * online by checking its last previously backed page.
                         * In case it is, we need to bring the rest (which was
                         * not backed previously) online too.
                         */
                        if (start_pfn > has->start_pfn &&
                            !PageReserved(pfn_to_page(start_pfn - 1)))
                                hv_bring_pgs_online(has, start_pfn, pgs_ol);

                }

                if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
                        /*
                         * We have some residual hot add range
                         * that needs to be hot added; hot add
                         * it now. Hot add a multiple of
                         * HA_CHUNK that fully covers the pages
                         * we have.
                         */
                        size = (has->end_pfn - has->ha_end_pfn);
                        if (pfn_cnt <= size) {
                                size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
                                if (pfn_cnt % HA_CHUNK)
                                        size += HA_CHUNK;
                        } else {
                                pfn_cnt = size;
                        }
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                }
                /*
                 * If we managed to online any pages that were given to us,
                 * we declare success.
                 */
                res = has->covered_end_pfn - old_covered_state;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return res;
}

static unsigned long process_hot_add(unsigned long pg_start,
                                        unsigned long pfn_cnt,
                                        unsigned long rg_start,
                                        unsigned long rg_size)
{
        struct hv_hotadd_state *ha_region = NULL;
        int covered;
        unsigned long flags;

        if (pfn_cnt == 0)
                return 0;

        if (!dm_device.host_specified_ha_region) {
                covered = pfn_covered(pg_start, pfn_cnt);
                if (covered < 0)
                        return 0;

                if (covered)
                        goto do_pg_range;
        }

        /*
         * If the host has specified a hot-add range, deal with it first.
         */

        if (rg_size != 0) {
                ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
                if (!ha_region)
                        return 0;

                INIT_LIST_HEAD(&ha_region->list);
                INIT_LIST_HEAD(&ha_region->gap_list);

                ha_region->start_pfn = rg_start;
                ha_region->ha_end_pfn = rg_start;
                ha_region->covered_start_pfn = pg_start;
                ha_region->covered_end_pfn = pg_start;
                ha_region->end_pfn = rg_start + rg_size;

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                list_add_tail(&ha_region->list, &dm_device.ha_region_list);
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
        }

do_pg_range:
        /*
         * Process the page range specified, bringing the
         * pages online if possible.
         */
        return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
        struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
        unsigned long pg_start, pfn_cnt;
        unsigned long rg_start, rg_sz;
#endif
        struct hv_dynmem_device *dm = &dm_device;

        memset(&resp, 0, sizeof(struct dm_hot_add_response));
        resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
        resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
        pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
        pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

        rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
        rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

        if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
                unsigned long region_size;
                unsigned long region_start;

                /*
                 * The host has not specified the hot-add region.
                 * Based on the hot-add page range being specified,
                 * compute a hot-add region that can cover the pages
                 * that need to be hot-added while ensuring the alignment
                 * and size requirements of Linux as it relates to hot-add.
                 */
                region_start = pg_start;
                region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
                if (pfn_cnt % HA_CHUNK)
                        region_size += HA_CHUNK;

                region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

                rg_start = region_start;
                rg_sz = region_size;
        }

        if (do_hot_add)
                resp.page_count = process_hot_add(pg_start, pfn_cnt,
                                                rg_start, rg_sz);

        dm->num_pages_added += resp.page_count;
#endif
        /*
         * The result field of the response structure has the
         * following semantics:
         *
         * 1. If all or some pages hot-added: Guest should return success.
         *
         * 2. If no pages could be hot-added:
         *
         * If the guest returns success, then the host
         * will not attempt any further hot-add operations. This
         * signifies a permanent failure.
         *
         * If the guest returns failure, then this failure will be
         * treated as a transient failure and the host may retry the
         * hot-add operation after some delay.
         */
        if (resp.page_count > 0)
                resp.result = 1;
        else if (!do_hot_add)
                resp.result = 1;
        else
                resp.result = 0;

        if (!do_hot_add || (resp.page_count == 0))
                pr_info("Memory hot add failed\n");

        dm->state = DM_INITIALIZED;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        vmbus_sendpacket(dm->dev->channel, &resp,
                        sizeof(struct dm_hot_add_response),
                        (unsigned long)NULL,
                        VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
        struct dm_info_header *info_hdr;

        info_hdr = (struct dm_info_header *)msg->info;

        switch (info_hdr->type) {
        case INFO_TYPE_MAX_PAGE_CNT:
                if (info_hdr->data_size == sizeof(__u64)) {
                        __u64 *max_page_count = (__u64 *)&info_hdr[1];

                        pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
                                *max_page_count);
                }

                break;
        default:
                pr_info("Received unknown type: %d\n", info_hdr->type);
        }
}
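
/*
 * Layout note (illustrative): the info blob in dm_info_msg starts with a
 * dm_info_header, followed immediately by the payload; for
 * INFO_TYPE_MAX_PAGE_CNT the payload is a single __u64, which is why
 * process_info() reads it at &info_hdr[1].
 */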

static unsigned long compute_balloon_floor(void)
{
        unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
        /* Simple continuous piecewise linear function:
         *  max MiB -> min MiB  gradient
         *       0         0
         *      16        16
         *      32        24
         *     128        72    (1/2)
         *     512       168    (1/4)
         *    2048       360    (1/8)
         *    8192       744    (1/16)
         *   32768      1512    (1/32)
         */
        if (totalram_pages < MB2PAGES(128))
                min_pages = MB2PAGES(8) + (totalram_pages >> 1);
        else if (totalram_pages < MB2PAGES(512))
                min_pages = MB2PAGES(40) + (totalram_pages >> 2);
        else if (totalram_pages < MB2PAGES(2048))
                min_pages = MB2PAGES(104) + (totalram_pages >> 3);
        else if (totalram_pages < MB2PAGES(8192))
                min_pages = MB2PAGES(232) + (totalram_pages >> 4);
        else
                min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
        return min_pages;
}
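
/*
 * Worked example (illustrative): for a guest with 1024 MiB of RAM,
 * totalram_pages falls in the [512 MiB, 2048 MiB) branch, so the floor is
 * MB2PAGES(104) + MB2PAGES(1024)/8 == 104 MiB + 128 MiB == 232 MiB worth
 * of pages.
 */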

/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
        struct dm_status status;
        unsigned long now = jiffies;
        unsigned long last_post = last_post_time;

        if (pressure_report_delay > 0) {
                --pressure_report_delay;
                return;
        }

        if (!time_after(now, (last_post_time + HZ)))
                return;

        memset(&status, 0, sizeof(struct dm_status));
        status.hdr.type = DM_STATUS_REPORT;
        status.hdr.size = sizeof(struct dm_status);
        status.hdr.trans_id = atomic_inc_return(&trans_id);

        /*
         * The host expects the guest to report free and committed memory.
         * Furthermore, the host expects the pressure information to include
         * the ballooned out pages. For a given amount of memory that we are
         * managing we need to compute a floor below which we should not
         * balloon. Compute this and add it to the pressure report.
         * We also need to report all offline pages (num_pages_added -
         * num_pages_onlined) as committed to the host, otherwise it can try
         * asking us to balloon them out.
         */
        status.num_avail = si_mem_available();
        status.num_committed = vm_memory_committed() +
                dm->num_pages_ballooned +
                (dm->num_pages_added > dm->num_pages_onlined ?
                 dm->num_pages_added - dm->num_pages_onlined : 0) +
                compute_balloon_floor();

        /*
         * If our transaction ID is no longer current, just don't
         * send the status. This can happen if we were interrupted
         * after we picked our transaction ID.
         */
        if (status.hdr.trans_id != atomic_read(&trans_id))
                return;

        /*
         * If the last post time that we sampled has changed,
         * we have raced; don't post the status.
         */
        if (last_post != last_post_time)
                return;

        last_post_time = jiffies;
        vmbus_sendpacket(dm->dev->channel, &status,
                                sizeof(struct dm_status),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

}

static void free_balloon_pages(struct hv_dynmem_device *dm,
                         union dm_mem_page_range *range_array)
{
        int num_pages = range_array->finfo.page_cnt;
        __u64 start_frame = range_array->finfo.start_page;
        struct page *pg;
        int i;

        for (i = 0; i < num_pages; i++) {
                pg = pfn_to_page(i + start_frame);
                __free_page(pg);
                dm->num_pages_ballooned--;
        }
}



static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
                                        unsigned int num_pages,
                                        struct dm_balloon_response *bl_resp,
                                        int alloc_unit)
{
        unsigned int i = 0;
        struct page *pg;

        if (num_pages < alloc_unit)
                return 0;

        for (i = 0; (i * alloc_unit) < num_pages; i++) {
                if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
                        PAGE_SIZE)
                        return i * alloc_unit;

                /*
                 * We execute this code in a thread context. Furthermore,
                 * we don't want the kernel to try too hard.
                 */
                pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
                                __GFP_NOMEMALLOC | __GFP_NOWARN,
                                get_order(alloc_unit << PAGE_SHIFT));

                if (!pg)
                        return i * alloc_unit;

                dm->num_pages_ballooned += alloc_unit;

                /*
                 * If we allocated 2M pages, split them so we
                 * can free them in any order we get.
                 */

                if (alloc_unit != 1)
                        split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

                bl_resp->range_count++;
                bl_resp->range_array[i].finfo.start_page =
                        page_to_pfn(pg);
                bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
                bl_resp->hdr.size += sizeof(union dm_mem_page_range);

        }

        return num_pages;
}

static void balloon_up(struct work_struct *dummy)
{
        unsigned int num_pages = dm_device.balloon_wrk.num_pages;
        unsigned int num_ballooned = 0;
        struct dm_balloon_response *bl_resp;
        int alloc_unit;
        int ret;
        bool done = false;
        int i;
        long avail_pages;
        unsigned long floor;

        /* The host balloons pages in 2M granularity. */
        WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

        /*
         * We will attempt 2M allocations. However, if we fail to
         * allocate 2M chunks, we will go back to 4k allocations.
         */
        alloc_unit = 512;

        avail_pages = si_mem_available();
        floor = compute_balloon_floor();

        /* Refuse to balloon below the floor, keep the 2M granularity. */
        if (avail_pages < num_pages || avail_pages - num_pages < floor) {
                pr_warn("Balloon request will be partially fulfilled. %s\n",
                        avail_pages < num_pages ? "Not enough memory." :
                        "Balloon floor reached.");

                num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
                num_pages -= num_pages % PAGES_IN_2M;
        }

        while (!done) {
                bl_resp = (struct dm_balloon_response *)send_buffer;
                memset(send_buffer, 0, PAGE_SIZE);
                bl_resp->hdr.type = DM_BALLOON_RESPONSE;
                bl_resp->hdr.size = sizeof(struct dm_balloon_response);
                bl_resp->more_pages = 1;

                num_pages -= num_ballooned;
                num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
                                                    bl_resp, alloc_unit);

                if (alloc_unit != 1 && num_ballooned == 0) {
                        alloc_unit = 1;
                        continue;
                }

                if (num_ballooned == 0 || num_ballooned == num_pages) {
                        pr_debug("Ballooned %u out of %u requested pages.\n",
                                num_pages, dm_device.balloon_wrk.num_pages);

                        bl_resp->more_pages = 0;
                        done = true;
                        dm_device.state = DM_INITIALIZED;
                }

                /*
                 * We are pushing a lot of data through the channel;
                 * deal with transient failures caused by a lack of
                 * space in the ring buffer.
                 */

                do {
                        bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
                        ret = vmbus_sendpacket(dm_device.dev->channel,
                                                bl_resp,
                                                bl_resp->hdr.size,
                                                (unsigned long)NULL,
                                                VM_PKT_DATA_INBAND, 0);

                        if (ret == -EAGAIN)
                                msleep(20);
                        post_status(&dm_device);
                } while (ret == -EAGAIN);

                if (ret) {
                        /*
                         * Free up the memory we allocated.
                         */
                        pr_info("Balloon response failed\n");

                        for (i = 0; i < bl_resp->range_count; i++)
                                free_balloon_pages(&dm_device,
                                                 &bl_resp->range_array[i]);

                        done = true;
                }
        }

}

static void balloon_down(struct hv_dynmem_device *dm,
                        struct dm_unballoon_request *req)
{
        union dm_mem_page_range *range_array = req->range_array;
        int range_count = req->range_count;
        struct dm_unballoon_response resp;
        int i;
        unsigned int prev_pages_ballooned = dm->num_pages_ballooned;

        for (i = 0; i < range_count; i++) {
                free_balloon_pages(dm, &range_array[i]);
                complete(&dm_device.config_event);
        }

        pr_debug("Freed %u ballooned pages.\n",
                prev_pages_ballooned - dm->num_pages_ballooned);

        if (req->more_pages == 1)
                return;

        memset(&resp, 0, sizeof(struct dm_unballoon_response));
        resp.hdr.type = DM_UNBALLOON_RESPONSE;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        resp.hdr.size = sizeof(struct dm_unballoon_response);

        vmbus_sendpacket(dm_device.dev->channel, &resp,
                                sizeof(struct dm_unballoon_response),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
        struct hv_dynmem_device *dm = dm_dev;

        while (!kthread_should_stop()) {
                wait_for_completion_interruptible_timeout(
                                                &dm_device.config_event, 1*HZ);
                /*
                 * The host expects us to post information on the memory
                 * pressure every second.
                 */
                reinit_completion(&dm_device.config_event);
                post_status(dm);
        }

        return 0;
}


static void version_resp(struct hv_dynmem_device *dm,
                        struct dm_version_response *vresp)
{
        struct dm_version_request version_req;
        int ret;

        if (vresp->is_accepted) {
                /*
                 * We are done; wake up the
                 * context waiting for version
                 * negotiation.
                 */
                complete(&dm->host_event);
                return;
        }
        /*
         * If there are more versions to try, continue
         * with negotiations; if not,
         * shut down the service since we are not able
         * to negotiate a suitable version number
         * with the host.
         */
        if (dm->next_version == 0)
                goto version_error;

        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = dm->next_version;
        dm->version = version_req.version.version;

        /*
         * Set the next version to try in case the current version fails.
         * The Win7 protocol ought to be the last one to try.
         */
        switch (version_req.version.version) {
        case DYNMEM_PROTOCOL_VERSION_WIN8:
                dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
                version_req.is_last_attempt = 0;
                break;
        default:
                dm->next_version = 0;
                version_req.is_last_attempt = 1;
        }

        ret = vmbus_sendpacket(dm->dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        if (ret)
                goto version_error;

        return;

version_error:
        dm->state = DM_INIT_ERROR;
        complete(&dm->host_event);
}
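
/*
 * Negotiation order (illustrative): balloon_probe() first offers
 * DYNMEM_PROTOCOL_VERSION_WIN10 with next_version set to WIN8; each
 * rejection walks down WIN10 -> WIN8 -> WIN7, and the WIN7 request is
 * marked is_last_attempt.
 */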

static void cap_resp(struct hv_dynmem_device *dm,
                        struct dm_capabilities_resp_msg *cap_resp)
{
        if (!cap_resp->is_accepted) {
                pr_info("Capabilities not accepted by host\n");
                dm->state = DM_INIT_ERROR;
        }
        complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
        struct hv_device *dev = context;
        u32 recvlen;
        u64 requestid;
        struct dm_message *dm_msg;
        struct dm_header *dm_hdr;
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct dm_balloon *bal_msg;
        struct dm_hot_add *ha_msg;
        union dm_mem_page_range *ha_pg_range;
        union dm_mem_page_range *ha_region;

        memset(recv_buffer, 0, sizeof(recv_buffer));
        vmbus_recvpacket(dev->channel, recv_buffer,
                         PAGE_SIZE, &recvlen, &requestid);

        if (recvlen > 0) {
                dm_msg = (struct dm_message *)recv_buffer;
                dm_hdr = &dm_msg->hdr;

                switch (dm_hdr->type) {
                case DM_VERSION_RESPONSE:
                        version_resp(dm,
                                 (struct dm_version_response *)dm_msg);
                        break;

                case DM_CAPABILITIES_RESPONSE:
                        cap_resp(dm,
                                 (struct dm_capabilities_resp_msg *)dm_msg);
                        break;

                case DM_BALLOON_REQUEST:
                        if (dm->state == DM_BALLOON_UP)
                                pr_warn("Currently ballooning\n");
                        bal_msg = (struct dm_balloon *)recv_buffer;
                        dm->state = DM_BALLOON_UP;
                        dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
                        schedule_work(&dm_device.balloon_wrk.wrk);
                        break;

                case DM_UNBALLOON_REQUEST:
                        dm->state = DM_BALLOON_DOWN;
                        balloon_down(dm,
                                 (struct dm_unballoon_request *)recv_buffer);
                        break;

                case DM_MEM_HOT_ADD_REQUEST:
                        if (dm->state == DM_HOT_ADD)
                                pr_warn("Currently hot-adding\n");
                        dm->state = DM_HOT_ADD;
                        ha_msg = (struct dm_hot_add *)recv_buffer;
                        if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
                                /*
                                 * This is a normal hot-add request specifying
                                 * hot-add memory.
                                 */
                                dm->host_specified_ha_region = false;
                                ha_pg_range = &ha_msg->range;
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range.page_range = 0;
                        } else {
                                /*
                                 * The host is specifying that we first hot-add
                                 * a region and then partially populate this
                                 * region.
                                 */
                                dm->host_specified_ha_region = true;
                                ha_pg_range = &ha_msg->range;
                                ha_region = &ha_pg_range[1];
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range = *ha_region;
                        }
                        schedule_work(&dm_device.ha_wrk.wrk);
                        break;

                case DM_INFO_MESSAGE:
                        process_info(dm, (struct dm_info_msg *)dm_msg);
                        break;

                default:
                        pr_err("Unhandled message: type: %d\n", dm_hdr->type);

                }
        }

}

static int balloon_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
{
        int ret;
        unsigned long t;
        struct dm_version_request version_req;
        struct dm_capabilities cap_msg;

#ifdef CONFIG_MEMORY_HOTPLUG
        do_hot_add = hot_add;
#else
        do_hot_add = false;
#endif

        /*
         * First allocate a send buffer.
         */

        send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!send_buffer)
                return -ENOMEM;

        ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
                        balloon_onchannelcallback, dev);

        if (ret)
                goto probe_error0;

        dm_device.dev = dev;
        dm_device.state = DM_INITIALIZING;
        dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
        init_completion(&dm_device.host_event);
        init_completion(&dm_device.config_event);
        INIT_LIST_HEAD(&dm_device.ha_region_list);
        spin_lock_init(&dm_device.ha_lock);
        INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
        INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
        dm_device.host_specified_ha_region = false;

        dm_device.thread =
                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
        if (IS_ERR(dm_device.thread)) {
                ret = PTR_ERR(dm_device.thread);
                goto probe_error1;
        }

#ifdef CONFIG_MEMORY_HOTPLUG
        set_online_page_callback(&hv_online_page);
        register_memory_notifier(&hv_memory_nb);
#endif

        hv_set_drvdata(dev, &dm_device);
        /*
         * Initiate the handshake with the host and negotiate
         * a version that the host can support. We start with the
         * highest version number and go down if the host cannot
         * support it.
         */
        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
        version_req.is_last_attempt = 0;
        dm_device.version = version_req.version.version;

        ret = vmbus_sendpacket(dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If we could not negotiate a compatible version with the host,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        pr_info("Using Dynamic Memory protocol version %u.%u\n",
                DYNMEM_MAJOR_VERSION(dm_device.version),
                DYNMEM_MINOR_VERSION(dm_device.version));

        /*
         * Now submit our capabilities to the host.
         */
        memset(&cap_msg, 0, sizeof(struct dm_capabilities));
        cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
        cap_msg.hdr.size = sizeof(struct dm_capabilities);
        cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

        cap_msg.caps.cap_bits.balloon = 1;
        cap_msg.caps.cap_bits.hot_add = 1;

        /*
         * Specify our alignment requirements as they relate to
         * memory hot-add. Specify 128MB alignment.
         */
        cap_msg.caps.cap_bits.hot_add_alignment = 7;

        /*
         * Currently the host does not use these
         * values and we set them to what is done in the
         * Windows driver.
         */
        cap_msg.min_page_cnt = 0;
        cap_msg.max_page_number = -1;

        ret = vmbus_sendpacket(dev->channel, &cap_msg,
                                sizeof(struct dm_capabilities),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If the host does not like our capabilities,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        dm_device.state = DM_INITIALIZED;

        return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
#endif
        kthread_stop(dm_device.thread);

probe_error1:
        vmbus_close(dev->channel);
probe_error0:
        kfree(send_buffer);
        return ret;
}

static int balloon_remove(struct hv_device *dev)
{
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct hv_hotadd_state *has, *tmp;
        struct hv_hotadd_gap *gap, *tmp_gap;
        unsigned long flags;

        if (dm->num_pages_ballooned != 0)
                pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

        cancel_work_sync(&dm->balloon_wrk.wrk);
        cancel_work_sync(&dm->ha_wrk.wrk);

        vmbus_close(dev->channel);
        kthread_stop(dm->thread);
        kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
        unregister_memory_notifier(&hv_memory_nb);
#endif
        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
                list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
                        list_del(&gap->list);
                        kfree(gap);
                }
                list_del(&has->list);
                kfree(has);
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
        /* Dynamic Memory Class ID */
        /* 525074DC-8985-46e2-8057-A307DC18A502 */
        { HV_DM_GUID, },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
        .name = "hv_balloon",
        .id_table = id_table,
        .probe = balloon_probe,
        .remove = balloon_remove,
};

static int __init init_balloon_drv(void)
{

        return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");