linux/drivers/hv/hv_balloon.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2012, Microsoft Corporation.
   4 *
   5 * Author:
   6 *   K. Y. Srinivasan <kys@microsoft.com>
   7 */
   8
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/kernel.h>
  12#include <linux/jiffies.h>
  13#include <linux/mman.h>
  14#include <linux/delay.h>
  15#include <linux/init.h>
  16#include <linux/module.h>
  17#include <linux/slab.h>
  18#include <linux/kthread.h>
  19#include <linux/completion.h>
  20#include <linux/memory_hotplug.h>
  21#include <linux/memory.h>
  22#include <linux/notifier.h>
  23#include <linux/percpu_counter.h>
  24
  25#include <linux/hyperv.h>
  26
  27#define CREATE_TRACE_POINTS
  28#include "hv_trace_balloon.h"
  29
  30/*
  31 * We begin with definitions supporting the Dynamic Memory protocol
  32 * with the host.
  33 *
  34 * Begin protocol definitions.
  35 */
  36
  37
  38
  39/*
  40 * Protocol versions. The low word is the minor version, the high word the major
  41 * version.
  42 *
  43 * History:
  44 * Initial version 1.0
  45 * Changed to 0.1 on 2009/03/25
   46 * Changed to 0.2 on 2009/05/14
   47 * Changed to 0.3 on 2009/12/03
  48 * Changed to 1.0 on 2011/04/05
  49 */
  50
  51#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
  52#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
  53#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
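/*
 * For example, DYNMEM_MAKE_VERSION(2, 0) yields 0x00020000;
 * DYNMEM_MAJOR_VERSION() then returns 2 and DYNMEM_MINOR_VERSION() returns 0.
 */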
  54
  55enum {
  56        DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
  57        DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
  58        DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
  59
  60        DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
  61        DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
  62        DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
  63
  64        DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
  65};
  66
  67
  68
  69/*
  70 * Message Types
  71 */
  72
  73enum dm_message_type {
  74        /*
  75         * Version 0.3
  76         */
  77        DM_ERROR                        = 0,
  78        DM_VERSION_REQUEST              = 1,
  79        DM_VERSION_RESPONSE             = 2,
  80        DM_CAPABILITIES_REPORT          = 3,
  81        DM_CAPABILITIES_RESPONSE        = 4,
  82        DM_STATUS_REPORT                = 5,
  83        DM_BALLOON_REQUEST              = 6,
  84        DM_BALLOON_RESPONSE             = 7,
  85        DM_UNBALLOON_REQUEST            = 8,
  86        DM_UNBALLOON_RESPONSE           = 9,
  87        DM_MEM_HOT_ADD_REQUEST          = 10,
  88        DM_MEM_HOT_ADD_RESPONSE         = 11,
  89        DM_VERSION_03_MAX               = 11,
  90        /*
  91         * Version 1.0.
  92         */
  93        DM_INFO_MESSAGE                 = 12,
  94        DM_VERSION_1_MAX                = 12
  95};
  96
  97
  98/*
  99 * Structures defining the dynamic memory management
 100 * protocol.
 101 */
 102
 103union dm_version {
 104        struct {
 105                __u16 minor_version;
 106                __u16 major_version;
 107        };
 108        __u32 version;
 109} __packed;
 110
 111
 112union dm_caps {
 113        struct {
 114                __u64 balloon:1;
 115                __u64 hot_add:1;
 116                /*
 117                 * To support guests that may have alignment
 118                 * limitations on hot-add, the guest can specify
 119                 * its alignment requirements; a value of n
  120                 * represents an alignment of 2^n in megabytes.
 121                 */
 122                __u64 hot_add_alignment:4;
 123                __u64 reservedz:58;
 124        } cap_bits;
 125        __u64 caps;
 126} __packed;
 127
 128union dm_mem_page_range {
 129        struct  {
 130                /*
 131                 * The PFN number of the first page in the range.
 132                 * 40 bits is the architectural limit of a PFN
 133                 * number for AMD64.
 134                 */
 135                __u64 start_page:40;
 136                /*
 137                 * The number of pages in the range.
 138                 */
 139                __u64 page_cnt:24;
 140        } finfo;
 141        __u64  page_range;
 142} __packed;
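/*
 * For example, a request for 512 pages starting at PFN 0x100000 is carried
 * as finfo.start_page = 0x100000 and finfo.page_cnt = 512, packed into the
 * single 64-bit page_range value.
 */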
 143
 144
 145
 146/*
 147 * The header for all dynamic memory messages:
 148 *
 149 * type: Type of the message.
  150 * size: Size of the message in bytes, including the header.
 151 * trans_id: The guest is responsible for manufacturing this ID.
 152 */
 153
 154struct dm_header {
 155        __u16 type;
 156        __u16 size;
 157        __u32 trans_id;
 158} __packed;
 159
 160/*
 161 * A generic message format for dynamic memory.
 162 * Specific message formats are defined later in the file.
 163 */
 164
 165struct dm_message {
 166        struct dm_header hdr;
 167        __u8 data[]; /* enclosed message */
 168} __packed;
 169
 170
 171/*
 172 * Specific message types supporting the dynamic memory protocol.
 173 */
 174
 175/*
 176 * Version negotiation message. Sent from the guest to the host.
 177 * The guest is free to try different versions until the host
 178 * accepts the version.
 179 *
 180 * dm_version: The protocol version requested.
  181 * is_last_attempt: If TRUE, this is the last version the guest will request.
 182 * reservedz: Reserved field, set to zero.
 183 */
 184
 185struct dm_version_request {
 186        struct dm_header hdr;
 187        union dm_version version;
 188        __u32 is_last_attempt:1;
 189        __u32 reservedz:31;
 190} __packed;
 191
 192/*
  193 * Version response message; sent from the host to the guest and indicates
  194 * whether the host has accepted the version sent by the guest.
 195 *
 196 * is_accepted: If TRUE, host has accepted the version and the guest
  197 * should proceed to the next stage of the protocol. FALSE indicates that
  198 * the guest should retry with a different version.
 199 *
 200 * reservedz: Reserved field, set to zero.
 201 */
 202
 203struct dm_version_response {
 204        struct dm_header hdr;
 205        __u64 is_accepted:1;
 206        __u64 reservedz:63;
 207} __packed;
 208
 209/*
 210 * Message reporting capabilities. This is sent from the guest to the
 211 * host.
 212 */
 213
 214struct dm_capabilities {
 215        struct dm_header hdr;
 216        union dm_caps caps;
 217        __u64 min_page_cnt;
 218        __u64 max_page_number;
 219} __packed;
 220
 221/*
 222 * Response to the capabilities message. This is sent from the host to the
  223 * guest. This message indicates whether the host has accepted the guest's
  224 * capabilities. If the host has not accepted, the guest must shut down
  225 * the service.
 226 *
 227 * is_accepted: Indicates if the host has accepted guest's capabilities.
 228 * reservedz: Must be 0.
 229 */
 230
 231struct dm_capabilities_resp_msg {
 232        struct dm_header hdr;
 233        __u64 is_accepted:1;
 234        __u64 reservedz:63;
 235} __packed;
 236
 237/*
 238 * This message is used to report memory pressure from the guest.
 239 * This message is not part of any transaction and there is no
 240 * response to this message.
 241 *
 242 * num_avail: Available memory in pages.
 243 * num_committed: Committed memory in pages.
 244 * page_file_size: The accumulated size of all page files
 245 *                 in the system in pages.
  246 * zero_free: The number of zero and free pages.
 247 * page_file_writes: The writes to the page file in pages.
 248 * io_diff: An indicator of file cache efficiency or page file activity,
 249 *          calculated as File Cache Page Fault Count - Page Read Count.
 250 *          This value is in pages.
 251 *
 252 * Some of these metrics are Windows specific and fortunately
 253 * the algorithm on the host side that computes the guest memory
 254 * pressure only uses num_committed value.
 255 */
 256
 257struct dm_status {
 258        struct dm_header hdr;
 259        __u64 num_avail;
 260        __u64 num_committed;
 261        __u64 page_file_size;
 262        __u64 zero_free;
 263        __u32 page_file_writes;
 264        __u32 io_diff;
 265} __packed;
 266
 267
 268/*
 269 * Message to ask the guest to allocate memory - balloon up message.
 270 * This message is sent from the host to the guest. The guest may not be
 271 * able to allocate as much memory as requested.
 272 *
 273 * num_pages: number of pages to allocate.
 274 */
 275
 276struct dm_balloon {
 277        struct dm_header hdr;
 278        __u32 num_pages;
 279        __u32 reservedz;
 280} __packed;
 281
 282
 283/*
 284 * Balloon response message; this message is sent from the guest
 285 * to the host in response to the balloon message.
 286 *
 287 * reservedz: Reserved; must be set to zero.
  288 * more_pages: If FALSE, this is the last message of the transaction;
  289 * if TRUE, there will be at least one more message from the guest.
 290 *
 291 * range_count: The number of ranges in the range array.
 292 *
 293 * range_array: An array of page ranges returned to the host.
 294 *
 295 */
 296
 297struct dm_balloon_response {
 298        struct dm_header hdr;
 299        __u32 reservedz;
 300        __u32 more_pages:1;
 301        __u32 range_count:31;
 302        union dm_mem_page_range range_array[];
 303} __packed;
 304
 305/*
 306 * Un-balloon message; this message is sent from the host
 307 * to the guest to give guest more memory.
 308 *
  309 * more_pages: If FALSE, this is the last message of the transaction;
  310 * if TRUE, there will be at least one more message from the guest.
 311 *
 312 * reservedz: Reserved; must be set to zero.
 313 *
 314 * range_count: The number of ranges in the range array.
 315 *
 316 * range_array: An array of page ranges returned to the host.
 317 *
 318 */
 319
 320struct dm_unballoon_request {
 321        struct dm_header hdr;
 322        __u32 more_pages:1;
 323        __u32 reservedz:31;
 324        __u32 range_count;
 325        union dm_mem_page_range range_array[];
 326} __packed;
 327
 328/*
 329 * Un-balloon response message; this message is sent from the guest
 330 * to the host in response to an unballoon request.
 331 *
 332 */
 333
 334struct dm_unballoon_response {
 335        struct dm_header hdr;
 336} __packed;
 337
 338
 339/*
 340 * Hot add request message. Message sent from the host to the guest.
 341 *
 342 * mem_range: Memory range to hot add.
 343 *
 344 * On Linux we currently don't support this since we cannot hot add
 345 * arbitrary granularity of memory.
 346 */
 347
 348struct dm_hot_add {
 349        struct dm_header hdr;
 350        union dm_mem_page_range range;
 351} __packed;
 352
 353/*
 354 * Hot add response message.
 355 * This message is sent by the guest to report the status of a hot add request.
 356 * If page_count is less than the requested page count, then the host should
 357 * assume all further hot add requests will fail, since this indicates that
 358 * the guest has hit an upper physical memory barrier.
 359 *
 360 * Hot adds may also fail due to low resources; in this case, the guest must
 361 * not complete this message until the hot add can succeed, and the host must
 362 * not send a new hot add request until the response is sent.
  363 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
  364 * times, it fails the request.
 365 *
 366 *
 367 * page_count: number of pages that were successfully hot added.
 368 *
 369 * result: result of the operation 1: success, 0: failure.
 370 *
 371 */
 372
 373struct dm_hot_add_response {
 374        struct dm_header hdr;
 375        __u32 page_count;
 376        __u32 result;
 377} __packed;
 378
 379/*
 380 * Types of information sent from host to the guest.
 381 */
 382
 383enum dm_info_type {
 384        INFO_TYPE_MAX_PAGE_CNT = 0,
 385        MAX_INFO_TYPE
 386};
 387
 388
 389/*
 390 * Header for the information message.
 391 */
 392
 393struct dm_info_header {
 394        enum dm_info_type type;
 395        __u32 data_size;
 396} __packed;
 397
 398/*
 399 * This message is sent from the host to the guest to pass
 400 * some relevant information (win8 addition).
 401 *
  402 * reserved: not used.
 403 * info_size: size of the information blob.
 404 * info: information blob.
 405 */
 406
 407struct dm_info_msg {
 408        struct dm_header hdr;
 409        __u32 reserved;
 410        __u32 info_size;
 411        __u8  info[];
 412};
 413
 414/*
 415 * End protocol definitions.
 416 */
 417
 418/*
 419 * State to manage hot adding memory into the guest.
 420 * The range start_pfn : end_pfn specifies the range
 421 * that the host has asked us to hot add. The range
 422 * start_pfn : ha_end_pfn specifies the range that we have
 423 * currently hot added. We hot add in multiples of 128M
 424 * chunks; it is possible that we may not be able to bring
 425 * online all the pages in the region. The range
 426 * covered_start_pfn:covered_end_pfn defines the pages that can
  427 * be brought online.
 428 */
 429
 430struct hv_hotadd_state {
 431        struct list_head list;
 432        unsigned long start_pfn;
 433        unsigned long covered_start_pfn;
 434        unsigned long covered_end_pfn;
 435        unsigned long ha_end_pfn;
 436        unsigned long end_pfn;
 437        /*
 438         * A list of gaps.
 439         */
 440        struct list_head gap_list;
 441};
 442
 443struct hv_hotadd_gap {
 444        struct list_head list;
 445        unsigned long start_pfn;
 446        unsigned long end_pfn;
 447};
 448
 449struct balloon_state {
 450        __u32 num_pages;
 451        struct work_struct wrk;
 452};
 453
 454struct hot_add_wrk {
 455        union dm_mem_page_range ha_page_range;
 456        union dm_mem_page_range ha_region_range;
 457        struct work_struct wrk;
 458};
 459
 460static bool hot_add = true;
 461static bool do_hot_add;
 462/*
 463 * Delay reporting memory pressure by
 464 * the specified number of seconds.
 465 */
 466static uint pressure_report_delay = 45;
 467
 468/*
 469 * The last time we posted a pressure report to host.
 470 */
 471static unsigned long last_post_time;
 472
 473module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
 474MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
 475
 476module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
 477MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
 478static atomic_t trans_id = ATOMIC_INIT(0);
 479
 480static int dm_ring_size = (5 * PAGE_SIZE);
 481
 482/*
 483 * Driver specific state.
 484 */
 485
 486enum hv_dm_state {
 487        DM_INITIALIZING = 0,
 488        DM_INITIALIZED,
 489        DM_BALLOON_UP,
 490        DM_BALLOON_DOWN,
 491        DM_HOT_ADD,
 492        DM_INIT_ERROR
 493};
 494
 495
 496static __u8 recv_buffer[PAGE_SIZE];
 497static __u8 *send_buffer;
 498#define PAGES_IN_2M     512
 499#define HA_CHUNK (32 * 1024)
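/*
 * HA_CHUNK is expressed in pfns: 32K pfns, i.e. 128 MB with a 4 KB page
 * size. Memory is hot added in multiples of this chunk size.
 */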
 500
 501struct hv_dynmem_device {
 502        struct hv_device *dev;
 503        enum hv_dm_state state;
 504        struct completion host_event;
 505        struct completion config_event;
 506
 507        /*
 508         * Number of pages we have currently ballooned out.
 509         */
 510        unsigned int num_pages_ballooned;
 511        unsigned int num_pages_onlined;
 512        unsigned int num_pages_added;
 513
 514        /*
 515         * State to manage the ballooning (up) operation.
 516         */
 517        struct balloon_state balloon_wrk;
 518
 519        /*
 520         * State to execute the "hot-add" operation.
 521         */
 522        struct hot_add_wrk ha_wrk;
 523
 524        /*
 525         * This state tracks if the host has specified a hot-add
 526         * region.
 527         */
 528        bool host_specified_ha_region;
 529
 530        /*
 531         * State to synchronize hot-add.
 532         */
 533        struct completion  ol_waitevent;
 534        bool ha_waiting;
 535        /*
 536         * This thread handles hot-add
 537         * requests from the host as well as notifying
 538         * the host with regards to memory pressure in
 539         * the guest.
 540         */
 541        struct task_struct *thread;
 542
 543        /*
 544         * Protects ha_region_list, num_pages_onlined counter and individual
 545         * regions from ha_region_list.
 546         */
 547        spinlock_t ha_lock;
 548
 549        /*
 550         * A list of hot-add regions.
 551         */
 552        struct list_head ha_region_list;
 553
 554        /*
 555         * We start with the highest version we can support
 556         * and downgrade based on the host; we save here the
 557         * next version to try.
 558         */
 559        __u32 next_version;
 560
 561        /*
 562         * The negotiated version agreed by host.
 563         */
 564        __u32 version;
 565};
 566
 567static struct hv_dynmem_device dm_device;
 568
 569static void post_status(struct hv_dynmem_device *dm);
 570
 571#ifdef CONFIG_MEMORY_HOTPLUG
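/*
 * Check if a pfn is backed: it must fall inside the covered portion of the
 * hot-add region and must not lie in any of the region's gaps.
 */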
 572static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
 573                                     unsigned long pfn)
 574{
 575        struct hv_hotadd_gap *gap;
 576
 577        /* The page is not backed. */
 578        if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
 579                return false;
 580
 581        /* Check for gaps. */
 582        list_for_each_entry(gap, &has->gap_list, list) {
 583                if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
 584                        return false;
 585        }
 586
 587        return true;
 588}
 589
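/*
 * Count how many pfns in [start_pfn, start_pfn + nr_pages) belong to one of
 * our hot-add regions and are actually backed; pfns outside any region
 * (e.g. memory present at boot) are not counted.
 */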
 590static unsigned long hv_page_offline_check(unsigned long start_pfn,
 591                                           unsigned long nr_pages)
 592{
 593        unsigned long pfn = start_pfn, count = 0;
 594        struct hv_hotadd_state *has;
 595        bool found;
 596
 597        while (pfn < start_pfn + nr_pages) {
 598                /*
  599                 * Search for the HAS which covers the pfn and, when we find one,
  600                 * count how many consecutive PFNs are covered.
 601                 */
 602                found = false;
 603                list_for_each_entry(has, &dm_device.ha_region_list, list) {
 604                        while ((pfn >= has->start_pfn) &&
 605                               (pfn < has->end_pfn) &&
 606                               (pfn < start_pfn + nr_pages)) {
 607                                found = true;
 608                                if (has_pfn_is_backed(has, pfn))
 609                                        count++;
 610                                pfn++;
 611                        }
 612                }
 613
 614                /*
 615                 * This PFN is not in any HAS (e.g. we're offlining a region
 616                 * which was present at boot), no need to account for it. Go
 617                 * to the next one.
 618                 */
 619                if (!found)
 620                        pfn++;
 621        }
 622
 623        return count;
 624}
 625
 626static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
 627                              void *v)
 628{
 629        struct memory_notify *mem = (struct memory_notify *)v;
 630        unsigned long flags, pfn_count;
 631
 632        switch (val) {
 633        case MEM_ONLINE:
 634        case MEM_CANCEL_ONLINE:
 635                if (dm_device.ha_waiting) {
 636                        dm_device.ha_waiting = false;
 637                        complete(&dm_device.ol_waitevent);
 638                }
 639                break;
 640
 641        case MEM_OFFLINE:
 642                spin_lock_irqsave(&dm_device.ha_lock, flags);
 643                pfn_count = hv_page_offline_check(mem->start_pfn,
 644                                                  mem->nr_pages);
 645                if (pfn_count <= dm_device.num_pages_onlined) {
 646                        dm_device.num_pages_onlined -= pfn_count;
 647                } else {
 648                        /*
 649                         * We're offlining more pages than we managed to online.
 650                         * This is unexpected. In any case don't let
 651                         * num_pages_onlined wrap around zero.
 652                         */
 653                        WARN_ON_ONCE(1);
 654                        dm_device.num_pages_onlined = 0;
 655                }
 656                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 657                break;
 658        case MEM_GOING_ONLINE:
 659        case MEM_GOING_OFFLINE:
 660        case MEM_CANCEL_OFFLINE:
 661                break;
 662        }
 663        return NOTIFY_OK;
 664}
 665
 666static struct notifier_block hv_memory_nb = {
 667        .notifier_call = hv_memory_notifier,
 668        .priority = 0
 669};
 670
 671/* Check if the particular page is backed and can be onlined and online it. */
 672static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
 673{
 674        if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
 675                if (!PageOffline(pg))
 676                        __SetPageOffline(pg);
 677                return;
 678        }
 679        if (PageOffline(pg))
 680                __ClearPageOffline(pg);
 681
 682        /* This frame is currently backed; online the page. */
 683        __online_page_set_limits(pg);
 684        __online_page_increment_counters(pg);
 685        __online_page_free(pg);
 686
 687        lockdep_assert_held(&dm_device.ha_lock);
 688        dm_device.num_pages_onlined++;
 689}
 690
 691static void hv_bring_pgs_online(struct hv_hotadd_state *has,
 692                                unsigned long start_pfn, unsigned long size)
 693{
 694        int i;
 695
 696        pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
 697        for (i = 0; i < size; i++)
 698                hv_page_online_one(has, pfn_to_page(start_pfn + i));
 699}
 700
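/*
 * Hot add the range in HA_CHUNK increments, updating the covered range of
 * the hot-add region as we go. After each successful add_memory() we wait
 * (with a timeout) for the block to be onlined, unless onlining happens
 * automatically, and post a status report to the host.
 */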
 701static void hv_mem_hot_add(unsigned long start, unsigned long size,
 702                                unsigned long pfn_count,
 703                                struct hv_hotadd_state *has)
 704{
 705        int ret = 0;
 706        int i, nid;
 707        unsigned long start_pfn;
 708        unsigned long processed_pfn;
 709        unsigned long total_pfn = pfn_count;
 710        unsigned long flags;
 711
 712        for (i = 0; i < (size/HA_CHUNK); i++) {
 713                start_pfn = start + (i * HA_CHUNK);
 714
 715                spin_lock_irqsave(&dm_device.ha_lock, flags);
 716                has->ha_end_pfn +=  HA_CHUNK;
 717
 718                if (total_pfn > HA_CHUNK) {
 719                        processed_pfn = HA_CHUNK;
 720                        total_pfn -= HA_CHUNK;
 721                } else {
 722                        processed_pfn = total_pfn;
 723                        total_pfn = 0;
 724                }
 725
 726                has->covered_end_pfn +=  processed_pfn;
 727                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 728
 729                init_completion(&dm_device.ol_waitevent);
 730                dm_device.ha_waiting = !memhp_auto_online;
 731
 732                nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
 733                ret = add_memory(nid, PFN_PHYS((start_pfn)),
 734                                (HA_CHUNK << PAGE_SHIFT));
 735
 736                if (ret) {
 737                        pr_err("hot_add memory failed error is %d\n", ret);
 738                        if (ret == -EEXIST) {
 739                                /*
  740                                 * This error indicates that the failure
  741                                 * is not transient. This is the
 742                                 * case where the guest's physical address map
 743                                 * precludes hot adding memory. Stop all further
 744                                 * memory hot-add.
 745                                 */
 746                                do_hot_add = false;
 747                        }
 748                        spin_lock_irqsave(&dm_device.ha_lock, flags);
 749                        has->ha_end_pfn -= HA_CHUNK;
 750                        has->covered_end_pfn -=  processed_pfn;
 751                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 752                        break;
 753                }
 754
 755                /*
 756                 * Wait for the memory block to be onlined when memory onlining
 757                 * is done outside of kernel (memhp_auto_online). Since the hot
 758                 * add has succeeded, it is ok to proceed even if the pages in
 759                 * the hot added region have not been "onlined" within the
 760                 * allowed time.
 761                 */
 762                if (dm_device.ha_waiting)
 763                        wait_for_completion_timeout(&dm_device.ol_waitevent,
 764                                                    5*HZ);
 765                post_status(&dm_device);
 766        }
 767}
 768
 769static void hv_online_page(struct page *pg, unsigned int order)
 770{
 771        struct hv_hotadd_state *has;
 772        unsigned long flags;
 773        unsigned long pfn = page_to_pfn(pg);
 774
 775        spin_lock_irqsave(&dm_device.ha_lock, flags);
 776        list_for_each_entry(has, &dm_device.ha_region_list, list) {
 777                /* The page belongs to a different HAS. */
 778                if ((pfn < has->start_pfn) ||
 779                                (pfn + (1UL << order) > has->end_pfn))
 780                        continue;
 781
 782                hv_bring_pgs_online(has, pfn, 1UL << order);
 783                break;
 784        }
 785        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 786}
 787
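/*
 * Check whether the pfn range is covered by one of our hot-add regions.
 * If the request does not start at the region's covered_end_pfn, a gap is
 * recorded; if it extends past the region end, the region is grown in
 * HA_CHUNK multiples. Returns 1 if covered, 0 if no region matches and
 * -ENOMEM if a gap cannot be allocated.
 */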
 788static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 789{
 790        struct hv_hotadd_state *has;
 791        struct hv_hotadd_gap *gap;
 792        unsigned long residual, new_inc;
 793        int ret = 0;
 794        unsigned long flags;
 795
 796        spin_lock_irqsave(&dm_device.ha_lock, flags);
 797        list_for_each_entry(has, &dm_device.ha_region_list, list) {
 798                /*
 799                 * If the pfn range we are dealing with is not in the current
 800                 * "hot add block", move on.
 801                 */
 802                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
 803                        continue;
 804
 805                /*
 806                 * If the current start pfn is not where the covered_end
 807                 * is, create a gap and update covered_end_pfn.
 808                 */
 809                if (has->covered_end_pfn != start_pfn) {
 810                        gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
 811                        if (!gap) {
 812                                ret = -ENOMEM;
 813                                break;
 814                        }
 815
 816                        INIT_LIST_HEAD(&gap->list);
 817                        gap->start_pfn = has->covered_end_pfn;
 818                        gap->end_pfn = start_pfn;
 819                        list_add_tail(&gap->list, &has->gap_list);
 820
 821                        has->covered_end_pfn = start_pfn;
 822                }
 823
 824                /*
  825                 * If the current hot-add request extends beyond
  826                 * our current limit, extend it.
 827                 */
 828                if ((start_pfn + pfn_cnt) > has->end_pfn) {
 829                        residual = (start_pfn + pfn_cnt - has->end_pfn);
 830                        /*
 831                         * Extend the region by multiples of HA_CHUNK.
 832                         */
 833                        new_inc = (residual / HA_CHUNK) * HA_CHUNK;
 834                        if (residual % HA_CHUNK)
 835                                new_inc += HA_CHUNK;
 836
 837                        has->end_pfn += new_inc;
 838                }
 839
 840                ret = 1;
 841                break;
 842        }
 843        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 844
 845        return ret;
 846}
 847
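/*
 * Cover and online the requested page range: pages that fall within the
 * already hot-added part of the region are onlined directly, the remainder
 * is hot added in HA_CHUNK multiples. Returns the number of pages that
 * were newly covered.
 */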
 848static unsigned long handle_pg_range(unsigned long pg_start,
 849                                        unsigned long pg_count)
 850{
 851        unsigned long start_pfn = pg_start;
 852        unsigned long pfn_cnt = pg_count;
 853        unsigned long size;
 854        struct hv_hotadd_state *has;
 855        unsigned long pgs_ol = 0;
 856        unsigned long old_covered_state;
 857        unsigned long res = 0, flags;
 858
 859        pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
 860                pg_start);
 861
 862        spin_lock_irqsave(&dm_device.ha_lock, flags);
 863        list_for_each_entry(has, &dm_device.ha_region_list, list) {
 864                /*
 865                 * If the pfn range we are dealing with is not in the current
 866                 * "hot add block", move on.
 867                 */
 868                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
 869                        continue;
 870
 871                old_covered_state = has->covered_end_pfn;
 872
 873                if (start_pfn < has->ha_end_pfn) {
 874                        /*
 875                         * This is the case where we are backing pages
 876                         * in an already hot added region. Bring
 877                         * these pages online first.
 878                         */
 879                        pgs_ol = has->ha_end_pfn - start_pfn;
 880                        if (pgs_ol > pfn_cnt)
 881                                pgs_ol = pfn_cnt;
 882
 883                        has->covered_end_pfn +=  pgs_ol;
 884                        pfn_cnt -= pgs_ol;
 885                        /*
 886                         * Check if the corresponding memory block is already
 887                         * online. It is possible to observe struct pages still
 888                         * being uninitialized here so check section instead.
 889                         * In case the section is online we need to bring the
  890                         * rest of the pfns (which were not backed previously)
 891                         * online too.
 892                         */
 893                        if (start_pfn > has->start_pfn &&
 894                            online_section_nr(pfn_to_section_nr(start_pfn)))
 895                                hv_bring_pgs_online(has, start_pfn, pgs_ol);
 896
 897                }
 898
 899                if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
 900                        /*
 901                         * We have some residual hot add range
 902                         * that needs to be hot added; hot add
  903                         * it now. Hot add a multiple
  904                         * of HA_CHUNK that fully covers the pages
 905                         * we have.
 906                         */
 907                        size = (has->end_pfn - has->ha_end_pfn);
 908                        if (pfn_cnt <= size) {
 909                                size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
 910                                if (pfn_cnt % HA_CHUNK)
 911                                        size += HA_CHUNK;
 912                        } else {
 913                                pfn_cnt = size;
 914                        }
 915                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 916                        hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
 917                        spin_lock_irqsave(&dm_device.ha_lock, flags);
 918                }
 919                /*
 920                 * If we managed to online any pages that were given to us,
 921                 * we declare success.
 922                 */
 923                res = has->covered_end_pfn - old_covered_state;
 924                break;
 925        }
 926        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 927
 928        return res;
 929}
 930
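/*
 * Top-level hot-add handler: create a new hot-add region from
 * rg_start/rg_size when needed (i.e. the range is not already covered),
 * then hand the page range to handle_pg_range().
 */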
 931static unsigned long process_hot_add(unsigned long pg_start,
 932                                        unsigned long pfn_cnt,
 933                                        unsigned long rg_start,
 934                                        unsigned long rg_size)
 935{
 936        struct hv_hotadd_state *ha_region = NULL;
 937        int covered;
 938        unsigned long flags;
 939
 940        if (pfn_cnt == 0)
 941                return 0;
 942
 943        if (!dm_device.host_specified_ha_region) {
 944                covered = pfn_covered(pg_start, pfn_cnt);
 945                if (covered < 0)
 946                        return 0;
 947
 948                if (covered)
 949                        goto do_pg_range;
 950        }
 951
 952        /*
  953         * If the host has specified a hot-add range, deal with it first.
 954         */
 955
 956        if (rg_size != 0) {
 957                ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
 958                if (!ha_region)
 959                        return 0;
 960
 961                INIT_LIST_HEAD(&ha_region->list);
 962                INIT_LIST_HEAD(&ha_region->gap_list);
 963
 964                ha_region->start_pfn = rg_start;
 965                ha_region->ha_end_pfn = rg_start;
 966                ha_region->covered_start_pfn = pg_start;
 967                ha_region->covered_end_pfn = pg_start;
 968                ha_region->end_pfn = rg_start + rg_size;
 969
 970                spin_lock_irqsave(&dm_device.ha_lock, flags);
 971                list_add_tail(&ha_region->list, &dm_device.ha_region_list);
 972                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 973        }
 974
 975do_pg_range:
 976        /*
  977         * Process the specified page range, bringing the pages
  978         * online if possible.
 979         */
 980        return handle_pg_range(pg_start, pfn_cnt);
 981}
 982
 983#endif
 984
 985static void hot_add_req(struct work_struct *dummy)
 986{
 987        struct dm_hot_add_response resp;
 988#ifdef CONFIG_MEMORY_HOTPLUG
 989        unsigned long pg_start, pfn_cnt;
 990        unsigned long rg_start, rg_sz;
 991#endif
 992        struct hv_dynmem_device *dm = &dm_device;
 993
 994        memset(&resp, 0, sizeof(struct dm_hot_add_response));
 995        resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
 996        resp.hdr.size = sizeof(struct dm_hot_add_response);
 997
 998#ifdef CONFIG_MEMORY_HOTPLUG
 999        pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
1000        pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
1001
1002        rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
1003        rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
1004
1005        if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
1006                unsigned long region_size;
1007                unsigned long region_start;
1008
1009                /*
1010                 * The host has not specified the hot-add region.
1011                 * Based on the hot-add page range being specified,
1012                 * compute a hot-add region that can cover the pages
1013                 * that need to be hot-added while ensuring the alignment
1014                 * and size requirements of Linux as it relates to hot-add.
1015                 */
1016                region_start = pg_start;
1017                region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
1018                if (pfn_cnt % HA_CHUNK)
1019                        region_size += HA_CHUNK;
1020
1021                region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
1022
1023                rg_start = region_start;
1024                rg_sz = region_size;
1025        }
1026
1027        if (do_hot_add)
1028                resp.page_count = process_hot_add(pg_start, pfn_cnt,
1029                                                rg_start, rg_sz);
1030
1031        dm->num_pages_added += resp.page_count;
1032#endif
1033        /*
1034         * The result field of the response structure has the
1035         * following semantics:
1036         *
1037         * 1. If all or some pages hot-added: Guest should return success.
1038         *
1039         * 2. If no pages could be hot-added:
1040         *
1041         * If the guest returns success, then the host
1042         * will not attempt any further hot-add operations. This
1043         * signifies a permanent failure.
1044         *
1045         * If the guest returns failure, then this failure will be
1046         * treated as a transient failure and the host may retry the
1047         * hot-add operation after some delay.
1048         */
1049        if (resp.page_count > 0)
1050                resp.result = 1;
1051        else if (!do_hot_add)
1052                resp.result = 1;
1053        else
1054                resp.result = 0;
1055
1056        if (!do_hot_add || (resp.page_count == 0))
1057                pr_err("Memory hot add failed\n");
1058
1059        dm->state = DM_INITIALIZED;
1060        resp.hdr.trans_id = atomic_inc_return(&trans_id);
1061        vmbus_sendpacket(dm->dev->channel, &resp,
1062                        sizeof(struct dm_hot_add_response),
1063                        (unsigned long)NULL,
1064                        VM_PKT_DATA_INBAND, 0);
1065}
1066
1067static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
1068{
1069        struct dm_info_header *info_hdr;
1070
1071        info_hdr = (struct dm_info_header *)msg->info;
1072
1073        switch (info_hdr->type) {
1074        case INFO_TYPE_MAX_PAGE_CNT:
1075                if (info_hdr->data_size == sizeof(__u64)) {
1076                        __u64 *max_page_count = (__u64 *)&info_hdr[1];
1077
1078                        pr_info("Max. dynamic memory size: %llu MB\n",
1079                                (*max_page_count) >> (20 - PAGE_SHIFT));
1080                }
1081
1082                break;
1083        default:
1084                pr_warn("Received Unknown type: %d\n", info_hdr->type);
1085        }
1086}
1087
1088static unsigned long compute_balloon_floor(void)
1089{
1090        unsigned long min_pages;
1091        unsigned long nr_pages = totalram_pages();
1092#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
 1093        /* Simple continuous piecewise linear function:
1094         *  max MiB -> min MiB  gradient
1095         *       0         0
1096         *      16        16
1097         *      32        24
1098         *     128        72    (1/2)
1099         *     512       168    (1/4)
1100         *    2048       360    (1/8)
1101         *    8192       744    (1/16)
1102         *   32768      1512    (1/32)
1103         */
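        /*
         * Example: a 4 GiB guest (nr_pages == MB2PAGES(4096)) falls in the
         * fourth bracket, so min_pages = MB2PAGES(232) + (nr_pages >> 4),
         * i.e. a floor of 488 MiB.
         */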
1104        if (nr_pages < MB2PAGES(128))
1105                min_pages = MB2PAGES(8) + (nr_pages >> 1);
1106        else if (nr_pages < MB2PAGES(512))
1107                min_pages = MB2PAGES(40) + (nr_pages >> 2);
1108        else if (nr_pages < MB2PAGES(2048))
1109                min_pages = MB2PAGES(104) + (nr_pages >> 3);
1110        else if (nr_pages < MB2PAGES(8192))
1111                min_pages = MB2PAGES(232) + (nr_pages >> 4);
1112        else
1113                min_pages = MB2PAGES(488) + (nr_pages >> 5);
1114#undef MB2PAGES
1115        return min_pages;
1116}
1117
1118/*
 1119 * Post our status, as it relates to memory pressure, to the
 1120 * host. The host expects the guest to post this status
1121 * periodically at 1 second intervals.
1122 *
1123 * The metrics specified in this protocol are very Windows
1124 * specific and so we cook up numbers here to convey our memory
1125 * pressure.
1126 */
1127
1128static void post_status(struct hv_dynmem_device *dm)
1129{
1130        struct dm_status status;
1131        unsigned long now = jiffies;
1132        unsigned long last_post = last_post_time;
1133
1134        if (pressure_report_delay > 0) {
1135                --pressure_report_delay;
1136                return;
1137        }
1138
1139        if (!time_after(now, (last_post_time + HZ)))
1140                return;
1141
1142        memset(&status, 0, sizeof(struct dm_status));
1143        status.hdr.type = DM_STATUS_REPORT;
1144        status.hdr.size = sizeof(struct dm_status);
1145        status.hdr.trans_id = atomic_inc_return(&trans_id);
1146
1147        /*
1148         * The host expects the guest to report free and committed memory.
1149         * Furthermore, the host expects the pressure information to include
1150         * the ballooned out pages. For a given amount of memory that we are
1151         * managing we need to compute a floor below which we should not
1152         * balloon. Compute this and add it to the pressure report.
1153         * We also need to report all offline pages (num_pages_added -
1154         * num_pages_onlined) as committed to the host, otherwise it can try
1155         * asking us to balloon them out.
1156         */
1157        status.num_avail = si_mem_available();
1158        status.num_committed = vm_memory_committed() +
1159                dm->num_pages_ballooned +
1160                (dm->num_pages_added > dm->num_pages_onlined ?
1161                 dm->num_pages_added - dm->num_pages_onlined : 0) +
1162                compute_balloon_floor();
1163
1164        trace_balloon_status(status.num_avail, status.num_committed,
1165                             vm_memory_committed(), dm->num_pages_ballooned,
1166                             dm->num_pages_added, dm->num_pages_onlined);
1167        /*
1168         * If our transaction ID is no longer current, just don't
1169         * send the status. This can happen if we were interrupted
1170         * after we picked our transaction ID.
1171         */
1172        if (status.hdr.trans_id != atomic_read(&trans_id))
1173                return;
1174
1175        /*
1176         * If the last post time that we sampled has changed,
1177         * we have raced, don't post the status.
1178         */
1179        if (last_post != last_post_time)
1180                return;
1181
1182        last_post_time = jiffies;
1183        vmbus_sendpacket(dm->dev->channel, &status,
1184                                sizeof(struct dm_status),
1185                                (unsigned long)NULL,
1186                                VM_PKT_DATA_INBAND, 0);
1187
1188}
1189
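/*
 * Return a range of ballooned pages to the kernel: clear the offline flag
 * on each page, free it and decrement the balloon count.
 */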
1190static void free_balloon_pages(struct hv_dynmem_device *dm,
1191                         union dm_mem_page_range *range_array)
1192{
1193        int num_pages = range_array->finfo.page_cnt;
1194        __u64 start_frame = range_array->finfo.start_page;
1195        struct page *pg;
1196        int i;
1197
1198        for (i = 0; i < num_pages; i++) {
1199                pg = pfn_to_page(i + start_frame);
1200                __ClearPageOffline(pg);
1201                __free_page(pg);
1202                dm->num_pages_ballooned--;
1203        }
1204}
1205
1206
1207
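/*
 * Allocate up to num_pages pages in alloc_unit sized chunks, mark them
 * offline and record each chunk in the balloon response. Returns the number
 * of pages ballooned in this pass; we stop early if an allocation fails or
 * the response buffer fills up.
 */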
1208static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
1209                                        unsigned int num_pages,
1210                                        struct dm_balloon_response *bl_resp,
1211                                        int alloc_unit)
1212{
1213        unsigned int i, j;
1214        struct page *pg;
1215
1216        if (num_pages < alloc_unit)
1217                return 0;
1218
1219        for (i = 0; (i * alloc_unit) < num_pages; i++) {
1220                if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
1221                        PAGE_SIZE)
1222                        return i * alloc_unit;
1223
1224                /*
1225                 * We execute this code in a thread context. Furthermore,
1226                 * we don't want the kernel to try too hard.
1227                 */
1228                pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
1229                                __GFP_NOMEMALLOC | __GFP_NOWARN,
1230                                get_order(alloc_unit << PAGE_SHIFT));
1231
1232                if (!pg)
1233                        return i * alloc_unit;
1234
1235                dm->num_pages_ballooned += alloc_unit;
1236
1237                /*
 1238                 * If we allocated 2M pages, split them so we
 1239                 * can free them in any order we get.
1240                 */
1241
1242                if (alloc_unit != 1)
1243                        split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
1244
1245                /* mark all pages offline */
1246                for (j = 0; j < (1 << get_order(alloc_unit << PAGE_SHIFT)); j++)
1247                        __SetPageOffline(pg + j);
1248
1249                bl_resp->range_count++;
1250                bl_resp->range_array[i].finfo.start_page =
1251                        page_to_pfn(pg);
1252                bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
1253                bl_resp->hdr.size += sizeof(union dm_mem_page_range);
1254
1255        }
1256
1257        return num_pages;
1258}
1259
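/*
 * Worker for a balloon up request: try 2M allocations first and fall back
 * to 4k pages if they fail, never balloon below the computed floor, and
 * report the allocated ranges back to the host in one or more balloon
 * response messages.
 */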
1260static void balloon_up(struct work_struct *dummy)
1261{
1262        unsigned int num_pages = dm_device.balloon_wrk.num_pages;
1263        unsigned int num_ballooned = 0;
1264        struct dm_balloon_response *bl_resp;
1265        int alloc_unit;
1266        int ret;
1267        bool done = false;
1268        int i;
1269        long avail_pages;
1270        unsigned long floor;
1271
1272        /* The host balloons pages in 2M granularity. */
1273        WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);
1274
1275        /*
1276         * We will attempt 2M allocations. However, if we fail to
1277         * allocate 2M chunks, we will go back to 4k allocations.
1278         */
1279        alloc_unit = 512;
1280
1281        avail_pages = si_mem_available();
1282        floor = compute_balloon_floor();
1283
1284        /* Refuse to balloon below the floor, keep the 2M granularity. */
1285        if (avail_pages < num_pages || avail_pages - num_pages < floor) {
1286                pr_warn("Balloon request will be partially fulfilled. %s\n",
1287                        avail_pages < num_pages ? "Not enough memory." :
1288                        "Balloon floor reached.");
1289
1290                num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
1291                num_pages -= num_pages % PAGES_IN_2M;
1292        }
1293
1294        while (!done) {
1295                bl_resp = (struct dm_balloon_response *)send_buffer;
1296                memset(send_buffer, 0, PAGE_SIZE);
1297                bl_resp->hdr.type = DM_BALLOON_RESPONSE;
1298                bl_resp->hdr.size = sizeof(struct dm_balloon_response);
1299                bl_resp->more_pages = 1;
1300
1301                num_pages -= num_ballooned;
1302                num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
1303                                                    bl_resp, alloc_unit);
1304
1305                if (alloc_unit != 1 && num_ballooned == 0) {
1306                        alloc_unit = 1;
1307                        continue;
1308                }
1309
1310                if (num_ballooned == 0 || num_ballooned == num_pages) {
1311                        pr_debug("Ballooned %u out of %u requested pages.\n",
1312                                num_pages, dm_device.balloon_wrk.num_pages);
1313
1314                        bl_resp->more_pages = 0;
1315                        done = true;
1316                        dm_device.state = DM_INITIALIZED;
1317                }
1318
1319                /*
1320                 * We are pushing a lot of data through the channel;
1321                 * deal with transient failures caused because of the
1322                 * lack of space in the ring buffer.
1323                 */
1324
1325                do {
1326                        bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
1327                        ret = vmbus_sendpacket(dm_device.dev->channel,
1328                                                bl_resp,
1329                                                bl_resp->hdr.size,
1330                                                (unsigned long)NULL,
1331                                                VM_PKT_DATA_INBAND, 0);
1332
1333                        if (ret == -EAGAIN)
1334                                msleep(20);
1335                        post_status(&dm_device);
1336                } while (ret == -EAGAIN);
1337
1338                if (ret) {
1339                        /*
 1340                         * Free up the memory we allocated.
1341                         */
1342                        pr_err("Balloon response failed\n");
1343
1344                        for (i = 0; i < bl_resp->range_count; i++)
1345                                free_balloon_pages(&dm_device,
1346                                                 &bl_resp->range_array[i]);
1347
1348                        done = true;
1349                }
1350        }
1351
1352}
1353
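/*
 * Handle an unballoon request: free the page ranges returned by the host
 * and, once the last message of the transaction arrives (more_pages == 0),
 * send the unballoon response and return to the DM_INITIALIZED state.
 */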
1354static void balloon_down(struct hv_dynmem_device *dm,
1355                        struct dm_unballoon_request *req)
1356{
1357        union dm_mem_page_range *range_array = req->range_array;
1358        int range_count = req->range_count;
1359        struct dm_unballoon_response resp;
1360        int i;
1361        unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
1362
1363        for (i = 0; i < range_count; i++) {
1364                free_balloon_pages(dm, &range_array[i]);
1365                complete(&dm_device.config_event);
1366        }
1367
1368        pr_debug("Freed %u ballooned pages.\n",
1369                prev_pages_ballooned - dm->num_pages_ballooned);
1370
1371        if (req->more_pages == 1)
1372                return;
1373
1374        memset(&resp, 0, sizeof(struct dm_unballoon_response));
1375        resp.hdr.type = DM_UNBALLOON_RESPONSE;
1376        resp.hdr.trans_id = atomic_inc_return(&trans_id);
1377        resp.hdr.size = sizeof(struct dm_unballoon_response);
1378
1379        vmbus_sendpacket(dm_device.dev->channel, &resp,
1380                                sizeof(struct dm_unballoon_response),
1381                                (unsigned long)NULL,
1382                                VM_PKT_DATA_INBAND, 0);
1383
1384        dm->state = DM_INITIALIZED;
1385}
1386
1387static void balloon_onchannelcallback(void *context);
1388
1389static int dm_thread_func(void *dm_dev)
1390{
1391        struct hv_dynmem_device *dm = dm_dev;
1392
1393        while (!kthread_should_stop()) {
1394                wait_for_completion_interruptible_timeout(
1395                                                &dm_device.config_event, 1*HZ);
1396                /*
1397                 * The host expects us to post information on the memory
1398                 * pressure every second.
1399                 */
1400                reinit_completion(&dm_device.config_event);
1401                post_status(dm);
1402        }
1403
1404        return 0;
1405}
1406
1407
1408static void version_resp(struct hv_dynmem_device *dm,
1409                        struct dm_version_response *vresp)
1410{
1411        struct dm_version_request version_req;
1412        int ret;
1413
1414        if (vresp->is_accepted) {
1415                /*
1416                 * We are done; wakeup the
1417                 * context waiting for version
1418                 * negotiation.
1419                 */
1420                complete(&dm->host_event);
1421                return;
1422        }
1423        /*
1424         * If there are more versions to try, continue
1425         * with negotiations; if not
1426         * shutdown the service since we are not able
1427         * to negotiate a suitable version number
1428         * with the host.
1429         */
1430        if (dm->next_version == 0)
1431                goto version_error;
1432
1433        memset(&version_req, 0, sizeof(struct dm_version_request));
1434        version_req.hdr.type = DM_VERSION_REQUEST;
1435        version_req.hdr.size = sizeof(struct dm_version_request);
1436        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1437        version_req.version.version = dm->next_version;
1438        dm->version = version_req.version.version;
1439
1440        /*
1441         * Set the next version to try in case current version fails.
1442         * Win7 protocol ought to be the last one to try.
1443         */
1444        switch (version_req.version.version) {
1445        case DYNMEM_PROTOCOL_VERSION_WIN8:
1446                dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
1447                version_req.is_last_attempt = 0;
1448                break;
1449        default:
1450                dm->next_version = 0;
1451                version_req.is_last_attempt = 1;
1452        }
1453
1454        ret = vmbus_sendpacket(dm->dev->channel, &version_req,
1455                                sizeof(struct dm_version_request),
1456                                (unsigned long)NULL,
1457                                VM_PKT_DATA_INBAND, 0);
1458
1459        if (ret)
1460                goto version_error;
1461
1462        return;
1463
1464version_error:
1465        dm->state = DM_INIT_ERROR;
1466        complete(&dm->host_event);
1467}
1468
1469static void cap_resp(struct hv_dynmem_device *dm,
1470                        struct dm_capabilities_resp_msg *cap_resp)
1471{
1472        if (!cap_resp->is_accepted) {
1473                pr_err("Capabilities not accepted by host\n");
1474                dm->state = DM_INIT_ERROR;
1475        }
1476        complete(&dm->host_event);
1477}
1478
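/*
 * Channel callback: read the next message from the host and dispatch it.
 * Version and capabilities responses complete the probe-time handshake,
 * balloon up and hot-add requests are deferred to work items, while
 * unballoon requests and info messages are handled inline.
 */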
1479static void balloon_onchannelcallback(void *context)
1480{
1481        struct hv_device *dev = context;
1482        u32 recvlen;
1483        u64 requestid;
1484        struct dm_message *dm_msg;
1485        struct dm_header *dm_hdr;
1486        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1487        struct dm_balloon *bal_msg;
1488        struct dm_hot_add *ha_msg;
1489        union dm_mem_page_range *ha_pg_range;
1490        union dm_mem_page_range *ha_region;
1491
1492        memset(recv_buffer, 0, sizeof(recv_buffer));
1493        vmbus_recvpacket(dev->channel, recv_buffer,
1494                         PAGE_SIZE, &recvlen, &requestid);
1495
1496        if (recvlen > 0) {
1497                dm_msg = (struct dm_message *)recv_buffer;
1498                dm_hdr = &dm_msg->hdr;
1499
1500                switch (dm_hdr->type) {
1501                case DM_VERSION_RESPONSE:
1502                        version_resp(dm,
1503                                 (struct dm_version_response *)dm_msg);
1504                        break;
1505
1506                case DM_CAPABILITIES_RESPONSE:
1507                        cap_resp(dm,
1508                                 (struct dm_capabilities_resp_msg *)dm_msg);
1509                        break;
1510
1511                case DM_BALLOON_REQUEST:
1512                        if (dm->state == DM_BALLOON_UP)
1513                                pr_warn("Currently ballooning\n");
1514                        bal_msg = (struct dm_balloon *)recv_buffer;
1515                        dm->state = DM_BALLOON_UP;
1516                        dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
1517                        schedule_work(&dm_device.balloon_wrk.wrk);
1518                        break;
1519
1520                case DM_UNBALLOON_REQUEST:
1521                        dm->state = DM_BALLOON_DOWN;
1522                        balloon_down(dm,
1523                                 (struct dm_unballoon_request *)recv_buffer);
1524                        break;
1525
1526                case DM_MEM_HOT_ADD_REQUEST:
1527                        if (dm->state == DM_HOT_ADD)
1528                                pr_warn("Currently hot-adding\n");
1529                        dm->state = DM_HOT_ADD;
1530                        ha_msg = (struct dm_hot_add *)recv_buffer;
1531                        if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
1532                                /*
1533                                 * This is a normal hot-add request specifying
1534                                 * hot-add memory.
1535                                 */
1536                                dm->host_specified_ha_region = false;
1537                                ha_pg_range = &ha_msg->range;
1538                                dm->ha_wrk.ha_page_range = *ha_pg_range;
1539                                dm->ha_wrk.ha_region_range.page_range = 0;
1540                        } else {
1541                                /*
1542                                 * Host is specifying that we first hot-add
1543                                 * a region and then partially populate this
1544                                 * region.
1545                                 */
1546                                dm->host_specified_ha_region = true;
1547                                ha_pg_range = &ha_msg->range;
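                                    /*
                                     * The region descriptor immediately follows
                                     * the page range in the message body.
                                     */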
1548                                ha_region = &ha_pg_range[1];
1549                                dm->ha_wrk.ha_page_range = *ha_pg_range;
1550                                dm->ha_wrk.ha_region_range = *ha_region;
1551                        }
1552                        schedule_work(&dm_device.ha_wrk.wrk);
1553                        break;
1554
1555                case DM_INFO_MESSAGE:
1556                        process_info(dm, (struct dm_info_msg *)dm_msg);
1557                        break;
1558
1559                default:
1560                        pr_warn("Unhandled message: type: %d\n", dm_hdr->type);
1561
1562                }
1563        }
1564
1565}
1566
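    /*
     * Driver probe: allocate the send buffer, open the VMBus channel, start
     * the worker thread, then negotiate a protocol version with the host and
     * report our capabilities. Any failure unwinds whatever has been set up
     * so far.
     */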
1567static int balloon_probe(struct hv_device *dev,
1568                        const struct hv_vmbus_device_id *dev_id)
1569{
1570        int ret;
1571        unsigned long t;
1572        struct dm_version_request version_req;
1573        struct dm_capabilities cap_msg;
1574
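            /*
             * Memory hot-add can only be honored when the kernel is built
             * with memory hotplug support; otherwise force it off regardless
             * of how hot_add was configured.
             */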
1575#ifdef CONFIG_MEMORY_HOTPLUG
1576        do_hot_add = hot_add;
1577#else
1578        do_hot_add = false;
1579#endif
1580
1581        /*
1582         * First allocate a send buffer.
1583         */
1584
1585        send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1586        if (!send_buffer)
1587                return -ENOMEM;
1588
1589        ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
1590                        balloon_onchannelcallback, dev);
1591
1592        if (ret)
1593                goto probe_error0;
1594
1595        dm_device.dev = dev;
1596        dm_device.state = DM_INITIALIZING;
1597        dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
1598        init_completion(&dm_device.host_event);
1599        init_completion(&dm_device.config_event);
1600        INIT_LIST_HEAD(&dm_device.ha_region_list);
1601        spin_lock_init(&dm_device.ha_lock);
1602        INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
1603        INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
1604        dm_device.host_specified_ha_region = false;
1605
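            /*
             * Start the thread (dm_thread_func) that periodically posts the
             * guest's memory status to the host.
             */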
1606        dm_device.thread =
1607                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1608        if (IS_ERR(dm_device.thread)) {
1609                ret = PTR_ERR(dm_device.thread);
1610                goto probe_error1;
1611        }
1612
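            /*
             * Hook into memory hotplug: hv_online_page() onlines the pages we
             * hot-add, and the notifier lets the driver track memory blocks
             * coming online.
             */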
1613#ifdef CONFIG_MEMORY_HOTPLUG
1614        set_online_page_callback(&hv_online_page);
1615        register_memory_notifier(&hv_memory_nb);
1616#endif
1617
1618        hv_set_drvdata(dev, &dm_device);
1619        /*
1620         * Initiate the handshake with the host and negotiate
1621         * a version that the host can support. We start with the
1622         * highest version number and go down if the host cannot
1623         * support it.
1624         */
1625        memset(&version_req, 0, sizeof(struct dm_version_request));
1626        version_req.hdr.type = DM_VERSION_REQUEST;
1627        version_req.hdr.size = sizeof(struct dm_version_request);
1628        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1629        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
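            /*
             * This is not our last attempt: if the host rejects WIN10,
             * version_resp() retries with the older protocols recorded in
             * next_version.
             */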
1630        version_req.is_last_attempt = 0;
1631        dm_device.version = version_req.version.version;
1632
1633        ret = vmbus_sendpacket(dev->channel, &version_req,
1634                                sizeof(struct dm_version_request),
1635                                (unsigned long)NULL,
1636                                VM_PKT_DATA_INBAND, 0);
1637        if (ret)
1638                goto probe_error2;
1639
1640        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1641        if (t == 0) {
1642                ret = -ETIMEDOUT;
1643                goto probe_error2;
1644        }
1645
1646        /*
1647         * If we could not negotiate a compatible version with the host,
1648         * fail the probe function.
1649         */
1650        if (dm_device.state == DM_INIT_ERROR) {
1651                ret = -EPROTO;
1652                goto probe_error2;
1653        }
1654
1655        pr_info("Using Dynamic Memory protocol version %u.%u\n",
1656                DYNMEM_MAJOR_VERSION(dm_device.version),
1657                DYNMEM_MINOR_VERSION(dm_device.version));
1658
1659        /*
1660         * Now submit our capabilities to the host.
1661         */
1662        memset(&cap_msg, 0, sizeof(struct dm_capabilities));
1663        cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
1664        cap_msg.hdr.size = sizeof(struct dm_capabilities);
1665        cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
1666
1667        cap_msg.caps.cap_bits.balloon = 1;
1668        cap_msg.caps.cap_bits.hot_add = 1;
1669
1670        /*
1671         * Specify our alignment requirement for memory hot-add: the value
1672         * is log2 of the alignment in megabytes, so 7 means 128 MB.
1673         */
1674        cap_msg.caps.cap_bits.hot_add_alignment = 7;
1675
1676        /*
1677         * The host currently does not use these values; we set them
1678         * to the same values the Windows driver reports (no minimum
1679         * page count and no maximum page number).
1680         */
1681        cap_msg.min_page_cnt = 0;
1682        cap_msg.max_page_number = -1;
1683
1684        ret = vmbus_sendpacket(dev->channel, &cap_msg,
1685                                sizeof(struct dm_capabilities),
1686                                (unsigned long)NULL,
1687                                VM_PKT_DATA_INBAND, 0);
1688        if (ret)
1689                goto probe_error2;
1690
1691        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1692        if (t == 0) {
1693                ret = -ETIMEDOUT;
1694                goto probe_error2;
1695        }
1696
1697        /*
1698         * If the host rejects our capabilities,
1699         * fail the probe function.
1700         */
1701        if (dm_device.state == DM_INIT_ERROR) {
1702                ret = -EPROTO;
1703                goto probe_error2;
1704        }
1705
1706        dm_device.state = DM_INITIALIZED;
1707        last_post_time = jiffies;
1708
1709        return 0;
1710
1711probe_error2:
1712#ifdef CONFIG_MEMORY_HOTPLUG
1713        restore_online_page_callback(&hv_online_page);
            unregister_memory_notifier(&hv_memory_nb);
1714#endif
1715        kthread_stop(dm_device.thread);
1716
1717probe_error1:
1718        vmbus_close(dev->channel);
1719probe_error0:
1720        kfree(send_buffer);
1721        return ret;
1722}
1723
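    /*
     * Driver remove: warn if pages are still ballooned out (they remain
     * unavailable to the guest after unload), then cancel the work items,
     * close the channel, stop the thread and free the hot-add bookkeeping.
     */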
1724static int balloon_remove(struct hv_device *dev)
1725{
1726        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1727        struct hv_hotadd_state *has, *tmp;
1728        struct hv_hotadd_gap *gap, *tmp_gap;
1729        unsigned long flags;
1730
1731        if (dm->num_pages_ballooned != 0)
1732                pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
1733
1734        cancel_work_sync(&dm->balloon_wrk.wrk);
1735        cancel_work_sync(&dm->ha_wrk.wrk);
1736
1737        vmbus_close(dev->channel);
1738        kthread_stop(dm->thread);
1739        kfree(send_buffer);
1740#ifdef CONFIG_MEMORY_HOTPLUG
1741        restore_online_page_callback(&hv_online_page);
1742        unregister_memory_notifier(&hv_memory_nb);
1743#endif
1744        spin_lock_irqsave(&dm_device.ha_lock, flags);
1745        list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
1746                list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
1747                        list_del(&gap->list);
1748                        kfree(gap);
1749                }
1750                list_del(&has->list);
1751                kfree(has);
1752        }
1753        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
1754
1755        return 0;
1756}
1757
1758static const struct hv_vmbus_device_id id_table[] = {
1759        /* Dynamic Memory Class ID */
1760        /* 525074DC-8985-46e2-8057-A307DC18A502 */
1761        { HV_DM_GUID, },
1762        { },
1763};
1764
1765MODULE_DEVICE_TABLE(vmbus, id_table);
1766
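    /*
     * Probing can wait on the host handshake (two 5 second timeouts above);
     * asynchronous probing keeps that wait from blocking other device probes.
     */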
1767static struct hv_driver balloon_drv = {
1768        .name = "hv_balloon",
1769        .id_table = id_table,
1770        .probe = balloon_probe,
1771        .remove = balloon_remove,
1772        .driver = {
1773                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
1774        },
1775};
1776
1777static int __init init_balloon_drv(void)
1778{
1779
1780        return vmbus_driver_register(&balloon_drv);
1781}
1782
1783module_init(init_balloon_drv);
1784
1785MODULE_DESCRIPTION("Hyper-V Balloon");
1786MODULE_LICENSE("GPL");
1787